def import_ed25519_publickey_from_file(filepath):
"""
<Purpose>
Load the ED25519 public key object (conformant to
'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath'. Return the
key object in 'securesystemslib.formats.ED25519KEY_SCHEMA' format.
If the key object in 'filepath' contains a private key, it is discarded.
<Arguments>
filepath:
<filepath>.pub file, a public key file.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'filepath' is improperly
formatted or is an unexpected key type.
<Side Effects>
The contents of 'filepath' are read and saved.
<Returns>
An ED25519 key object conformant to
'securesystemslib.formats.ED25519KEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# ED25519 key objects are saved in json and metadata format. Return the
# loaded key object in 'securesystemslib.formats.ED25519KEY_SCHEMA' format that
# also includes the keyid.
ed25519_key_metadata = securesystemslib.util.load_json_file(filepath)
ed25519_key, junk = \
securesystemslib.keys.format_metadata_to_key(ed25519_key_metadata)
# Raise an exception if an unexpected key type is imported. Redundant
# validation of 'keytype'. 'securesystemslib.keys.format_metadata_to_key()'
# should have fully validated 'ed25519_key_metadata'.
if ed25519_key['keytype'] != 'ed25519': # pragma: no cover
message = 'Invalid key type loaded: ' + repr(ed25519_key['keytype'])
raise securesystemslib.exceptions.FormatError(message)
return ed25519_key
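A minimal usage sketch (the file name 'ed25519_key.pub' is hypothetical and assumed to contain public key metadata written by a matching key-generation helper):
>>> pub_key = import_ed25519_publickey_from_file('ed25519_key.pub')
>>> pub_key['keytype']
'ed25519'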
|
def import_ed25519_privatekey_from_file(filepath, password=None, prompt=False):
"""
<Purpose>
Import the encrypted ed25519 key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ED25519KEY_SCHEMA' format.
The private key (which may also contain the public part) is encrypted with
AES-256 in CTR mode of operation. The password is strengthened with
PBKDF2-HMAC-SHA256.
<Arguments>
filepath:
<filepath> file, an encrypted ed25519 key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ed25519 key
object can be returned).
prompt:
If True the user is prompted for a passphrase to decrypt 'filepath'.
Default is False.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ed25519').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ed25519 key object of the form:
'securesystemslib.formats.ED25519KEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
if password and prompt:
raise ValueError("Passing 'password' and 'prompt' True is not allowed.")
# If 'password' was passed check format and that it is not empty.
if password is not None:
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# TODO: PASSWORD_SCHEMA should be securesystemslib.schema.AnyString(min=1)
if not len(password):
raise ValueError('Password must be 1 or more characters')
elif prompt:
# Password confirmation disabled here, which should ideally happen only
# when creating encrypted key files (i.e., improve usability).
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
# NOTE: A user who gets prompted for a password, can only signal that the
# key is not encrypted by entering no password in the prompt, as opposed
# to a programmer who can call the function with or without a 'password'.
# Hence, we treat an empty password here, as if no 'password' was passed.
password = get_password('Enter a password for the encrypted ed25519'
' file \'' + Fore.RED + filepath + Fore.RESET + '\': ',
confirm=False)
# If user sets an empty string for the password, explicitly set the
# password to None, because some functions may expect this later.
if len(password) == 0: # pragma: no cover
password = None
# Finally, regardless of password, try decrypting the key, if necessary.
# Otherwise, load it straight from the disk.
with open(filepath, 'rb') as file_object:
json_str = file_object.read()
return securesystemslib.keys.\
import_ed25519key_from_private_json(json_str, password=password)
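A minimal usage sketch (the file name and password are hypothetical; pass prompt=True instead of a password to be asked for the passphrase interactively):
>>> priv_key = import_ed25519_privatekey_from_file('ed25519_key', password='hypothetical passphrase')
>>> priv_key['keytype']
'ed25519'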
|
def generate_and_write_ecdsa_keypair(filepath=None, password=None):
"""
<Purpose>
Generate an ECDSA keypair, where the encrypted key (using 'password' as the
passphrase) is saved to <'filepath'>. The public key portion of the
generated ECDSA key is saved to <'filepath'>.pub. If the filepath is not
given, the KEYID is used as the filename and the keypair is saved to the
current working directory.
The 'cryptography' library is currently supported. The private key is
encrypted according to 'cryptography's approach: "Encrypt using the best
available encryption for a given key's backend. This is a curated
encryption choice and the algorithm may change over time."
<Arguments>
filepath:
The public and private key files are saved to <filepath>.pub and
<filepath>, respectively. If the filepath is not given, the public and
private keys are saved to the current working directory as <KEYID>.pub
and <KEYID>. KEYID is the generated key's KEYID.
password:
The password, or passphrase, to encrypt the private portion of the
generated ECDSA key. A symmetric encryption key is derived from
'password', so it is not directly used.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be encrypted.
<Side Effects>
Writes key files to '<filepath>' and '<filepath>.pub'.
<Returns>
The 'filepath' of the written key.
"""
# Generate a new ECDSA key object. The 'cryptography' library is currently
# supported and performs the actual cryptographic operations.
ecdsa_key = securesystemslib.keys.generate_ecdsa_key()
if not filepath:
filepath = os.path.join(os.getcwd(), ecdsa_key['keyid'])
else:
logger.debug('The filepath has been specified. Not using the key\'s'
' KEYID as the default filepath.')
# Does 'filepath' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# If the caller does not provide a password argument, prompt for one.
if password is None: # pragma: no cover
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
password = get_password('Enter a password for the ECDSA'
' key (' + Fore.RED + filepath + Fore.RESET + '): ',
confirm=True)
else:
logger.debug('The password has been specified. Not prompting for one')
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# If the parent directory of filepath does not exist,
# create it (and all its parent directories, if necessary).
securesystemslib.util.ensure_parent_dir(filepath)
# Create a temporary file, write the contents of the public key, and move
# to final destination.
file_object = securesystemslib.util.TempFile()
# Generate the ECDSA public key file contents in metadata format (i.e., does
# not include the keyid portion).
keytype = ecdsa_key['keytype']
keyval = ecdsa_key['keyval']
scheme = ecdsa_key['scheme']
ecdsakey_metadata_format = securesystemslib.keys.format_keyval_to_metadata(
keytype, scheme, keyval, private=False)
file_object.write(json.dumps(ecdsakey_metadata_format).encode('utf-8'))
# Write the public key (i.e., 'public', which is in PEM format) to
# '<filepath>.pub'. (1) Create a temporary file, (2) write the contents of
# the public key, and (3) move to final destination.
file_object.move(filepath + '.pub')
# Write the encrypted key string, conformant to
# 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA', to '<filepath>'.
file_object = securesystemslib.util.TempFile()
# Raise 'securesystemslib.exceptions.CryptoError' if 'ecdsa_key' cannot be
# encrypted.
encrypted_key = securesystemslib.keys.encrypt_key(ecdsa_key, password)
file_object.write(encrypted_key.encode('utf-8'))
file_object.move(filepath)
return filepath
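A usage sketch (the path and password are hypothetical). The encrypted private key is written to the given path and the public key metadata to the same path with a '.pub' suffix:
>>> generate_and_write_ecdsa_keypair('keystore/ecdsa_key', password='hypothetical passphrase')
'keystore/ecdsa_key'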
|
def import_ecdsa_publickey_from_file(filepath):
"""
<Purpose>
Load the ECDSA public key object (conformant to
'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath'. Return the
key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format.
If the key object in 'filepath' contains a private key, it is discarded.
<Arguments>
filepath:
<filepath>.pub file, a public key file.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'filepath' is improperly
formatted or is an unexpected key type.
<Side Effects>
The contents of 'filepath' are read and saved.
<Returns>
An ECDSA key object conformant to
'securesystemslib.formats.ECDSAKEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# ECDSA key objects are saved in json and metadata format. Return the
# loaded key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format that
# also includes the keyid.
ecdsa_key_metadata = securesystemslib.util.load_json_file(filepath)
ecdsa_key, junk = \
securesystemslib.keys.format_metadata_to_key(ecdsa_key_metadata)
# Raise an exception if an unexpected key type is imported. Redundant
# validation of 'keytype'. 'securesystemslib.keys.format_metadata_to_key()'
# should have fully validated 'ecdsa_key_metadata'.
if ecdsa_key['keytype'] != 'ecdsa-sha2-nistp256': # pragma: no cover
message = 'Invalid key type loaded: ' + repr(ecdsa_key['keytype'])
raise securesystemslib.exceptions.FormatError(message)
return ecdsa_key
|
def import_ecdsa_privatekey_from_file(filepath, password=None):
"""
<Purpose>
Import the encrypted ECDSA key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format.
The 'cryptography' library is currently supported and performs the actual
cryptographic routine.
<Arguments>
filepath:
<filepath> file, an ECDSA encrypted key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ECDSA key
object can be returned).
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ecdsa-sha2-nistp256').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ECDSA key object of the form: 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# If the caller does not provide a password argument, prompt for one.
# Password confirmation disabled here, which should ideally happen only
# when creating encrypted key files (i.e., improve usability).
if password is None: # pragma: no cover
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
password = get_password('Enter a password for the encrypted ECDSA'
' key (' + Fore.RED + filepath + Fore.RESET + '): ',
confirm=False)
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# Store the encrypted contents of 'filepath' prior to calling the decryption
# routine.
encrypted_key = None
with open(filepath, 'rb') as file_object:
encrypted_key = file_object.read()
# Decrypt the loaded key file, calling the 'cryptography' library to generate
# the derived encryption key from 'password'. Raise
# 'securesystemslib.exceptions.CryptoError' if the decryption fails.
key_object = securesystemslib.keys.decrypt_key(encrypted_key.decode('utf-8'),
password)
# Raise an exception if an unexpected key type is imported.
if key_object['keytype'] != 'ecdsa-sha2-nistp256':
message = 'Invalid key type loaded: ' + repr(key_object['keytype'])
raise securesystemslib.exceptions.FormatError(message)
# Add "keyid_hash_algorithms" so that equal ecdsa keys with different keyids
# can be associated using supported keyid_hash_algorithms.
key_object['keyid_hash_algorithms'] = \
securesystemslib.settings.HASH_ALGORITHMS
return key_object
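A usage sketch, continuing the hypothetical 'keystore/ecdsa_key' example from generate_and_write_ecdsa_keypair() above:
>>> priv_key = import_ecdsa_privatekey_from_file('keystore/ecdsa_key', password='hypothetical passphrase')
>>> priv_key['keytype']
'ecdsa-sha2-nistp256'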
|
def wait_until_element_has_focus(self, locator, timeout=None):
"""Waits until the element identified by `locator` has focus.
You may want to use `Element Focus Should Be Set` instead.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |"""
self._info("Waiting for focus on '%s'" % (locator))
self._wait_until_no_error(timeout, self._check_element_focus_exp, True, locator, timeout)
|
def wait_until_element_does_not_have_focus(self, locator, timeout=None):
"""Waits until the element identified by `locator` doesn't have focus.
You may want to use `Element Focus Should Not Be Set` instead.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |"""
self._info("Waiting until '%s' does not have focus" % (locator))
self._wait_until_no_error(timeout, self._check_element_focus_exp, False, locator, timeout)
|
def wait_until_element_value_is(self, locator, expected, strip=False, timeout=None):
"""Waits until the element identified by `locator` value is exactly the
expected value. You might want to use `Element Value Should Be` instead.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | My Name Is Slim Shady |
| strip | boolean, determines whether it should strip the value of the field before comparison | ${True} / ${False} |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |"""
self._info("Waiting for '%s' value to be '%s'" % (locator, expected))
self._wait_until_no_error(timeout, self._check_element_value_exp, False, locator, expected, strip, timeout)
|
def wait_until_element_value_contains(self, locator, expected, timeout=None):
"""Waits until the element identified by `locator` contains
the expected value. You might want to use `Element Value Should Contain` instead.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | Slim Shady |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |"""
self._info("Waiting for '%s' value to contain '%s'" % (locator, expected))
self._wait_until_no_error(timeout, self._check_element_value_exp, True, locator, expected, False, timeout)
|
def set_element_focus(self, locator):
"""Sets focus on the element identified by `locator`. Should
be used with elements meant to have focus only, such as
text fields. This keyword also waits for the focus to be
active by calling the `Wait Until Element Has Focus` keyword.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |"""
self._info("Setting focus on element '%s'" % (locator))
element = self._element_find(locator, True, True)
element.send_keys(Keys.NULL)
self._wait_until_no_error(None, self._check_element_focus, True, locator)
|
def clear_input_field(self, locator, method=0):
"""Clears the text field identified by `locator`
The element.clear() method doesn't seem to work properly on
all browsers, so this keyword was created to offer alternatives.
The `method` argument defines the method it should use in order
to clear the target field.
0 = Uses the selenium method by doing element.clear \n
1 = Sets focus on the field and presses CTRL + A, and then DELETE \n
2 = Repeatedly presses BACKSPACE until the field is empty
When using a method other than '2', this keyword does not validate that it
successfully cleared the field; you should handle this verification yourself.
When using method '2', it presses BACKSPACE until the field's value is empty.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| method | the clearing method that should be used | no example provided |"""
element = self._element_find(locator, True, True)
if (int(method) == 0):
self._info("Clearing input on element '%s'" % (locator))
element.clear()
elif (int(method) == 1):
self._info("Clearing input on element '%s' by pressing 'CTRL + A + DELETE'" % (locator))
element.send_keys(Keys.CONTROL + 'a')
element.send_keys(Keys.DELETE)
elif (int(method) == 2):
self._info("Clearing input on element '%s' by repeatedly pressing BACKSPACE" % (locator))
while (len(element.get_attribute('value')) != 0):
element.send_keys(Keys.BACKSPACE)
else: element.clear()
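A brief Python usage sketch of the three clearing methods (the 'lib' instance and the locator are hypothetical; in a Robot Framework test the keyword would be called as 'Clear Input Field'):
lib.clear_input_field('id=search_box')            # method 0: element.clear()
lib.clear_input_field('id=search_box', method=1)  # CTRL + A, then DELETE
lib.clear_input_field('id=search_box', method=2)  # BACKSPACE until the field is empty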
|
def element_text_color_should_be(self, locator, expected):
"""Verifies the element identified by `locator` has the expected
text color (it verifies the CSS attribute color). Color should be in
RGBA format.
Example of rgba format: rgba(RED, GREEN, BLUE, ALPHA)
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected color | rgba(0, 128, 0, 1) |"""
self._info("Verifying element '%s' has text color '%s'" % (locator, expected))
self._check_element_css_value(locator, 'color', expected)
|
def element_background_color_should_be(self, locator, expected):
"""Verifies the element identified by `locator` has the expected
background color (it verifies the CSS attribute background-color). Color should
be in RGBA format.
Example of rgba format: rgba(RED, GREEN, BLUE, ALPHA)
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected color | rgba(0, 128, 0, 1) |"""
self._info("Verifying element '%s' has background color '%s'" % (locator, expected))
self._check_element_css_value(locator, 'background-color', expected)
|
def element_width_should_be(self, locator, expected):
"""Verifies the element identified by `locator` has the expected
width. Expected width should be in pixels.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected width | 800 |"""
self._info("Verifying element '%s' width is '%s'" % (locator, expected))
self._check_element_size(locator, 'width', expected)
|
def element_height_should_be(self, locator, expected):
"""Verifies the element identified by `locator` has the expected
height. Expected height should be in pixels.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected height | 600 |"""
self._info("Verifying element '%s' height is '%s'" % (locator, expected))
self._check_element_size(locator, 'height', expected)
|
def element_value_should_be(self, locator, expected, strip=False):
"""Verifies the element identified by `locator` has the expected value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | My Name Is Slim Shady |
| strip | Boolean, determines whether it should strip the field's value before comparison or not | ${True} / ${False} |"""
self._info("Verifying element '%s' value is '%s'" % (locator, expected))
element = self._element_find(locator, True, True)
value = element.get_attribute('value')
if (strip):
value = value.strip()
if str(value) == expected:
return
else:
raise AssertionError("Element '%s' value was not '%s', it was '%s'" % (locator, expected, value))
|
def element_value_should_not_be(self, locator, value, strip=False):
"""Verifies the element identified by `locator` is not the specified value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| value | value it should not be | My Name Is Slim Shady |
| strip | Boolean, determines whether it should strip the field's value before comparison or not | ${True} / ${False} |"""
self._info("Verifying element '%s' value is not '%s'" % (locator, value))
element = self._element_find(locator, True, True)
elem_value = str(element.get_attribute('value'))
if (strip):
elem_value = elem_value.strip()
if elem_value == value:
raise AssertionError("Value was '%s' for element '%s' while it shouldn't have" % (elem_value, locator))
|
def element_value_should_contain(self, locator, expected):
"""Verifies the element identified by `locator` contains the expected value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | Slim Shady |"""
self._info("Verifying element '%s' value contains '%s'" % (locator, expected))
element = self._element_find(locator, True, True)
value = str(element.get_attribute('value'))
if expected in value:
return
else:
raise AssertionError("Value '%s' did not appear in element '%s'. It's value was '%s'" % (expected, locator, value))
|
def element_value_should_not_contain(self, locator, value):
"""Verifies the element identified by `locator` does not contain the specified value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| value | value it should not contain | Slim Shady |"""
self._info("Verifying element '%s' value does not contain '%s'" % (locator, value))
element = self._element_find(locator, True, True)
elem_value = str(element.get_attribute('value'))
if value in elem_value:
raise AssertionError("Value '%s' was found in element '%s' while it shouldn't have" % (value, locator))
|
def element_focus_should_be_set(self, locator):
"""Verifies the element identified by `locator` has focus.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |"""
self._info("Verifying element '%s' focus is set" % locator)
self._check_element_focus(True, locator)
|
def element_focus_should_not_be_set(self, locator):
"""Verifies the element identified by `locator` does not have focus.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |"""
self._info("Verifying element '%s' focus is not set" % locator)
self._check_element_focus(False, locator)
|
def element_css_attribute_should_be(self, locator, prop, expected):
"""Verifies the element identified by `locator` has the expected
value for the targeted `prop`.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| prop | targeted css attribute | background-color |
| expected | expected value | rgba(0, 128, 0, 1) |"""
self._info("Verifying element '%s' has css attribute '%s' with a value of '%s'" % (locator, prop, expected))
self._check_element_css_value(locator, prop, expected)
|
def wait_until_page_contains_elements(self, timeout, *locators):
"""This is a copy of `Wait Until Page Contains Element` but it allows
multiple arguments in order to wait for more than one element.
| *Argument* | *Description* | *Example* |
| timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
| *locators | Selenium 2 element locator(s) | id=MyId |"""
self._wait_until_no_error(timeout, self._wait_for_elements, locators)
|
def wait_until_page_contains_one_of_these_elements(self, timeout, *locators):
"""Waits until at least one of the specified elements is found.
| *Argument* | *Description* | *Example* |
| timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
| *locators | Selenium 2 element locator(s) | id=MyId |"""
self._wait_until_no_error(timeout, self._wait_for_at_least_one_element, locators)
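For example (hypothetical 'lib' instance and locators), to wait up to five seconds for either a success banner or an error banner to appear:
lib.wait_until_page_contains_one_of_these_elements('5s', 'id=success_banner', 'id=error_banner')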
|
def wait_until_page_does_not_contain_these_elements(self, timeout, *locators):
"""Waits until all of the specified elements are not found on the page.
| *Argument* | *Description* | *Example* |
| timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
| *locators | Selenium 2 element locator(s) | id=MyId |"""
self._wait_until_no_error(timeout, self._wait_for_elements_to_go_away, locators)
|
def tap_key(self, key, complementKey=None) :
"""Presses the specified `key`. The `complementKey` defines the key to hold
when pressing the specified `key`. For example, you could use ${VK_TAB} as `key` and
use ${VK_SHIFT} as `complementKey` in order to press Shift + Tab (back tab).
| =Argument= | =Description= | =Example= |
| key | the key to press | ${VK_F4} |
| complementKey | the key to hold while pressing the key passed in previous argument | ${VK_ALT} |"""
driver = self._current_browser()
if (complementKey is not None) :
ActionChains(driver).key_down(complementKey).send_keys(key).key_up(complementKey).perform()
else :
ActionChains(driver).send_keys(key).perform()
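A Python sketch of the Shift + Tab case described above (the 'lib' instance is hypothetical; Keys is the selenium.webdriver.common.keys.Keys class already used elsewhere in this module):
from selenium.webdriver.common.keys import Keys
lib.tap_key(Keys.TAB, complementKey=Keys.SHIFT)  # presses Shift + Tab (back tab)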
|
def wait_until_element_is_clickable(self, locator, timeout=None):
"""Clicks the element specified by `locator` until the operation succeeds. This should be
used with buttons that are generated in real-time and that don't have their click handling available
immediately. This keyword avoids unclickable element exceptions.
| =Argument= | =Description= | =Example= |
| locator | Selenium 2 element locator(s) | id=MyId |
| timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |"""
self._wait_until_no_error(timeout, self._wait_for_click_to_succeed, locator)
|
def _visitor_impl(self, arg):
"""Actual visitor method implementation."""
if (_qualname(type(self)), type(arg)) in _methods:
method = _methods[(_qualname(type(self)), type(arg))]
return method(self, arg)
else:
# if no visitor method found for this arg type,
# search in parent arg type:
arg_parent_type = arg.__class__.__bases__[0]
while arg_parent_type != object:
if (_qualname(type(self)), arg_parent_type) in _methods:
method = _methods[(_qualname(type(self)), arg_parent_type)]
return method(self, arg)
else:
arg_parent_type = arg_parent_type.__bases__[0]
raise VisitorException('No visitor found for class ' + str(type(arg)))
|
def visitor(arg_type):
"""Decorator that creates a visitor method."""
def decorator(fn):
declaring_class = _declaring_class(fn)
_methods[(declaring_class, arg_type)] = fn
# Replace all decorated methods with _visitor_impl
return _visitor_impl
return decorator
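A small sketch of how the decorator is intended to be used (the Node, Leaf, and Printer classes are hypothetical):
class Node(object): pass
class Leaf(Node): pass

class Printer(object):
    @visitor(Node)
    def visit(self, node):
        return 'generic node'

    @visitor(Leaf)
    def visit(self, leaf):
        return 'leaf'

# Printer().visit(Leaf()) returns 'leaf'; Printer().visit(Node()) returns
# 'generic node'. When no visitor is registered for the exact argument type,
# dispatch falls back to its base classes, as implemented in _visitor_impl.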
|
def absolute(parser, token):
'''
Returns a full absolute URL based on the request host.
This template tag takes exactly the same parameters as the url template tag.
'''
node = url(parser, token)
return AbsoluteUrlNode(
view_name=node.view_name,
args=node.args,
kwargs=node.kwargs,
asvar=node.asvar
)
|
def site(parser, token):
'''
Returns a full absolute URL based on the current site.
This template tag takes exactly the same parameters as the url template tag.
'''
node = url(parser, token)
return SiteUrlNode(
view_name=node.view_name,
args=node.args,
kwargs=node.kwargs,
asvar=node.asvar
)
|
def _wait_until_exp(self, timeout, error, function, *args):
"""This replaces the method from Selenium2Library to fix the major logic error in it"""
error = error.replace('<TIMEOUT>', self._format_timeout(timeout))
def wait_func():
return None if function(*args) else error
self._wait_until_no_error_exp(timeout, wait_func)
|
def _wait_until_no_error_exp(self, timeout, wait_func, *args):
"""This replaces the method from Selenium2Library to fix the major logic error in it"""
timeout = robot.utils.timestr_to_secs(timeout) if timeout is not None else self._timeout_in_secs
maxtime = time.time() + timeout
while True:
try:
timeout_error = wait_func(*args)
if not timeout_error: return
if time.time() > maxtime: raise AssertionError(timeout_error)
time.sleep(0.2)
except AssertionError:
raise
except:
if time.time() > maxtime: raise
continue
|
def rst(filename):
'''
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
'''
content = open(filename).read()
return re.sub(r'\.\.\s? code-block::\s*(\w|\+)+', '::', content)
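For example, the substitution rewrites a Sphinx-style directive into the plain literal-block marker that PyPI can render (hypothetical input string):
>>> re.sub(r'\.\.\s? code-block::\s*(\w|\+)+', '::', '.. code-block:: python')
'::'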
|
def get_callable_method_dict(obj):
"""Returns a dictionary of callable methods of object `obj`.
@param obj: ZOS API Python COM object
@return: a dictionary of callable methods
Notes:
the function only returns the callable attributes that are listed by the dir()
function. Properties are not returned.
"""
methodDict = {}
for methodStr in dir(obj):
method = getattr(obj, methodStr, 'none')
if callable(method) and not methodStr.startswith('_'):
methodDict[methodStr] = method
return methodDict
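Although intended for ZOS COM objects, the helper works on any Python object; a toy illustration (the Demo class is hypothetical):
>>> class Demo(object):
...     def foo(self): pass
...     bar = 42
>>> sorted(get_callable_method_dict(Demo()).keys())
['foo']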
|
def replicate_methods(srcObj, dstObj):
"""Replicate callable methods from a `srcObj` to `dstObj` (generally a wrapper object).
@param srcObj: source object
@param dstObj: destination object of the same type.
@return : none
Implementer notes:
1. Once the methods are mapped from the `srcObj` to the `dstObj`, the method calls will
not get "routed" through `__getattr__` method (if implemented) in `type(dstObj)` class.
2. An example of what a 'key' and 'value' look like:
key: MakeSequential
value: <bound method IOpticalSystem.MakeSequential of
<win32com.gen_py.ZOSAPI_Interfaces.IOpticalSystem instance at 0x77183968>>
"""
# prevent methods that we intend to specialize from being mapped. The specialized
# (overridden) methods are methods with the same name as the corresponding method in
# the source ZOS API COM object written for each ZOS API COM object in an associated
# python script such as i_analyses_methods.py for I_Analyses
overridden_methods = get_callable_method_dict(type(dstObj)).keys()
#overridden_attrs = [each for each in type(dstObj).__dict__.keys() if not each.startswith('_')]
#
def zos_wrapper_deco(func):
def wrapper(*args, **kwargs):
return wrapped_zos_object(func(*args, **kwargs))
varnames = func.im_func.func_code.co_varnames # alternative is to use inspect.getargspec
params = [par for par in varnames if par not in ('self', 'ret')] # removes 'self' and 'ret'
wrapper.__doc__ = func.im_func.func_name + '(' + ', '.join(params) + ')'
return wrapper
#
for key, value in get_callable_method_dict(srcObj).items():
if key not in overridden_methods:
setattr(dstObj, key, zos_wrapper_deco(value))
|
def get_properties(zos_obj):
Returns lists of properties bound to the object `zos_obj`.
@param zos_obj: ZOS API Python COM object
@return prop_get: list of properties that are only getters
@return prop_set: list of properties that are both getters and setters
"""
prop_get = set(zos_obj._prop_map_get_.keys())
prop_set = set(zos_obj._prop_map_put_.keys())
if prop_set.issubset(prop_get):
prop_get = prop_get.difference(prop_set)
else:
msg = 'Assumption all getters are also setters is incorrect!'
raise NotImplementedError(msg)
return list(prop_get), list(prop_set)
|
def managed_wrapper_class_factory(zos_obj):
Creates and returns a wrapper class of a ZOS object, exposing the ZOS object's
methods and properties, and patching custom specialized attributes.
@param zos_obj: ZOS API Python COM object
"""
cls_name = repr(zos_obj).split()[0].split('.')[-1]
dispatch_attr = '_' + cls_name.lower() # protocol to be followed to store the ZOS COM object
cdict = {} # class dictionary
# patch the properties of the base objects
base_cls_list = inheritance_dict.get(cls_name, None)
if base_cls_list:
for base_cls_name in base_cls_list:
getters, setters = get_properties(_CastTo(zos_obj, base_cls_name))
for each in getters:
exec("p{} = ZOSPropMapper('{}', '{}', cast_to='{}')".format(each, dispatch_attr, each, base_cls_name), globals(), cdict)
for each in setters:
exec("p{} = ZOSPropMapper('{}', '{}', setter=True, cast_to='{}')".format(each, dispatch_attr, each, base_cls_name), globals(), cdict)
# patch the property attributes of the given ZOS object
getters, setters = get_properties(zos_obj)
for each in getters:
exec("p{} = ZOSPropMapper('{}', '{}')".format(each, dispatch_attr, each), globals(), cdict)
for each in setters:
exec("p{} = ZOSPropMapper('{}', '{}', setter=True)".format(each, dispatch_attr, each), globals(), cdict)
def __init__(self, zos_obj):
# dispatcher attribute
cls_name = repr(zos_obj).split()[0].split('.')[-1]
dispatch_attr = '_' + cls_name.lower() # protocol to be followed to store the ZOS COM object
self.__dict__[dispatch_attr] = zos_obj
self._dispatch_attr_value = dispatch_attr # used in __getattr__
# Store base class object
self._base_cls_list = inheritance_dict.get(cls_name, None)
# patch the methods of the base class(s) of the given ZOS object
if self._base_cls_list:
for base_cls_name in self._base_cls_list:
replicate_methods(_CastTo(zos_obj, base_cls_name), self)
# patch the methods of given ZOS object
replicate_methods(zos_obj, self)
# mark object as wrapped to prevent it from being wrapped subsequently
self._wrapped = True
# Provide a way to make property calls without the prefix p
def __getattr__(self, attrname):
return wrapped_zos_object(getattr(self.__dict__[self._dispatch_attr_value], attrname))
def __repr__(self):
if type(self).__name__ == 'IZOSAPI_Application':
repr_str = "{.__name__}(NumberOfOpticalSystems = {})".format(type(self), self.pNumberOfOpticalSystems)
else:
repr_str = "{.__name__}".format(type(self))
return repr_str
cdict['__init__'] = __init__
cdict['__getattr__'] = __getattr__
cdict['__repr__'] = __repr__
# patch custom methods from python files imported as modules
module_import_str = """
try:
from pyzos.zos_obj_override.{module:} import *
except ImportError:
pass
""".format(module=cls_name.lower() + '_methods')
exec(module_import_str, globals(), cdict)
_ = cdict.pop('print_function', None)
_ = cdict.pop('division', None)
return type(cls_name, (), cdict)
|
def wrapped_zos_object(zos_obj):
"""Helper function to wrap ZOS API COM objects.
@param zos_obj : ZOS API Python COM object
@return: instance of the wrapped ZOS API class. If the input object is not a ZOS-API
COM object or if it is already wrapped, then the object is returned without
wrapping.
Notes:
The function dynamically creates a wrapped class with all the provided methods,
properties, and custom methods monkey patched; and returns an instance of it.
"""
if hasattr(zos_obj, '_wrapped') or ('CLSID' not in dir(zos_obj)):
return zos_obj
else:
Class = managed_wrapper_class_factory(zos_obj)
return Class(zos_obj)
|
def generate_public_and_private(scheme='ecdsa-sha2-nistp256'):
"""
<Purpose>
Generate a pair of ECDSA public and private keys with one of the supported,
external cryptography libraries. The public and private keys returned
conform to 'securesystemslib.formats.PEMECDSA_SCHEMA' and
'securesystemslib.formats.PEMECDSA_SCHEMA', respectively.
The ECDSA public key has the PEM format:
'-----BEGIN PUBLIC KEY-----
...
-----END PUBLIC KEY-----'
The ECDSA private key has the PEM format:
'-----BEGIN EC PRIVATE KEY-----
...
-----END EC PRIVATE KEY-----'
TODO: should we encrypt the private keys returned here? Should
create_signature() accept encrypted keys?
>>> public, private = generate_public_and_private()
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
True
<Arguments>
scheme:
A string indicating which algorithm to use for the generation of the
public and private ECDSA keys. 'ecdsa-sha2-nistp256' is the only
currently supported ECDSA algorithm, which is supported by OpenSSH and
specified in RFC 5656 (https://tools.ietf.org/html/rfc5656).
<Exceptions>
securesystemslib.exceptions.FormatError, if 'scheme' is improperly
formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is an
unsupported algorithm.
<Side Effects>
None.
<Returns>
A (public, private) tuple whose elements conform to
'securesystemslib.formats.PEMECDSA_SCHEMA' and
'securesystemslib.formats.PEMECDSA_SCHEMA', respectively.
"""
# Does 'scheme' have the correct format?
# Verify that 'scheme' is of the correct type, and that it's one of the
# supported ECDSA signature schemes. It must conform to
# 'securesystemslib.formats.ECDSA_SCHEME_SCHEMA'. Raise
# 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
public_key = None
private_key = None
# An if-clause is not strictly needed, since 'ecdsa-sha2-nistp256' is the
# only currently supported ECDSA signature scheme. Nevertheless, include the
# conditional statement to accommodate any schemes that might be added.
if scheme == 'ecdsa-sha2-nistp256':
private_key = ec.generate_private_key(ec.SECP256R1, default_backend())
public_key = private_key.public_key()
# The ECDSA_SCHEME_SCHEMA.check_match() above should have detected any
# invalid 'scheme'. This is a defensive check.
else: #pragma: no cover
raise securesystemslib.exceptions.UnsupportedAlgorithmError('An unsupported'
' scheme specified: ' + repr(scheme) + '.\n Supported'
' algorithms: ' + repr(_SUPPORTED_ECDSA_SCHEMES))
private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
return public_pem.decode('utf-8'), private_pem.decode('utf-8')
|
def create_signature(public_key, private_key, data, scheme='ecdsa-sha2-nistp256'):
"""
<Purpose>
Return a (signature, scheme) tuple.
>>> requested_scheme = 'ecdsa-sha2-nistp256'
>>> public, private = generate_public_and_private(requested_scheme)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> signature, scheme = create_signature(public, private, data, requested_scheme)
>>> securesystemslib.formats.ECDSASIGNATURE_SCHEMA.matches(signature)
True
>>> requested_scheme == scheme
True
<Arguments>
public:
The ECDSA public key in PEM format.
private:
The ECDSA private key in PEM format.
data:
Byte data used by create_signature() to generate the signature returned.
scheme:
The signature scheme used to generate the signature. For example:
'ecdsa-sha2-nistp256'.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if a signature cannot be created.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is not
one of the supported signature schemes.
<Side Effects>
None.
<Returns>
A signature dictionary conformant to
'securesystemslib.formats.SIGNATURE_SCHEMA'. ECDSA signatures are XX bytes,
however, the hexlified signature is stored in the dictionary returned.
"""
# Do 'public_key' and 'private_key' have the correct format?
# This check will ensure that the arguments conform to
# 'securesystemslib.formats.PEMECDSA_SCHEMA'. Raise
# 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.PEMECDSA_SCHEMA.check_match(public_key)
# Is 'private_key' properly formatted?
securesystemslib.formats.PEMECDSA_SCHEMA.check_match(private_key)
# Is 'scheme' properly formatted?
securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
# 'ecdsa-sha2-nistp256' is the only currently supported ECDSA scheme, so this
# if-clause isn't strictly needed. Nevertheless, the conditional statement
# is included to accommodate multiple schemes that can potentially be added
# in the future.
if scheme == 'ecdsa-sha2-nistp256':
try:
private_key = load_pem_private_key(private_key.encode('utf-8'),
password=None, backend=default_backend())
signature = private_key.sign(data, ec.ECDSA(hashes.SHA256()))
except TypeError as e:
raise securesystemslib.exceptions.CryptoError('Could not create'
' signature: ' + str(e))
# A defensive check for an invalid 'scheme'. The
# ECDSA_SCHEME_SCHEMA.check_match() above should have already validated it.
else: #pragma: no cover
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
return signature, scheme
|
<Purpose>
Verify that 'signature' was produced by the private key associated with
'public_key'.
>>> scheme = 'ecdsa-sha2-nistp256'
>>> public, private = generate_public_and_private(scheme)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> signature, scheme = create_signature(public, private, data, scheme)
>>> verify_signature(public, scheme, signature, data)
True
>>> verify_signature(public, scheme, signature, b'bad data')
False
<Arguments>
public_key:
The ECDSA public key in PEM format. The public key is needed to verify
'signature'.
scheme:
The signature scheme used to generate 'signature'. For example:
'ecdsa-sha2-nistp256'.
signature:
The signature to be verified, which should have been generated by
the private key associated with 'public_key' over 'data'.
data:
Byte data that was used by create_signature() to generate 'signature'.
<Exceptions>
securesystemslib.exceptions.FormatError, if any of the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is
not one of the supported signature schemes.
<Side Effects>
None.
<Returns>
Boolean, indicating whether the 'signature' of data was generated by
the private key associated with 'public_key'.
def verify_signature(public_key, scheme, signature, data):
"""
<Purpose>
Verify that 'signature' was produced by the private key associated with
'public_key'.
>>> scheme = 'ecdsa-sha2-nistp256'
>>> public, private = generate_public_and_private(scheme)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> signature, scheme = create_signature(public, private, data, scheme)
>>> verify_signature(public, scheme, signature, data)
True
>>> verify_signature(public, scheme, signature, b'bad data')
False
<Arguments>
public_key:
The ECDSA public key in PEM format. The public key is needed to verify
'signature'.
scheme:
The signature scheme used to generate 'signature'. For example:
'ecdsa-sha2-nistp256'.
signature:
The signature to be verified, which should have been generated by
the private key associated with 'public_key' over 'data'.
data:
Byte data that was used by create_signature() to generate 'signature'.
<Exceptions>
securesystemslib.exceptions.FormatError, if any of the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is
not one of the supported signature schemes.
<Side Effects>
None.
<Returns>
Boolean, indicating whether the 'signature' of data was generated by
the private key associated with 'public_key'.
"""
# Are the arguments properly formatted?
# If not, raise 'securesystemslib.exceptions.FormatError'.
securesystemslib.formats.PEMECDSA_SCHEMA.check_match(public_key)
securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
securesystemslib.formats.ECDSASIGNATURE_SCHEMA.check_match(signature)
ecdsa_key = load_pem_public_key(public_key.encode('utf-8'),
backend=default_backend())
if not isinstance(ecdsa_key, ec.EllipticCurvePublicKey):
raise securesystemslib.exceptions.FormatError('Invalid ECDSA public'
' key: ' + repr(public_key))
else:
logger.debug('Loaded a valid ECDSA public key.')
# verify() raises an 'InvalidSignature' exception if 'signature'
# is invalid.
try:
ecdsa_key.verify(signature, data, ec.ECDSA(hashes.SHA256()))
return True
except (TypeError, cryptography.exceptions.InvalidSignature):
return False
|
<Purpose>
Create public and private ECDSA keys from a private 'pem'. The public and
private keys are strings in PEM format:
public: '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----',
private: '-----BEGIN EC PRIVATE KEY----- ... -----END EC PRIVATE KEY-----'}}
>>> junk, private = generate_public_and_private()
>>> public, private = create_ecdsa_public_and_private_from_pem(private)
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
True
>>> passphrase = 'secret'
>>> encrypted_pem = create_ecdsa_encrypted_pem(private, passphrase)
>>> public, private = create_ecdsa_public_and_private_from_pem(encrypted_pem, passphrase)
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
True
<Arguments>
pem:
A string in PEM format. The private key is extracted and returned in
an ecdsakey object.
password: (optional)
The password, or passphrase, to decrypt the private part of the ECDSA key
if it is encrypted. 'password' is not used directly as the encryption
key, a stronger encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if the ECDSA key
pair could not be extracted, possibly due to an unsupported algorithm.
<Side Effects>
None.
<Returns>
A dictionary containing the ECDSA keys and other identifying information.
Conforms to 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
def create_ecdsa_public_and_private_from_pem(pem, password=None):
"""
<Purpose>
Create public and private ECDSA keys from a private 'pem'. The public and
private keys are strings in PEM format:
public: '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----',
private: '-----BEGIN EC PRIVATE KEY----- ... -----END EC PRIVATE KEY-----'}}
>>> junk, private = generate_public_and_private()
>>> public, private = create_ecdsa_public_and_private_from_pem(private)
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
True
>>> passphrase = 'secret'
>>> encrypted_pem = create_ecdsa_encrypted_pem(private, passphrase)
>>> public, private = create_ecdsa_public_and_private_from_pem(encrypted_pem, passphrase)
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
True
<Arguments>
pem:
A string in PEM format. The private key is extracted and returned in
an ecdsakey object.
password: (optional)
The password, or passphrase, to decrypt the private part of the ECDSA key
if it is encrypted. 'password' is not used directly as the encryption
key, a stronger encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if the ECDSA key
pair could not be extracted, possibly due to an unsupported algorithm.
<Side Effects>
None.
<Returns>
A dictionary containing the ECDSA keys and other identifying information.
Conforms to 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
"""
# Does 'pem' have the correct format?
# This check will ensure 'pem' conforms to
# 'securesystemslib.formats.PEMECDSA_SCHEMA'.
securesystemslib.formats.PEMECDSA_SCHEMA.check_match(pem)
if password is not None:
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
password = password.encode('utf-8')
else:
logger.debug('The password/passphrase is unset. The PEM is expected'
' to be unencrypted.')
public = None
private = None
# Generate the public and private ECDSA keys. The pyca/cryptography library
# performs the actual import operation.
try:
private = load_pem_private_key(pem.encode('utf-8'), password=password,
backend=default_backend())
except (ValueError, cryptography.exceptions.UnsupportedAlgorithm) as e:
raise securesystemslib.exceptions.CryptoError('Could not import private'
' PEM.\n' + str(e))
public = private.public_key()
# Serialize public and private keys to PEM format.
private = private.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
public = public.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
return public.decode('utf-8'), private.decode('utf-8')
|
<Purpose>
Return a string in PEM format, where the private part of the ECDSA key is
encrypted. The private part of the ECDSA key is encrypted as done by
pyca/cryptography: "Encrypt using the best available encryption for a given
key's backend. This is a curated encryption choice and the algorithm may
change over time."
>>> junk, private = generate_public_and_private()
>>> passphrase = 'secret'
>>> encrypted_pem = create_ecdsa_encrypted_pem(private, passphrase)
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(encrypted_pem)
True
<Arguments>
private_pem:
The private ECDSA key string in PEM format.
passphrase:
The passphrase, or password, to encrypt the private part of the ECDSA
key. 'passphrase' is not used directly as the encryption key, a stronger
encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if an ECDSA key in encrypted PEM
format cannot be created.
<Side Effects>
None.
<Returns>
A string in PEM format, where the private ECDSA portion is encrypted.
Conforms to 'securesystemslib.formats.PEMECDSA_SCHEMA'.
def create_ecdsa_encrypted_pem(private_pem, passphrase):
"""
<Purpose>
Return a string in PEM format, where the private part of the ECDSA key is
encrypted. The private part of the ECDSA key is encrypted as done by
pyca/cryptography: "Encrypt using the best available encryption for a given
key's backend. This is a curated encryption choice and the algorithm may
change over time."
>>> junk, private = generate_public_and_private()
>>> passphrase = 'secret'
>>> encrypted_pem = create_ecdsa_encrypted_pem(private, passphrase)
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(encrypted_pem)
True
<Arguments>
private_pem:
The private ECDSA key string in PEM format.
passphrase:
The passphrase, or password, to encrypt the private part of the ECDSA
key. 'passphrase' is not used directly as the encryption key, a stronger
encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if an ECDSA key in encrypted PEM
format cannot be created.
<Side Effects>
None.
<Returns>
A string in PEM format, where the private ECDSA portion is encrypted.
Conforms to 'securesystemslib.formats.PEMECDSA_SCHEMA'.
"""
# Does 'private_pem' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.PEMRSA_SCHEMA.check_match(private_pem)
# Does 'passphrase' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(passphrase)
encrypted_pem = None
private = load_pem_private_key(private_pem.encode('utf-8'), password=None,
backend=default_backend())
encrypted_private_pem = \
private.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.BestAvailableEncryption(passphrase.encode('utf-8')))
return encrypted_private_pem
|
Clean any processing data, and prepare object for reuse
def reset(self):
"""Clean any processing data, and prepare object for reuse
"""
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
|
Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
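A minimal standalone sketch of what the keyword regex above extracts from a hypothetical oldhepdata line (the real method then dispatches the key through self.mapping):

import re

line = '*qual: RE : P P --> Z0 Z0 X\n'
reg = re.search(r"^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
print(reg.group('key').strip())  # 'qual'
print(reg.group('value'))        # 'RE : P P --> Z0 Z0 X'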
|
Set current parsing state to 'table',
create new table object and add it to tables collection
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection
"""
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
|
Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
def _parse_table_data(self, data):
"""Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
"""
header = data.split(':')
self.current_table.data_header = header
for i, h in enumerate(header):
header[i] = h.strip()
x_count = header.count('x')
y_count = header.count('y')
if not self.current_table.xheaders:
raise BadFormat("*xheader line needs to appear before *data: %s" % data)
if not self.current_table.yheaders:
raise BadFormat("*yheader line needs to appear before *data: %s" % data)
# use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
# TODO - is this appropriate behavior, or are references in YAML files acceptable? They are certainly less human readable.
self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
'values': []} for i in range(x_count)],
'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
'values': []} for i in range(y_count)]}
xy_mapping = []
current_x_count = 0
current_y_count = 0
for h in header:
if h == 'x':
xy_mapping.append(current_x_count)
current_x_count += 1
if h == 'y':
xy_mapping.append(current_y_count)
current_y_count += 1
last_index = self.current_file.tell()
line = self._strip_comments(self.current_file.readline())
while line and not line.startswith('*'):
data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
if len(data_entry_elements) == len(header):
# this is kind of a big stretch... I assume that x is always first
for i, h in enumerate(header):
single_element = data_entry_elements[i].strip()
# number patterns copied from old subs.pl parsing script
pmnum1 = '[-+]?[\d]+\.?[\d]*'
pmnum2 = '[-+]?\.[\d]+'
pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
# implement same regular expression matching as in old subs.pl parsing script
if h == 'x': # independent variables
r = re.search('^(?P<value>' + pmnum + ')$', single_element)
if r: # "value"
single_element = {'value': r.group('value')}
else:
r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
if r: # "value (BIN=low TO high)"
single_element = {'value': float(r.group('value')),
'low': float(r.group('low')), 'high': float(r.group('high'))}
else:
r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
single_element)
if r: # "low TO high"
single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
else: # everything else: don't try to convert to float
single_element = {'value': single_element}
# TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
# "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
# Probably not: unsupported formats will just be written as a text string.
self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
# extract energy if SQRT(S) is one of the 'x' variables
xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
if xheader['name'].startswith('SQRT(S)') and xheader.get('units', '').lower() in ('gev',):
for energy in single_element.values():
try:
energy = float(energy)
self.set_of_energies.add(energy)
except (ValueError, TypeError):
pass
elif h == 'y': # dependent variable
pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
element = {'errors': []}
if r: # asymmetric first error
element['value'] = r.group('value').strip()
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
if r: # symmetric first error
element['value'] = r.group('value').strip()
if r.group('error'):
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'symerror': error}]
else:
element['errors'] += [{'symerror': error}]
else: # everything else
element['value'] = single_element
err_sys = []
if r and r.group('err_sys'):
err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
for err in err_sys + self.current_table.dserrors:
err = err.strip(' \t,')
if not err:
continue
error = {}
label = 'sys'
r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
if r: # symmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
error = {'symerror': error}
else:
r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
if r: # asymmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
error = {'asymerror': {'plus': err_p, 'minus': err_m}}
if not r:
# error happened
raise ValueError("Error while parsing data line: %s" % line)
error['label'] = label
if element['value'] != single_element:
element['errors'].append(error)
self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
elif data_entry_elements:
raise BadFormat("%s data entry elements but %s expected: %s" %
(len(data_entry_elements), len(header), line))
last_index = self.current_file.tell()
l = self.current_file.readline()
line = self._strip_comments(l)
self.current_file.seek(last_index)
# extract minimum and maximum from set of energies
if self.set_of_energies:
energy_min = min(self.set_of_energies)
energy_max = max(self.set_of_energies)
if energy_max > energy_min:
energy = str(energy_min) + '-' + str(energy_max)
else:
energy = energy_min
self._parse_energies(energy)
if self.current_table.description:
if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
self._reformat_matrix()
|
Transform a square matrix into a format with two independent variables and one dependent variable.
def _reformat_matrix(self):
"""Transform a square matrix into a format with two independent variables and one dependent variable.
"""
nxax = len(self.current_table.data['independent_variables'])
nyax = len(self.current_table.data['dependent_variables'])
npts = len(self.current_table.data['dependent_variables'][0]['values'])
# check if 1 x-axis, and npts (>=2) equals number of y-axes
if nxax != 1 or nyax != npts or npts < 2:
return False
# add second independent variable with each value duplicated npts times
if len(self.current_table.xheaders) == 2:
xheader = self.current_table.xheaders[1]
else:
xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
for value in self.current_table.data['independent_variables'][0]['values']:
self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
# duplicate values of first independent variable npts times
self.current_table.data['independent_variables'][0]['values'] \
= [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
# suppress header if different for second y-axis
if self.current_table.data['dependent_variables'][0]['header'] != \
self.current_table.data['dependent_variables'][1]['header']:
self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
# remove qualifier if different for second y-axis
iqdel = [] # list of qualifier indices to be deleted
for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
iqdel.append(iq)
for iq in iqdel[::-1]: # need to delete in reverse order
del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
# append values of second and subsequent y-axes to first dependent variable
for iy in range(1, nyax):
for value in self.current_table.data['dependent_variables'][iy]['values']:
self.current_table.data['dependent_variables'][0]['values'].append(value)
# finally, delete the second and subsequent y-axes in reverse order
for iy in range(nyax-1, 0, -1):
del self.current_table.data['dependent_variables'][iy]
return True
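For concreteness, a sketch of the reshaping on a hypothetical 2x2 matrix (headers and qualifiers omitted; the real method modifies self.current_table.data in place):

# Before: one x-axis with two bins, and one y-axis per matrix row.
before = {
    'independent_variables': [{'values': ['x1', 'x2']}],
    'dependent_variables': [{'values': ['a', 'b']},    # first matrix row
                            {'values': ['c', 'd']}]}   # second matrix row

# After _reformat_matrix(): two independent variables and a single dependent variable.
after = {
    'independent_variables': [{'values': ['x1', 'x2', 'x1', 'x2']},    # original x, tiled once per row
                              {'values': ['x1', 'x1', 'x2', 'x2']}],   # new second x, labelling the row
    'dependent_variables': [{'values': ['a', 'b', 'c', 'd']}]}         # rows concatenated into one y-axis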
|
Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
qualifier_list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
qualifier_list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and units and units.lower() in ('gev',):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except (ValueError, TypeError):
pass
self.current_table.qualifiers.append(qualifier_list)
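As a standalone sketch, the same splitting applied to a hypothetical '*qual' value (the real method additionally stores the result on self.current_table.qualifiers and harvests SQRT(S) energies):

import re

data = 'SQRT(S) IN GEV : 7000 : 8000'
headers = data.split(':')
name_units = re.split(' IN ', headers[0].strip(), flags=re.I)
name, units = name_units[0].strip(), name_units[1].strip()
print([{'name': name, 'units': units, 'value': h.strip()} for h in headers[1:]])
# [{'name': 'SQRT(S)', 'units': 'GEV', 'value': '7000'},
#  {'name': 'SQRT(S)', 'units': 'GEV', 'value': '8000'}]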
|
Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
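Since the method does not use 'self', its behaviour can be sketched standalone with a hypothetical header string:

import re

data = 'PT IN GEV : ETARAP'
parsed = []
for header in data.split(':'):
    parts = re.split(' IN ', header, flags=re.I)
    xheader = {'name': parts[0].strip()}
    if len(parts) > 1:
        xheader['units'] = parts[1].strip()
    parsed.append(xheader)
print(parsed)  # [{'name': 'PT', 'units': 'GEV'}, {'name': 'ETARAP'}]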
|
Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
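A quick illustration of the regex at the heart of this helper (standalone; inside the parser it is called as self._strip_comments(line)):

import re

line = '1.0; 2.0; # statistical errors only\n'
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
print(repr(r.group('line')))  # '1.0; 2.0; ' -- the method then re-appends the stripped newline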
|
Reads multiline symbols (usually comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw.startswith('#'):
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
|
Returns a parsing function which will parse data as text, and add it to the table metadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
|
Returns a parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to self.additional_data
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
|
If an error is a percentage, we convert to a float, then
calculate the percentage of the supplied value.
:param value: base value, e.g. 10
:param error: e.g. 20.0%
:return: the absolute error, e.g. 2.0 for the above case.
def error_value_processor(value, error):
"""
If an error is a percentage, we convert to a float, then
calculate the percentage of the supplied value.
:param value: base value, e.g. 10
:param error: e.g. 20.0%
:return: the absolute error, e.g. 2.0 for the above case.
"""
if isinstance(error, (str, unicode)):
try:
if "%" in error:
error_float = float(error.replace("%", ""))
error_abs = (value/100) * error_float
return error_abs
elif error == "":
error = 0.0
else:
error = float(error)
except (ValueError, TypeError):
pass
return error
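For example, with hypothetical inputs (note that the 'unicode' check above assumes a Python 2 environment):

print(error_value_processor(10.0, '20.0%'))  # 2.0  -- 20% of the base value 10.0
print(error_value_processor(10.0, '0.35'))   # 0.35 -- plain numeric strings are converted to float
print(error_value_processor(10.0, ''))       # 0.0  -- an empty error string becomes 0.0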
|
<Purpose>
To get a file's length and hash information. The hashes are computed with
the sha256 algorithm by default. This function is used in the signerlib.py
and updater.py modules.
<Arguments>
filepath:
Absolute file path of a file.
hash_algorithms:
A list of hash algorithm names used to hash the file, e.g. ['sha256'].
<Exceptions>
securesystemslib.exceptions.FormatError: If hash of the file does not match
HASHDICT_SCHEMA.
securesystemslib.exceptions.Error: If 'filepath' does not exist.
<Returns>
A tuple (length, hashes) describing 'filepath'.
def get_file_details(filepath, hash_algorithms=['sha256']):
"""
<Purpose>
To get a file's length and hash information. The hashes are computed with
the sha256 algorithm by default. This function is used in the signerlib.py
and updater.py modules.
<Arguments>
filepath:
Absolute file path of a file.
hash_algorithms:
A list of hash algorithm names used to hash the file, e.g. ['sha256'].
<Exceptions>
securesystemslib.exceptions.FormatError: If hash of the file does not match
HASHDICT_SCHEMA.
securesystemslib.exceptions.Error: If 'filepath' does not exist.
<Returns>
A tuple (length, hashes) describing 'filepath'.
"""
# Making sure that the format of 'filepath' is a path string.
# 'securesystemslib.exceptions.FormatError' is raised on incorrect format.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
securesystemslib.formats.HASHALGORITHMS_SCHEMA.check_match(hash_algorithms)
# The returned file hashes of 'filepath'.
file_hashes = {}
# Does the path exist?
if not os.path.exists(filepath):
raise securesystemslib.exceptions.Error('Path ' + repr(filepath) + ' does'
' not exist.')
filepath = os.path.abspath(filepath)
# Obtaining length of the file.
file_length = os.path.getsize(filepath)
# Obtaining hash of the file.
for algorithm in hash_algorithms:
digest_object = securesystemslib.hash.digest_filename(filepath, algorithm)
file_hashes.update({algorithm: digest_object.hexdigest()})
# Performing a format check to ensure 'file_hash' corresponds HASHDICT_SCHEMA.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.HASHDICT_SCHEMA.check_match(file_hashes)
return file_length, file_hashes
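A minimal usage sketch, assuming (as the surrounding references suggest) that this helper is exposed as securesystemslib.util.get_file_details; the temporary file is only for illustration:

import os
import tempfile
import securesystemslib.util

fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as file_object:
    file_object.write(b'hello world')

length, hashes = securesystemslib.util.get_file_details(path)
print(length)          # 11
print(sorted(hashes))  # ['sha256']
os.remove(path)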
|
<Purpose>
To ensure existence of the parent directory of 'filename'. If the parent
directory of 'filename' does not exist, create it.
Example: If 'filename' is '/a/b/c/d.txt', and only the directory '/a/b/'
exists, then the directory '/a/b/c/' will be created.
<Arguments>
filename:
A path string.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'filename' is improperly
formatted.
<Side Effects>
A directory is created whenever the parent directory of 'filename' does not
exist.
<Return>
None.
def ensure_parent_dir(filename):
"""
<Purpose>
To ensure existence of the parent directory of 'filename'. If the parent
directory of 'filename' does not exist, create it.
Example: If 'filename' is '/a/b/c/d.txt', and only the directory '/a/b/'
exists, then the directory '/a/b/c/' will be created.
<Arguments>
filename:
A path string.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'filename' is improperly
formatted.
<Side Effects>
A directory is created whenever the parent directory of 'filename' does not
exist.
<Return>
None.
"""
# Ensure 'filename' corresponds to 'PATH_SCHEMA'.
# Raise 'securesystemslib.exceptions.FormatError' on a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filename)
# Split 'filename' into head and tail, check if head exists.
directory = os.path.split(filename)[0]
if directory and not os.path.exists(directory):
# mode = 'rwx------'. 448 (decimal) is 700 in octal.
os.makedirs(directory, 448)
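A usage sketch with a hypothetical path, assuming the helper lives in securesystemslib.util:

import os
import tempfile
import securesystemslib.util

base = tempfile.mkdtemp()
target = os.path.join(base, 'a', 'b', 'c.txt')
securesystemslib.util.ensure_parent_dir(target)
print(os.path.isdir(os.path.join(base, 'a', 'b')))  # True; 'c.txt' itself is not created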
|
<Purpose>
Check if the directory containing 'filepath' is in the list/tuple of
'confined_directories'.
<Arguments>
filepath:
A string representing the path of a file. The following example path
strings are viewed as files and not directories: 'a/b/c', 'a/b/c.txt'.
confined_directories:
A list, or a tuple, of directory strings.
<Exceptions>
securesystemslib.exceptions.FormatError: On incorrect format of the input.
<Return>
Boolean. True, if path is either the empty string
or in 'confined_directories'; False otherwise.
def file_in_confined_directories(filepath, confined_directories):
"""
<Purpose>
Check if the directory containing 'filepath' is in the list/tuple of
'confined_directories'.
<Arguments>
filepath:
A string representing the path of a file. The following example path
strings are viewed as files and not directories: 'a/b/c', 'a/b/c.txt'.
confined_directories:
A list, or a tuple, of directory strings.
<Exceptions>
securesystemslib.exceptions.FormatError: On incorrect format of the input.
<Return>
Boolean. True, if path is either the empty string
or in 'confined_directories'; False otherwise.
"""
# Do the arguments have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.RELPATH_SCHEMA.check_match(filepath)
securesystemslib.formats.RELPATHS_SCHEMA.check_match(confined_directories)
for confined_directory in confined_directories:
# The empty string (arbitrarily chosen) signifies the client is confined
# to all directories and subdirectories. No need to check 'filepath'.
if confined_directory == '':
return True
# Normalized paths needed, to account for up-level references, etc.
# TUF clients have the option of setting the list of directories in
# 'confined_directories'.
filepath = os.path.normpath(filepath)
confined_directory = os.path.normpath(confined_directory)
# A TUF client may restrict itself to specific directories on the
# remote repository. The directories listed in 'confined_directories', not
# including their subdirectories, are the only locations the client will
# download targets from.
if os.path.dirname(filepath) == confined_directory:
return True
return False
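Illustrative calls with hypothetical relative paths, assuming the helper lives in securesystemslib.util:

import securesystemslib.util

print(securesystemslib.util.file_in_confined_directories('a/b/c.txt', ['a/b']))    # True
print(securesystemslib.util.file_in_confined_directories('a/b/c/d.txt', ['a/b']))  # False: subdirectories of 'a/b' are not included
print(securesystemslib.util.file_in_confined_directories('a/b/c.txt', ['']))       # True: the empty string confines to everything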
|
<Purpose>
Find the index, if any, of a role with a given name in a list of roles.
<Arguments>
roles:
The list of roles, each of which must have a 'name' attribute.
delegated_role:
The name of the role to be found in the list of roles.
<Exceptions>
securesystemslib.exceptions.RepositoryError, if the list of roles has
invalid data.
<Side Effects>
No known side effects.
<Returns>
The unique index, an integer, in the list of roles. If 'delegated_role'
does not exist, 'None' is returned.
def find_delegated_role(roles, delegated_role):
"""
<Purpose>
Find the index, if any, of a role with a given name in a list of roles.
<Arguments>
roles:
The list of roles, each of which must have a 'name' attribute.
delegated_role:
The name of the role to be found in the list of roles.
<Exceptions>
securesystemslib.exceptions.RepositoryError, if the list of roles has
invalid data.
<Side Effects>
No known side effects.
<Returns>
The unique index, an integer, in the list of roles. If 'delegated_role'
does not exist, 'None' is returned.
"""
# Do the arguments have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named. Raise
# 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
securesystemslib.formats.ROLELIST_SCHEMA.check_match(roles)
securesystemslib.formats.ROLENAME_SCHEMA.check_match(delegated_role)
# The index of a role, if any, with the same name.
role_index = None
for index in six.moves.xrange(len(roles)):
role = roles[index]
name = role.get('name')
# This role has no name.
if name is None:
no_name_message = 'Role with no name.'
raise securesystemslib.exceptions.RepositoryError(no_name_message)
# Does this role have the same name?
else:
# This role has the same name, and...
if name == delegated_role:
# ...it is the only known role with the same name.
if role_index is None:
role_index = index
# ...there are at least two roles with the same name.
else:
raise securesystemslib.exceptions.RepositoryError(
'Duplicate role (' + str(delegated_role) + ').')
# This role has a different name.
else:
logger.debug('Skipping role ' + repr(name) + '; it does not match the'
' requested role ' + repr(delegated_role) + '.')
return role_index
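A hedged usage sketch with hypothetical roles; the exact fields required by ROLELIST_SCHEMA are assumed here to be 'keyids' and 'threshold' alongside the optional 'name', and the module path is assumed to be securesystemslib.util:

import securesystemslib.util

roles = [
    {'name': 'django', 'keyids': ['f' * 64], 'threshold': 1},
    {'name': 'requests', 'keyids': ['a' * 64], 'threshold': 1}]

print(securesystemslib.util.find_delegated_role(roles, 'requests'))  # 1
print(securesystemslib.util.find_delegated_role(roles, 'flask'))     # None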
|
<Purpose>
Ensure that the list of targets specified by 'rolename' are allowed; this
is determined by inspecting the 'delegations' field of the parent role of
'rolename'. If a target specified by 'rolename' is not found in the
delegations field of 'metadata_object_of_parent', raise an exception. The
top-level role 'targets' is allowed to list any target file, so this
function does not raise an exception if 'rolename' is 'targets'.
Targets allowed are either explicitly listed under the 'paths' field, or
implicitly exist under a subdirectory of a parent directory listed under
'paths'. A parent role may delegate trust to all files under a particular
directory, including files in subdirectories, by simply listing the
directory (e.g., '/packages/source/Django/', the equivalent of
'/packages/source/Django/*'). Targets listed in hashed bins are also
validated (i.e., their calculated path hash prefixes must be delegated by
the parent role).
TODO: Should the TUF spec restrict the repository to one particular
algorithm when calculating path hash prefixes (currently restricted to
SHA256)? Should we allow the repository to specify in the role dictionary
the algorithm used for these generated hashed paths?
<Arguments>
rolename:
The name of the role whose targets must be verified. This is a
role name and should not end in '.json'. Examples: 'root', 'targets',
'targets/linux/x86'.
list_of_targets:
The targets of 'rolename', as listed in targets field of the 'rolename'
metadata. 'list_of_targets' are target paths relative to the targets
directory of the repository. The delegations of the parent role are
checked to verify that the targets of 'list_of_targets' are valid.
parent_delegations:
The parent delegations of 'rolename'. The metadata object stores
the allowed paths and path hash prefixes of child delegations in its
'delegations' attribute.
<Exceptions>
securesystemslib.exceptions.FormatError:
If any of the arguments are improperly formatted.
securesystemslib.exceptions.ForbiddenTargetError:
If the targets of 'metadata_role' are not allowed according to
the parent's metadata file. The 'paths' and 'path_hash_prefixes'
attributes are verified.
securesystemslib.exceptions.RepositoryError:
If the parent of 'rolename' has not made a delegation to 'rolename'.
<Side Effects>
None.
<Returns>
None.
def ensure_all_targets_allowed(rolename, list_of_targets, parent_delegations):
"""
<Purpose>
Ensure that the list of targets specified by 'rolename' are allowed; this
is determined by inspecting the 'delegations' field of the parent role of
'rolename'. If a target specified by 'rolename' is not found in the
delegations field of 'metadata_object_of_parent', raise an exception. The
top-level role 'targets' is allowed to list any target file, so this
function does not raise an exception if 'rolename' is 'targets'.
Targets allowed are either explicitly listed under the 'paths' field, or
implicitly exist under a subdirectory of a parent directory listed under
'paths'. A parent role may delegate trust to all files under a particular
directory, including files in subdirectories, by simply listing the
directory (e.g., '/packages/source/Django/', the equivalent of
'/packages/source/Django/*'). Targets listed in hashed bins are also
validated (i.e., their calculated path hash prefixes must be delegated by
the parent role).
TODO: Should the TUF spec restrict the repository to one particular
algorithm when calculating path hash prefixes (currently restricted to
SHA256)? Should we allow the repository to specify in the role dictionary
the algorithm used for these generated hashed paths?
<Arguments>
rolename:
The name of the role whose targets must be verified. This is a
role name and should not end in '.json'. Examples: 'root', 'targets',
'targets/linux/x86'.
list_of_targets:
The targets of 'rolename', as listed in targets field of the 'rolename'
metadata. 'list_of_targets' are target paths relative to the targets
directory of the repository. The delegations of the parent role are
checked to verify that the targets of 'list_of_targets' are valid.
parent_delegations:
The parent delegations of 'rolename'. The metadata object stores
the allowed paths and path hash prefixes of child delegations in its
'delegations' attribute.
<Exceptions>
securesystemslib.exceptions.FormatError:
If any of the arguments are improperly formatted.
securesystemslib.exceptions.ForbiddenTargetError:
If the targets of 'metadata_role' are not allowed according to
the parent's metadata file. The 'paths' and 'path_hash_prefixes'
attributes are verified.
securesystemslib.exceptions.RepositoryError:
If the parent of 'rolename' has not made a delegation to 'rolename'.
<Side Effects>
None.
<Returns>
None.
"""
# Do the arguments have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named. Raise
# 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
securesystemslib.formats.ROLENAME_SCHEMA.check_match(rolename)
securesystemslib.formats.RELPATHS_SCHEMA.check_match(list_of_targets)
securesystemslib.formats.DELEGATIONS_SCHEMA.check_match(parent_delegations)
# Return if 'rolename' is 'targets'. 'targets' is not a delegated role. Any
# target file listed in 'targets' is allowed.
if rolename == 'targets':
return
# The allowed targets of delegated roles are stored in the parent's metadata
# file. Iterate 'list_of_targets' and confirm they are trusted, or their
# root parent directory exists in the role delegated paths, or path hash
# prefixes, of the parent role. First, locate 'rolename' in the 'roles'
# attribute of 'parent_delegations'.
roles = parent_delegations['roles']
role_index = find_delegated_role(roles, rolename)
# Ensure the delegated role exists prior to extracting trusted paths from
# the parent's 'paths', or trusted path hash prefixes from the parent's
# 'path_hash_prefixes'.
if role_index is not None:
role = roles[role_index]
allowed_child_paths = role.get('paths')
allowed_child_path_hash_prefixes = role.get('path_hash_prefixes')
actual_child_targets = list_of_targets
if allowed_child_path_hash_prefixes is not None:
consistent = paths_are_consistent_with_hash_prefixes
# 'actual_child_targets' (i.e., 'list_of_targets') should have length
# greater than zero due to the format check above.
if not consistent(actual_child_targets,
allowed_child_path_hash_prefixes):
message = repr(rolename) + ' specifies a target that does not' + \
' have a path hash prefix listed in its parent role.'
raise securesystemslib.exceptions.ForbiddenTargetError(message)
elif allowed_child_paths is not None:
# Check that each delegated target is either explicitly listed or a
# parent directory is found under role['paths'], otherwise raise an
# exception. If the parent role explicitly lists target file paths in
# 'paths', this loop will run in O(n^2), the worst-case. The repository
# maintainer will likely delegate entire directories, and opt for
# explicit file paths if the targets in a directory are delegated to
# different roles/developers.
for child_target in actual_child_targets:
for allowed_child_path in allowed_child_paths:
if fnmatch.fnmatch(child_target, allowed_child_path):
break
else:
raise securesystemslib.exceptions.ForbiddenTargetError(
'Role ' + repr(rolename) + ' specifies'
' target ' + repr(child_target) + ', which is not an allowed'
' path according to the delegations set by its parent role.')
else:
# 'role' should have been validated when it was downloaded.
# The 'paths' or 'path_hash_prefixes' attributes should not be missing,
# so raise an error in case this clause is reached.
raise securesystemslib.exceptions.FormatError(repr(role) + ' did not'
' contain one of the required fields ("paths" or'
' "path_hash_prefixes").')
# Raise an exception if the parent has not delegated to the specified
# 'rolename' child role.
else:
raise securesystemslib.exceptions.RepositoryError('The parent role has'
' not delegated to ' + repr(rolename) + '.')
|
<Purpose>
Determine whether a list of paths are consistent with their alleged path
hash prefixes. By default, the SHA256 hash function is used.
<Arguments>
paths:
A list of paths for which their hashes will be checked.
path_hash_prefixes:
The list of path hash prefixes with which to check the list of paths.
<Exceptions>
securesystemslib.exceptions.FormatError:
If the arguments are improperly formatted.
<Side Effects>
No known side effects.
<Returns>
A Boolean indicating whether or not the paths are consistent with the
hash prefix.
def paths_are_consistent_with_hash_prefixes(paths, path_hash_prefixes):
"""
<Purpose>
Determine whether a list of paths are consistent with their alleged path
hash prefixes. By default, the SHA256 hash function is used.
<Arguments>
paths:
A list of paths for which their hashes will be checked.
path_hash_prefixes:
The list of path hash prefixes with which to check the list of paths.
<Exceptions>
securesystemslib.exceptions.FormatError:
If the arguments are improperly formatted.
<Side Effects>
No known side effects.
<Returns>
A Boolean indicating whether or not the paths are consistent with the
hash prefix.
"""
# Do the arguments have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named. Raise
# 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
securesystemslib.formats.RELPATHS_SCHEMA.check_match(paths)
securesystemslib.formats.PATH_HASH_PREFIXES_SCHEMA.check_match(path_hash_prefixes)
# Assume that 'paths' and 'path_hash_prefixes' are inconsistent until
# proven otherwise.
consistent = False
# The format checks above ensure the 'paths' and 'path_hash_prefix' lists
# have lengths greater than zero.
for path in paths:
path_hash = get_target_hash(path)
# Assume that every path is inconsistent until proven otherwise.
consistent = False
for path_hash_prefix in path_hash_prefixes:
if path_hash.startswith(path_hash_prefix):
consistent = True
break
# This path has no matching path_hash_prefix. Stop looking further.
if not consistent:
break
return consistent
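A usage sketch that derives a prefix at runtime rather than hard-coding one (module path assumed to be securesystemslib.util):

import securesystemslib.util

path = 'packages/file1.txt'
prefix = securesystemslib.util.get_target_hash(path)[:4]

print(securesystemslib.util.paths_are_consistent_with_hash_prefixes([path], [prefix]))  # True
print(securesystemslib.util.paths_are_consistent_with_hash_prefixes([path], ['0000']))  # almost certainly False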
|
<Purpose>
Compute the hash of 'target_filepath'. This is useful in conjunction with
the "path_hash_prefixes" attribute in a delegated targets role, which tells
us which paths it is implicitly responsible for.
The repository may optionally organize targets into hashed bins to ease
target delegations and role metadata management. The use of consistent
hashing allows for a uniform distribution of targets into bins.
<Arguments>
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The hash of 'target_filepath'.
def get_target_hash(target_filepath):
"""
<Purpose>
Compute the hash of 'target_filepath'. This is useful in conjunction with
the "path_hash_prefixes" attribute in a delegated targets role, which tells
us which paths it is implicitly responsible for.
The repository may optionally organize targets into hashed bins to ease
target delegations and role metadata management. The use of consistent
hashing allows for a uniform distribution of targets into bins.
<Arguments>
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The hash of 'target_filepath'.
"""
# Does 'target_filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.RELPATH_SCHEMA.check_match(target_filepath)
# Calculate the hash of the filepath to determine which bin the target
# belongs to. The client currently assumes the repository uses
# 'HASH_FUNCTION' to generate hashes and encodes target paths with 'utf-8'.
digest_object = securesystemslib.hash.digest(HASH_FUNCTION)
encoded_target_filepath = target_filepath.encode('utf-8')
digest_object.update(encoded_target_filepath)
target_filepath_hash = digest_object.hexdigest()
return target_filepath_hash
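Assuming HASH_FUNCTION is 'sha256' (the usual default) and the module path is securesystemslib.util, the result agrees with hashlib applied to the UTF-8 encoded path:

import hashlib
import securesystemslib.util

print(securesystemslib.util.get_target_hash('file1.txt') ==
      hashlib.sha256('file1.txt'.encode('utf-8')).hexdigest())  # True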
|
<Purpose>
Tries to import json module. We used to fall back to the simplejson module,
but we have dropped support for that module. We are keeping this interface
intact for backwards compatibility.
<Arguments>
None.
<Exceptions>
ImportError: on failure to import the json module.
<Side Effects>
None.
<Return>
json module
def import_json():
"""
<Purpose>
Tries to import json module. We used to fall back to the simplejson module,
but we have dropped support for that module. We are keeping this interface
intact for backwards compatibility.
<Arguments>
None.
<Exceptions>
ImportError: on failure to import the json module.
<Side Effects>
None.
<Return>
json module
"""
global _json_module
if _json_module is not None:
return _json_module
else:
try:
module = __import__('json')
# The 'json' module is available in Python >= 2.6, and thus this exception
# should not occur in any supported Python installation (>= 2.6) of TUF.
except ImportError: #pragma: no cover
raise ImportError('Could not import the json module')
else:
_json_module = module
return module
|
<Purpose>
Deserialize 'data' (JSON string) to a Python object.
<Arguments>
data:
A JSON string.
<Exceptions>
securesystemslib.exceptions.Error, if 'data' cannot be deserialized to a
Python object.
<Side Effects>
None.
<Returns>
Deserialized object. For example, a dictionary.
def load_json_string(data):
"""
<Purpose>
Deserialize 'data' (JSON string) to a Python object.
<Arguments>
data:
A JSON string.
<Exceptions>
securesystemslib.exceptions.Error, if 'data' cannot be deserialized to a
Python object.
<Side Effects>
None.
<Returns>
Deserialized object. For example, a dictionary.
"""
deserialized_object = None
try:
deserialized_object = json.loads(data)
except TypeError:
message = 'Invalid JSON string: ' + repr(data)
raise securesystemslib.exceptions.Error(message)
except ValueError:
message = 'Cannot deserialize to a Python object: ' + repr(data)
raise securesystemslib.exceptions.Error(message)
else:
return deserialized_object
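For example (module path assumed to be securesystemslib.util):

import securesystemslib.util

print(securesystemslib.util.load_json_string('{"keytype": "ed25519", "threshold": 1}'))
# {'keytype': 'ed25519', 'threshold': 1}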
|
<Purpose>
Deserialize a JSON object from a file containing the object.
<Arguments>
filepath:
Absolute path of JSON file.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'filepath' is improperly
formatted.
securesystemslib.exceptions.Error: If 'filepath' cannot be deserialized to
a Python object.
IOError in case of runtime IO exceptions.
<Side Effects>
None.
<Return>
Deserialized object. For example, a dictionary.
def load_json_file(filepath):
"""
<Purpose>
Deserialize a JSON object from a file containing the object.
<Arguments>
filepath:
Absolute path of JSON file.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'filepath' is improperly
formatted.
securesystemslib.exceptions.Error: If 'filepath' cannot be deserialized to
a Python object.
IOError in case of runtime IO exceptions.
<Side Effects>
None.
<Return>
Deserialized object. For example, a dictionary.
"""
# Making sure that the format of 'filepath' is a path string.
# securesystemslib.exceptions.FormatError is raised on incorrect format.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
deserialized_object = None
# The file is most likely gzipped if its name ends in '.gz'.
if filepath.endswith('.gz'):
logger.debug('gzip.open(' + str(filepath) + ')')
fileobject = six.StringIO(gzip.open(filepath).read().decode('utf-8'))
else:
logger.debug('open(' + str(filepath) + ')')
fileobject = open(filepath)
try:
deserialized_object = json.load(fileobject)
except (ValueError, TypeError) as e:
raise securesystemslib.exceptions.Error('Cannot deserialize to a'
' Python object: ' + repr(filepath))
else:
return deserialized_object
finally:
fileobject.close()
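A minimal usage sketch with a throwaway JSON file (this helper is referenced elsewhere in this document as securesystemslib.util.load_json_file):

import json
import os
import tempfile
import securesystemslib.util

fd, path = tempfile.mkstemp(suffix='.json')
with os.fdopen(fd, 'w') as file_object:
    json.dump({'version': 1}, file_object)

print(securesystemslib.util.load_json_file(path))  # {'version': 1}
os.remove(path)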
|
<Purpose>
While protecting against timing attacks, compare the hexadecimal arguments
and determine if they are equal.
<Arguments>
digest1:
The first hexadecimal string value to compare.
digest2:
The second hexadecimal string value to compare.
<Exceptions>
securesystemslib.exceptions.FormatError: If the arguments are improperly
formatted.
<Side Effects>
None.
<Return>
Return True if 'digest1' is equal to 'digest2', False otherwise.
def digests_are_equal(digest1, digest2):
"""
<Purpose>
While protecting against timing attacks, compare the hexadecimal arguments
and determine if they are equal.
<Arguments>
digest1:
The first hexadecimal string value to compare.
digest2:
The second hexadecimal string value to compare.
<Exceptions>
securesystemslib.exceptions.FormatError: If the arguments are improperly
formatted.
<Side Effects>
None.
<Return>
Return True if 'digest1' is equal to 'digest2', False otherwise.
"""
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.HEX_SCHEMA.check_match(digest1)
securesystemslib.formats.HEX_SCHEMA.check_match(digest2)
if len(digest1) != len(digest2):
return False
are_equal = True
for element in range(len(digest1)):
if digest1[element] != digest2[element]:
are_equal = False
return are_equal
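For example, with short hypothetical hex digests (module path assumed to be securesystemslib.util):

import securesystemslib.util

print(securesystemslib.util.digests_are_equal('ab01', 'ab01'))  # True
print(securesystemslib.util.digests_are_equal('ab01', 'ab02'))  # False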
|
__init__ helper.
def _default_temporary_directory(self, prefix):
"""__init__ helper."""
try:
self.temporary_file = tempfile.NamedTemporaryFile(prefix=prefix)
except OSError as err: # pragma: no cover
logger.critical('Cannot create a system temporary directory: '+repr(err))
raise securesystemslib.exceptions.Error(err)
|
<Purpose>
Read specified number of bytes. If size is not specified then the whole
file is read and the file pointer is placed at the beginning of the file.
<Arguments>
size:
Number of bytes to be read.
<Exceptions>
securesystemslib.exceptions.FormatError: if 'size' is invalid.
<Return>
String of data.
def read(self, size=None):
"""
<Purpose>
Read specified number of bytes. If size is not specified then the whole
file is read and the file pointer is placed at the beginning of the file.
<Arguments>
size:
Number of bytes to be read.
<Exceptions>
securesystemslib.exceptions.FormatError: if 'size' is invalid.
<Return>
String of data.
"""
if size is None:
self.temporary_file.seek(0)
data = self.temporary_file.read()
self.temporary_file.seek(0)
return data
else:
if not (isinstance(size, int) and size > 0):
raise securesystemslib.exceptions.FormatError
return self.temporary_file.read(size)
|
<Purpose>
Writes a data string to the file.
<Arguments>
data:
A string containing some data.
auto_flush:
Boolean argument, if set to 'True', all data will be flushed from
internal buffer.
<Exceptions>
None.
<Return>
None.
def write(self, data, auto_flush=True):
"""
<Purpose>
Writes a data string to the file.
<Arguments>
data:
A string containing some data.
auto_flush:
Boolean argument, if set to 'True', all data will be flushed from
internal buffer.
<Exceptions>
None.
<Return>
None.
"""
self.temporary_file.write(data)
if auto_flush:
self.flush()
|
<Purpose>
Copies 'self.temporary_file' to a non-temp file at 'destination_path' and
closes 'self.temporary_file' so that it is removed.
<Arguments>
destination_path:
Path to store the file in.
<Exceptions>
None.
<Return>
None.
def move(self, destination_path):
"""
<Purpose>
Copies 'self.temporary_file' to a non-temp file at 'destination_path' and
closes 'self.temporary_file' so that it is removed.
<Arguments>
destination_path:
Path to store the file in.
<Exceptions>
None.
<Return>
None.
"""
self.flush()
self.seek(0)
destination_file = open(destination_path, 'wb')
shutil.copyfileobj(self.temporary_file, destination_file)
# Force the destination file to be written to disk from Python's internal
# buffers and the operating system's buffers. os.fsync() should follow flush().
destination_file.flush()
os.fsync(destination_file.fileno())
destination_file.close()
# 'self.close()' closes temporary file which destroys itself.
self.close_temp_file()
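A short usage sketch of the temporary-file workflow above, assuming the enclosing
class is securesystemslib.util.TempFile; the destination path is hypothetical.
import securesystemslib.util
# Write data to a self-destroying temporary file and then persist it to disk.
temp_file = securesystemslib.util.TempFile()
temp_file.write(b'{"signed": {}, "signatures": []}')
temp_file.move('/tmp/metadata.json')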
|
<Purpose>
To decompress a compressed temp file object. Decompression is performed
on a temp file object that is compressed; this occurs after downloading
a compressed file. For instance, if a compressed version of some meta
file in the repository is downloaded, the temp file containing the
compressed meta file will be decompressed using this function.
Note that after calling this method, write() can no longer be called.

            meta.json.gz
              |...[download]
            temporary_file (containing meta.json.gz)
            /                      \
  temporary_file                  _orig_file
  (containing meta.json,          (containing meta.json.gz,
   the decompressed data)          the original compressed data)
<Arguments>
compression:
A string indicating the type of compression that was used to compress
a file. Only gzip is allowed.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'compression' is improperly formatted.
securesystemslib.exceptions.Error: If an invalid compression is given.
securesystemslib.exceptions.DecompressionError: If the compression failed for any reason.
<Side Effects>
'self._orig_file' is used to store the original data of 'temporary_file'.
<Return>
None.
def decompress_temp_file_object(self, compression):
"""
<Purpose>
To decompress a compressed temp file object. Decompression is performed
on a temp file object that is compressed; this occurs after downloading
a compressed file. For instance, if a compressed version of some meta
file in the repository is downloaded, the temp file containing the
compressed meta file will be decompressed using this function.
Note that after calling this method, write() can no longer be called.

            meta.json.gz
              |...[download]
            temporary_file (containing meta.json.gz)
            /                      \
  temporary_file                  _orig_file
  (containing meta.json,          (containing meta.json.gz,
   the decompressed data)          the original compressed data)
<Arguments>
compression:
A string indicating the type of compression that was used to compress
a file. Only gzip is allowed.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'compression' is improperly formatted.
securesystemslib.exceptions.Error: If an invalid compression is given.
securesystemslib.exceptions.DecompressionError: If the compression failed for any reason.
<Side Effects>
'self._orig_file' is used to store the original data of 'temporary_file'.
<Return>
None.
"""
# Does 'compression' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.NAME_SCHEMA.check_match(compression)
if self._orig_file is not None:
raise securesystemslib.exceptions.Error('Can only set compression on a'
' TempFile once.')
if compression != 'gzip':
raise securesystemslib.exceptions.Error('Only gzip compression is'
' supported.')
self.seek(0)
self._compression = compression
self._orig_file = self.temporary_file
try:
gzip_file_object = gzip.GzipFile(fileobj=self.temporary_file, mode='rb')
uncompressed_content = gzip_file_object.read()
self.temporary_file = tempfile.NamedTemporaryFile()
self.temporary_file.write(uncompressed_content)
self.flush()
except Exception as exception:
raise securesystemslib.exceptions.DecompressionError(exception)
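A sketch of the decompression flow above, using gzip-compressed bytes written
directly to a TempFile in place of a real download (securesystemslib.util.TempFile
is assumed to be the enclosing class).
import gzip
import securesystemslib.util
# Simulate a downloaded, gzip-compressed metadata file.
temp_file = securesystemslib.util.TempFile()
temp_file.write(gzip.compress(b'{"signed": {}}'))
temp_file.decompress_temp_file_object('gzip')
print(temp_file.read())  # b'{"signed": {}}'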
|
<Purpose>
Closes the temporary file object. 'close_temp_file' mimics the usual
file.close(); however, the temporary file destroys itself when
'close_temp_file' is called. Further, if compression is set, the second
temporary file instance 'self._orig_file' is also closed so that no
temporary files are left open.
<Arguments>
None.
<Exceptions>
None.
<Side Effects>
Closes 'self._orig_file'.
<Return>
None.
def close_temp_file(self):
"""
<Purpose>
Closes the temporary file object. 'close_temp_file' mimics the usual
file.close(); however, the temporary file destroys itself when
'close_temp_file' is called. Further, if compression is set, the second
temporary file instance 'self._orig_file' is also closed so that no
temporary files are left open.
<Arguments>
None.
<Exceptions>
None.
<Side Effects>
Closes 'self._orig_file'.
<Return>
None.
"""
self.temporary_file.close()
# If compression has been set, we need to explicitly close the original
# file object.
if self._orig_file is not None:
self._orig_file.close()
|
<Purpose>
Return True if 'object' matches this schema, False if it doesn't.
If the caller wishes to signal an error on a failed match, check_match()
should be called, which will raise a 'exceptions.FormatError' exception.
def matches(self, object):
"""
<Purpose>
Return True if 'object' matches this schema, False if it doesn't.
If the caller wishes to signal an error on a failed match, check_match()
should be called, which will raise a 'exceptions.FormatError' exception.
"""
try:
self.check_match(object)
except securesystemslib.exceptions.FormatError:
return False
else:
return True
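For illustration, matches() supports a quiet Boolean check where check_match()
would raise an exception; HEX_SCHEMA is used here only as an example schema.
import securesystemslib.formats
# Boolean check: no exception is raised on a mismatch.
securesystemslib.formats.HEX_SCHEMA.matches('deadbeef')   # True
securesystemslib.formats.HEX_SCHEMA.matches('not hex!')   # False
# Exception-raising check: raises FormatError on a mismatch.
securesystemslib.formats.HEX_SCHEMA.check_match('deadbeef')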
|
Return a single event object or block until an event is
received and return it.
- etypes(str): If defined, Slack event type(s) not matching
the filter will be ignored. See https://api.slack.com/events for
a listing of valid event types.
- timeout(int): Max time, in seconds, to block waiting for new event
def get_event(self, *etypes, timeout=None):
"""
Return a single event object or block until an event is
received and return it.
- etypes(str): If defined, Slack event type(s) not matching
the filter will be ignored. See https://api.slack.com/events for
a listing of valid event types.
- timeout(int): Max time, in seconds, to block waiting for new event
"""
self._validate_etypes(*etypes)
start = time.time()
e = self._eventq.get(timeout=timeout)
if isinstance(e, Exception):
raise e
self._stats['events_recieved'] += 1
if etypes and e.type not in etypes:
if timeout:
timeout -= time.time() - start
log.debug('ignoring filtered event: {}'.format(e.json))
self._stats['events_dropped'] += 1
return self.get_event(*etypes, timeout=timeout)
return e
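A usage sketch of the event filter above; the 'client' object and its connection
setup are assumed to exist already.
# Block for up to 30 seconds waiting for the next chat message, ignoring all
# other Slack event types.
event = client.get_event('message', timeout=30)
print(event.type, event.get('text'))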
|
returns a blocking generator yielding Slack event objects
params:
- etypes(str): If defined, Slack event type(s) not matching
the filter will be ignored. See https://api.slack.com/events for
a listing of valid event types.
- idle_timeout(int): optional maximum amount of time (in seconds)
to wait between events before returning
def events(self, *etypes, idle_timeout=None):
"""
returns a blocking generator yielding Slack event objects
params:
- etypes(str): If defined, Slack event type(s) not matching
the filter will be ignored. See https://api.slack.com/events for
a listing of valid event types.
- idle_timeout(int): optional maximum amount of time (in seconds)
to wait between events before returning
"""
while self._state != STATE_STOPPED:
try:
yield self.get_event(*etypes, timeout=idle_timeout)
except Queue.Empty:
log.info('idle timeout reached for events()')
return
|
Send a message to a channel or group via Slack RTM socket, returning
the resulting message object
params:
- text(str): Message text to send
- channel(Channel): Target channel
- confirm(bool): If True, wait for a reply-to confirmation before returning.
def send_msg(self, text, channel, confirm=True):
"""
Send a message to a channel or group via Slack RTM socket, returning
the resulting message object
params:
- text(str): Message text to send
- channel(Channel): Target channel
- confirm(bool): If True, wait for a reply-to confirmation before returning.
"""
self._send_id += 1
msg = SlackMsg(self._send_id, channel.id, text)
self.ws.send(msg.json)
self._stats['messages_sent'] += 1
if confirm:
# Wait for confirmation our message was received
for e in self.events():
if e.get('reply_to') == self._send_id:
msg.sent = True
msg.ts = e.ts
return msg
else:
return msg
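Putting events() and send_msg() together, a minimal echo-bot sketch; the 'client'
object and its connection setup are assumed, and the idle timeout is arbitrary.
# Echo every incoming chat message back to the channel it came from.
for event in client.events('message', idle_timeout=300):
  if event.get('text'):
    client.send_msg(event.get('text'), event.channel, confirm=False)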
|
Extend event object with User and Channel objects
def _process_event(self, event):
""" Extend event object with User and Channel objects """
if event.get('user'):
event.user = self.lookup_user(event.get('user'))
if event.get('channel'):
event.channel = self.lookup_channel(event.get('channel'))
if self.user.id in event.mentions:
event.mentions_me = True
event.mentions = [ self.lookup_user(uid) for uid in event.mentions ]
return event
|
Converts a supported ``input_format`` (*oldhepdata*, *yaml*)
to a supported ``output_format`` (*csv*, *root*, *yaml*, *yoda*).
:param input: location of input file for *oldhepdata* format or input directory for *yaml* format
:param output: location of output directory to which converted files will be written
:param options: additional options such as ``input_format`` and ``output_format`` used for conversion
:type input: str
:type output: str
:type options: dict
:raise ValueError: raised if no ``input_format`` or ``output_format`` is specified
def convert(input, output=None, options={}):
"""Converts a supported ``input_format`` (*oldhepdata*, *yaml*)
to a supported ``output_format`` (*csv*, *root*, *yaml*, *yoda*).
:param input: location of input file for *oldhepdata* format or input directory for *yaml* format
:param output: location of output directory to which converted files will be written
:param options: additional options such as ``input_format`` and ``output_format`` used for conversion
:type input: str
:type output: str
:type options: dict
:raise ValueError: raised if no ``input_format`` or ``output_format`` is specified
"""
if 'input_format' not in options and 'output_format' not in options:
raise ValueError("no input_format and output_format specified!")
input_format = options.get('input_format', 'yaml')
output_format = options.get('output_format', 'yaml')
parser = Parser.get_concrete_class(input_format)(**options)
writer = Writer.get_concrete_class(output_format)(**options)
if not output and not writer.single_file_output:
raise ValueError("this output_format requires specifying 'output' argument")
# if no output was specified create proxy output to which writer can insert data
_output = output
if not _output:
_output = StringIO.StringIO()
writer.write(parser.parse(input), _output)
# if no output was specified return output
if not output:
return _output.getvalue()
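A usage sketch for convert(); the input and output paths are hypothetical.
from hepdata_converter import convert
# Convert an oldhepdata submission into a directory of YAML files.
convert('input.oldhepdata', 'output_dir',
        options={'input_format': 'oldhepdata', 'output_format': 'yaml'})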
|
This is a legacy function: its functionality may still be useful to some people, so even though
the default for CSV output is now writing unpacked data (divided by independent variable), this
method remains available and accessible if the ``pack`` flag is specified in the Writer's options.
:param data_out: output file-like object to which data will be written
:param table: input table
:type table: hepdata_converter.parsers.Table
def _write_packed_data(self, data_out, table):
"""This is kind of legacy function - this functionality may be useful for some people, so even though
now the default of writing CSV is writing unpacked data (divided by independent variable) this method is
still available and accessible if ```pack``` flag is specified in Writer's options
:param output: output file like object to which data will be written
:param table: input table
:type table: hepdata_converter.parsers.Table
"""
headers = []
data = []
qualifiers_marks = []
qualifiers = {}
self._extract_independent_variables(table, headers, data, qualifiers_marks)
for dependent_variable in table.dependent_variables:
self._parse_dependent_variable(dependent_variable, headers, qualifiers, qualifiers_marks, data)
self._write_metadata(data_out, table)
self._write_csv_data(data_out, qualifiers, qualifiers_marks, headers, data)
|
<Purpose>
Provide the caller with the ability to create digest objects without having
to worry about crypto library availability or which library to use. The
caller also has the option of specifying which hash algorithm and/or
library to use.
# Creation of a digest object using defaults or by specifying hash
# algorithm and library.
digest_object = securesystemslib.hash.digest()
digest_object = securesystemslib.hash.digest('sha384')
digest_object = securesystemslib.hash.digest('sha256', 'hashlib')
# The expected interface for digest objects.
digest_object.digest_size
digest_object.hexdigest()
digest_object.update('data')
digest_object.digest()
# Hash routines added by this module.
digest_object = securesystemslib.hash.digest_fileobject(file_object)
digest_object = securesystemslib.hash.digest_filename(filename)
<Arguments>
algorithm:
The hash algorithm (e.g., 'md5', 'sha1', 'sha256').
hash_library:
The crypto library to use for the given hash algorithm (e.g., 'hashlib').
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if an unsupported
hashing algorithm is specified, or a digest could not be generated with the
given algorithm.
securesystemslib.exceptions.UnsupportedLibraryError, if an unsupported
library was requested via 'hash_library'.
<Side Effects>
None.
<Returns>
Digest object (e.g., hashlib.new(algorithm)).
def digest(algorithm=DEFAULT_HASH_ALGORITHM, hash_library=DEFAULT_HASH_LIBRARY):
"""
<Purpose>
Provide the caller with the ability to create digest objects without having
to worry about crypto library availability or which library to use. The
caller also has the option of specifying which hash algorithm and/or
library to use.
# Creation of a digest object using defaults or by specifying hash
# algorithm and library.
digest_object = securesystemslib.hash.digest()
digest_object = securesystemslib.hash.digest('sha384')
digest_object = securesystemslib.hash.digest('sha256', 'hashlib')
# The expected interface for digest objects.
digest_object.digest_size
digest_object.hexdigest()
digest_object.update('data')
digest_object.digest()
# Hash routines added by this module.
digest_object = securesystemslib.hash.digest_fileobject(file_object)
digest_object = securesystemslib.hash.digest_filename(filename)
<Arguments>
algorithm:
The hash algorithm (e.g., 'md5', 'sha1', 'sha256').
hash_library:
The crypto library to use for the given hash algorithm (e.g., 'hashlib').
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if an unsupported
hashing algorithm is specified, or a digest could not be generated with the
given algorithm.
securesystemslib.exceptions.UnsupportedLibraryError, if an unsupported
library was requested via 'hash_library'.
<Side Effects>
None.
<Returns>
Digest object (e.g., hashlib.new(algorithm)).
"""
# Are the arguments properly formatted? If not, raise
# 'securesystemslib.exceptions.FormatError'.
securesystemslib.formats.NAME_SCHEMA.check_match(algorithm)
securesystemslib.formats.NAME_SCHEMA.check_match(hash_library)
# Was a hashlib digest object requested and is it supported?
# If so, return the digest object.
if hash_library == 'hashlib' and hash_library in SUPPORTED_LIBRARIES:
try:
return hashlib.new(algorithm)
except ValueError:
raise securesystemslib.exceptions.UnsupportedAlgorithmError(algorithm)
# Was a pyca_crypto digest object requested and is it supported?
elif hash_library == 'pyca_crypto' and hash_library in SUPPORTED_LIBRARIES: #pragma: no cover
# TODO: Add support for pyca/cryptography's hashing routines.
pass
# The requested hash library is not supported.
else:
raise securesystemslib.exceptions.UnsupportedLibraryError('Unsupported'
' library requested. Supported hash'
' libraries: ' + repr(SUPPORTED_LIBRARIES))
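A brief example of the digest-object interface described above; note that under
Python 3 update() expects bytes rather than str.
import securesystemslib.hash
digest_object = securesystemslib.hash.digest('sha256')
digest_object.update(b'hello world')
print(digest_object.hexdigest())
# b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9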
|
<Purpose>
Generate a digest object given a file object. The new digest object
is updated with the contents of 'file_object' prior to returning the
object to the caller.
<Arguments>
file_object:
File object whose contents will be used as the data
to update the hash of a digest object to be returned.
algorithm:
The hash algorithm (e.g., 'md5', 'sha1', 'sha256').
hash_library:
The library providing the hash algorithms (e.g., 'hashlib').
normalize_line_endings: (default False)
Whether or not to normalize line endings for cross-platform support.
Note that this results in ambiguous hashes (e.g. 'abc\n' and 'abc\r\n'
will produce the same hash), so be careful to only apply this to text
files (not binary), when that equivalence is desirable and cannot result
in easily-maliciously-corrupted files producing the same hash as a valid
file.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if an unsupported
hashing algorithm was specified via 'algorithm'.
securesystemslib.exceptions.UnsupportedLibraryError, if an unsupported
crypto library was specified via 'hash_library'.
<Side Effects>
None.
<Returns>
Digest object (e.g., hashlib.new(algorithm)).
def digest_fileobject(file_object, algorithm=DEFAULT_HASH_ALGORITHM,
hash_library=DEFAULT_HASH_LIBRARY, normalize_line_endings=False):
"""
<Purpose>
Generate a digest object given a file object. The new digest object
is updated with the contents of 'file_object' prior to returning the
object to the caller.
<Arguments>
file_object:
File object whose contents will be used as the data
to update the hash of a digest object to be returned.
algorithm:
The hash algorithm (e.g., 'md5', 'sha1', 'sha256').
hash_library:
The library providing the hash algorithms (e.g., 'hashlib').
normalize_line_endings: (default False)
Whether or not to normalize line endings for cross-platform support.
Note that this results in ambiguous hashes (e.g. 'abc\n' and 'abc\r\n'
will produce the same hash), so be careful to only apply this to text
files (not binary), when that equivalence is desirable and cannot result
in easily-maliciously-corrupted files producing the same hash as a valid
file.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if an unsupported
hashing algorithm was specified via 'algorithm'.
securesystemslib.exceptions.UnsupportedLibraryError, if an unsupported
crypto library was specified via 'hash_library'.
<Side Effects>
None.
<Returns>
Digest object (e.g., hashlib.new(algorithm)).
"""
# Are the arguments properly formatted? If not, raise
# 'securesystemslib.exceptions.FormatError'.
securesystemslib.formats.NAME_SCHEMA.check_match(algorithm)
securesystemslib.formats.NAME_SCHEMA.check_match(hash_library)
# Digest object returned whose hash will be updated using 'file_object'.
# digest() raises:
# securesystemslib.exceptions.UnsupportedAlgorithmError
# securesystemslib.exceptions.UnsupportedLibraryError
digest_object = digest(algorithm, hash_library)
# Defensively seek to beginning, as there's no case where we don't
# intend to start from the beginning of the file.
file_object.seek(0)
# Read the contents of the file object in at most 4096-byte chunks.
# Update the hash with the data read from each chunk and return after
# the entire file is processed.
while True:
data = file_object.read(DEFAULT_CHUNK_SIZE)
if not data:
break
if normalize_line_endings:
while data[-1:] == b'\r':
c = file_object.read(1)
if not c:
break
data += c
data = (
data
# First Windows
.replace(b'\r\n', b'\n')
# Then Mac
.replace(b'\r', b'\n')
)
if not isinstance(data, six.binary_type):
digest_object.update(data.encode('utf-8'))
else:
digest_object.update(data)
return digest_object
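A short sketch of the line-ending normalization described above: with
normalize_line_endings=True, otherwise-identical CRLF and LF files hash the same.
import io
crlf_file = io.BytesIO(b'first line\r\nsecond line\r\n')
lf_file = io.BytesIO(b'first line\nsecond line\n')
crlf_digest = digest_fileobject(crlf_file, 'sha256', normalize_line_endings=True)
lf_digest = digest_fileobject(lf_file, 'sha256', normalize_line_endings=True)
assert crlf_digest.hexdigest() == lf_digest.hexdigest()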
|
<Purpose>
Generate a digest object, update its hash using a file object
specified by filename, and then return it to the caller.
<Arguments>
filename:
The filename belonging to the file object to be used.
algorithm:
The hash algorithm (e.g., 'md5', 'sha1', 'sha256').
hash_library:
The library providing the hash algorithms (e.g., 'hashlib').
normalize_line_endings:
Whether or not to normalize line endings for cross-platform support.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if the given
'algorithm' is unsupported.
securesystemslib.exceptions.UnsupportedLibraryError, if the given
'hash_library' is unsupported.
<Side Effects>
None.
<Returns>
Digest object (e.g., hashlib.new(algorithm)).
def digest_filename(filename, algorithm=DEFAULT_HASH_ALGORITHM,
hash_library=DEFAULT_HASH_LIBRARY, normalize_line_endings=False):
"""
<Purpose>
Generate a digest object, update its hash using a file object
specified by filename, and then return it to the caller.
<Arguments>
filename:
The filename belonging to the file object to be used.
algorithm:
The hash algorithm (e.g., 'md5', 'sha1', 'sha256').
hash_library:
The library providing the hash algorithms (e.g., 'hashlib').
normalize_line_endings:
Whether or not to normalize line endings for cross-platform support.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if the given
'algorithm' is unsupported.
securesystemslib.exceptions.UnsupportedLibraryError, if the given
'hash_library' is unsupported.
<Side Effects>
None.
<Returns>
Digest object (e.g., hashlib.new(algorithm)).
"""
# Are the arguments properly formatted? If not, raise
# 'securesystemslib.exceptions.FormatError'.
securesystemslib.formats.RELPATH_SCHEMA.check_match(filename)
securesystemslib.formats.NAME_SCHEMA.check_match(algorithm)
securesystemslib.formats.NAME_SCHEMA.check_match(hash_library)
digest_object = None
# Open 'filename' in read+binary mode.
with open(filename, 'rb') as file_object:
# Create digest_object and update its hash data from file_object.
# digest_fileobject() raises:
# securesystemslib.exceptions.UnsupportedAlgorithmError
# securesystemslib.exceptions.UnsupportedLibraryError
digest_object = digest_fileobject(
file_object, algorithm, hash_library, normalize_line_endings)
return digest_object
|
Get the antlr token stream.
def get_token_stream(source: str) -> CommonTokenStream:
""" Get the antlr token stream.
"""
lexer = LuaLexer(InputStream(source))
stream = CommonTokenStream(lexer)
return stream
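A small usage sketch of the helper above, assuming the ANTLR-generated LuaLexer
and the antlr4 Python runtime are importable.
stream = get_token_stream('local x = 1 + 2')
stream.fill()                     # force the lexer to tokenize the whole input
for token in stream.tokens[:-1]:  # skip the trailing EOF token
  print(token.text)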
|
Returns an Evolution Stone object containing the details about the
evolution stone.
def get_evolution_stone(self, slug):
"""
Returns an Evolution Stone object containing the details about the
evolution stone.
"""
endpoint = '/evolution-stone/' + slug
return self.make_request(self.BASE_URL + endpoint)
|
Returns a Pokemon League object containing the details about the
league.
def get_league(self, slug):
"""
Returns a Pokemon League object containing the details about the
league.
"""
endpoint = '/league/' + slug
return self.make_request(self.BASE_URL + endpoint)
|
Returns an array of Pokemon objects containing all the forms of the
Pokemon specified by the name of the Pokemon.
def get_pokemon_by_name(self, name):
"""
Returns an array of Pokemon objects containing all the forms of the
Pokemon specified by the name of the Pokemon.
"""
endpoint = '/pokemon/' + str(name)
return self.make_request(self.BASE_URL + endpoint)
|
Returns an array of Pokemon objects containing all the forms of the
Pokemon specified by the Pokedex number.
def get_pokemon_by_number(self, number):
"""
Returns an array of Pokemon objects containing all the forms of the
Pokemon specified by the Pokedex number.
"""
endpoint = '/pokemon/' + str(number)
return self.make_request(self.BASE_URL + endpoint)
|
<Purpose>
Generate public and private RSA keys with modulus length 'bits'. The
public and private keys returned conform to
'securesystemslib.formats.PEMRSA_SCHEMA' and have the form:
'-----BEGIN RSA PUBLIC KEY----- ...'
or
'-----BEGIN RSA PRIVATE KEY----- ...'
The public and private keys are returned as strings in PEM format.
'generate_rsa_public_and_private()' enforces a minimum key size of 2048
bits. If 'bits' is unspecified, a 3072-bit RSA key is generated, which is
the key size recommended by TUF.
>>> public, private = generate_rsa_public_and_private(2048)
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(private)
True
<Arguments>
bits:
The key size, or key length, of the RSA key. 'bits' must be 2048, or
greater. 'bits' defaults to 3072 if not specified.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'bits' does not contain the
correct format.
<Side Effects>
The RSA keys are generated from pyca/cryptography's
rsa.generate_private_key() function.
<Returns>
A (public, private) tuple containing the RSA keys in PEM format.
def generate_rsa_public_and_private(bits=_DEFAULT_RSA_KEY_BITS):
"""
<Purpose>
Generate public and private RSA keys with modulus length 'bits'. The
public and private keys returned conform to
'securesystemslib.formats.PEMRSA_SCHEMA' and have the form:
'-----BEGIN RSA PUBLIC KEY----- ...'
or
'-----BEGIN RSA PRIVATE KEY----- ...'
The public and private keys are returned as strings in PEM format.
'generate_rsa_public_and_private()' enforces a minimum key size of 2048
bits. If 'bits' is unspecified, a 3072-bit RSA key is generated, which is
the key size recommended by TUF.
>>> public, private = generate_rsa_public_and_private(2048)
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(private)
True
<Arguments>
bits:
The key size, or key length, of the RSA key. 'bits' must be 2048, or
greater. 'bits' defaults to 3072 if not specified.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'bits' does not contain the
correct format.
<Side Effects>
The RSA keys are generated from pyca/cryptography's
rsa.generate_private_key() function.
<Returns>
A (public, private) tuple containing the RSA keys in PEM format.
"""
# Does 'bits' have the correct format?
# This check will ensure 'bits' conforms to
# 'securesystemslib.formats.RSAKEYBITS_SCHEMA'. 'bits' must be an integer
# object, with a minimum value of 2048. Raise
# 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.RSAKEYBITS_SCHEMA.check_match(bits)
# Generate the public and private RSA keys. The pyca/cryptography 'rsa'
# module performs the actual key generation. The 'bits' argument is used,
# and a 2048-bit minimum is enforced by
# securesystemslib.formats.RSAKEYBITS_SCHEMA.check_match().
private_key = rsa.generate_private_key(public_exponent=65537, key_size=bits,
backend=default_backend())
# Extract the public & private halves of the RSA key and generate their
# PEM-formatted representations. Return the key pair as a (public, private)
# tuple, where each RSA is a string in PEM format.
private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
# Need to generate the public pem from the private key before serialization
# to PEM.
public_key = private_key.public_key()
public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
return public_pem.decode('utf-8'), private_pem.decode('utf-8')
|
<Purpose>
Generate a 'scheme' signature. The signature, and the signature scheme
used, is returned as a (signature, scheme) tuple.
The signing process will use 'private_key' to generate the signature of
'data'.
RFC3447 - RSASSA-PSS
http://www.ietf.org/rfc/rfc3447.txt
>>> public, private = generate_rsa_public_and_private(2048)
>>> data = 'The quick brown fox jumps over the lazy dog'.encode('utf-8')
>>> scheme = 'rsassa-pss-sha256'
>>> signature, scheme = create_rsa_signature(private, data, scheme)
>>> securesystemslib.formats.NAME_SCHEMA.matches(scheme)
True
>>> scheme == 'rsassa-pss-sha256'
True
>>> securesystemslib.formats.PYCACRYPTOSIGNATURE_SCHEMA.matches(signature)
True
<Arguments>
private_key:
The private RSA key, a string in PEM format.
data:
Data (string) used by create_rsa_signature() to generate the signature.
scheme:
The signature scheme used to generate the signature.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'private_key' is improperly
formatted.
ValueError, if 'private_key' is unset.
securesystemslib.exceptions.CryptoError, if the signature cannot be
generated.
<Side Effects>
pyca/cryptography's 'RSAPrivateKey.signer()' called to generate the
signature.
<Returns>
A (signature, scheme) tuple, where the signature is a string and the scheme
is one of the supported RSA signature schemes. For example:
'rsassa-pss-sha256'.
def create_rsa_signature(private_key, data, scheme='rsassa-pss-sha256'):
"""
<Purpose>
Generate a 'scheme' signature. The signature, and the signature scheme
used, is returned as a (signature, scheme) tuple.
The signing process will use 'private_key' to generate the signature of
'data'.
RFC3447 - RSASSA-PSS
http://www.ietf.org/rfc/rfc3447.txt
>>> public, private = generate_rsa_public_and_private(2048)
>>> data = 'The quick brown fox jumps over the lazy dog'.encode('utf-8')
>>> scheme = 'rsassa-pss-sha256'
>>> signature, scheme = create_rsa_signature(private, data, scheme)
>>> securesystemslib.formats.NAME_SCHEMA.matches(scheme)
True
>>> scheme == 'rsassa-pss-sha256'
True
>>> securesystemslib.formats.PYCACRYPTOSIGNATURE_SCHEMA.matches(signature)
True
<Arguments>
private_key:
The private RSA key, a string in PEM format.
data:
Data (string) used by create_rsa_signature() to generate the signature.
scheme:
The signature scheme used to generate the signature.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'private_key' is improperly
formatted.
ValueError, if 'private_key' is unset.
securesystemslib.exceptions.CryptoError, if the signature cannot be
generated.
<Side Effects>
pyca/cryptography's 'RSAPrivateKey.signer()' called to generate the
signature.
<Returns>
A (signature, scheme) tuple, where the signature is a string and the scheme
is one of the supported RSA signature schemes. For example:
'rsassa-pss-sha256'.
"""
# Do the arguments have the correct format?
# If not, raise 'securesystemslib.exceptions.FormatError' if any of the
# checks fail.
securesystemslib.formats.PEMRSA_SCHEMA.check_match(private_key)
securesystemslib.formats.DATA_SCHEMA.check_match(data)
securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(scheme)
# Signing 'data' requires a private key. 'rsassa-pss-sha256' is the only
# currently supported signature scheme.
signature = None
# Generate the signature, but only if the private key has been set. The
# private key is a NULL string if unset. Although it may be clearer to
# explicitly check that 'private_key' is not '', we can/should check for a
# value and not compare identities with the 'is' keyword. Up to this point
# 'private_key' has variable size and can be an empty string.
if len(private_key):
# An if-clause isn't strictly needed here, since 'rsassa-pss-sha256' is
# the only currently supported RSA scheme. Nevertheless, include the
# conditional statement to accommodate future schemes that might be added.
if scheme == 'rsassa-pss-sha256':
# Generate an RSSA-PSS signature. Raise
# 'securesystemslib.exceptions.CryptoError' for any of the expected
# exceptions raised by pyca/cryptography.
try:
# 'private_key' (in PEM format) must first be converted to a
# pyca/cryptography private key object before a signature can be
# generated.
private_key_object = load_pem_private_key(private_key.encode('utf-8'),
password=None, backend=default_backend())
signature = private_key_object.sign(
data, padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
salt_length=hashes.SHA256().digest_size), hashes.SHA256())
# If the PEM data could not be decrypted, or if its structure could not
# be decoded successfully.
except ValueError:
raise securesystemslib.exceptions.CryptoError('The private key'
' (in PEM format) could not be deserialized.')
# 'TypeError' is raised if a password was given and the private key was
# not encrypted, or if the key was encrypted but no password was
# supplied. Note: A passphrase or password is not used when generating
# 'private_key', since it should not be encrypted.
except TypeError:
raise securesystemslib.exceptions.CryptoError('The private key was'
' unexpectedly encrypted.')
# 'cryptography.exceptions.UnsupportedAlgorithm' is raised if the
# serialized key is of a type that is not supported by the backend, or if
# the key is encrypted with a symmetric cipher that is not supported by
# the backend.
except cryptography.exceptions.UnsupportedAlgorithm: #pragma: no cover
raise securesystemslib.exceptions.CryptoError('The private key is'
' encrypted with an unsupported algorithm.')
# The RSA_SCHEME_SCHEMA.check_match() above should have validated 'scheme'.
# This is a defensive check.
else: #pragma: no cover
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
else:
raise ValueError('The required private key is unset.')
return signature, scheme
|
<Purpose>
Determine whether the corresponding private key of 'public_key' produced
'signature'. verify_signature() will use the public key, signature scheme,
and 'data' to complete the verification.
>>> public, private = generate_rsa_public_and_private(2048)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> scheme = 'rsassa-pss-sha256'
>>> signature, scheme = create_rsa_signature(private, data, scheme)
>>> verify_rsa_signature(signature, scheme, public, data)
True
>>> verify_rsa_signature(signature, scheme, public, b'bad_data')
False
<Arguments>
signature:
A signature, as a string. This is the signature returned
by create_rsa_signature().
signature_scheme:
A string that indicates the signature scheme used to generate
'signature'. 'rsassa-pss-sha256' is currently supported.
public_key:
The RSA public key, a string in PEM format.
data:
Data used by securesystemslib.keys.create_signature() to generate
'signature'. 'data' (a string) is needed here to verify 'signature'.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'signature',
'signature_scheme', 'public_key', or 'data' are improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if the signature
scheme used by 'signature' is not one supported by
securesystemslib.keys.create_signature().
securesystemslib.exceptions.CryptoError, if the private key cannot be
decoded or its key type is unsupported.
<Side Effects>
pyca/cryptography's RSAPublicKey.verifier() called to do the actual
verification.
<Returns>
Boolean. True if the signature is valid, False otherwise.
def verify_rsa_signature(signature, signature_scheme, public_key, data):
"""
<Purpose>
Determine whether the corresponding private key of 'public_key' produced
'signature'. verify_signature() will use the public key, signature scheme,
and 'data' to complete the verification.
>>> public, private = generate_rsa_public_and_private(2048)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> scheme = 'rsassa-pss-sha256'
>>> signature, scheme = create_rsa_signature(private, data, scheme)
>>> verify_rsa_signature(signature, scheme, public, data)
True
>>> verify_rsa_signature(signature, scheme, public, b'bad_data')
False
<Arguments>
signature:
A signature, as a string. This is the signature returned
by create_rsa_signature().
signature_scheme:
A string that indicates the signature scheme used to generate
'signature'. 'rsassa-pss-sha256' is currently supported.
public_key:
The RSA public key, a string in PEM format.
data:
Data used by securesystemslib.keys.create_signature() to generate
'signature'. 'data' (a string) is needed here to verify 'signature'.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'signature',
'signature_scheme', 'public_key', or 'data' are improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if the signature
scheme used by 'signature' is not one supported by
securesystemslib.keys.create_signature().
securesystemslib.exceptions.CryptoError, if the private key cannot be
decoded or its key type is unsupported.
<Side Effects>
pyca/cryptography's RSAPublicKey.verifier() called to do the actual
verification.
<Returns>
Boolean. True if the signature is valid, False otherwise.
"""
# Does 'public_key' have the correct format?
# This check will ensure 'public_key' conforms to
# 'securesystemslib.formats.PEMRSA_SCHEMA'. Raise
# 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.PEMRSA_SCHEMA.check_match(public_key)
# Does 'signature_scheme' have the correct format?
securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(signature_scheme)
# Does 'signature' have the correct format?
securesystemslib.formats.PYCACRYPTOSIGNATURE_SCHEMA.check_match(signature)
# What about 'data'?
securesystemslib.formats.DATA_SCHEMA.check_match(data)
# Verify whether the private key of 'public_key' produced 'signature'.
# 'RSASSA-PSS' is the only RSA signature scheme currently supported, and the
# Boolean result of the verification is returned directly.
# Verify the RSASSA-PSS signature with pyca/cryptography.
try:
public_key_object = serialization.load_pem_public_key(public_key.encode('utf-8'),
backend=default_backend())
# verify() raises 'cryptography.exceptions.InvalidSignature' if the
# signature is invalid. 'salt_length' is set to the digest size of the
# hashing algorithm.
try:
public_key_object.verify(signature, data,
padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
salt_length=hashes.SHA256().digest_size),
hashes.SHA256())
return True
except cryptography.exceptions.InvalidSignature:
return False
# Raised by load_pem_public_key().
except (ValueError, cryptography.exceptions.UnsupportedAlgorithm) as e:
raise securesystemslib.exceptions.CryptoError('The PEM could not be'
' decoded successfully, or contained an unsupported key type: ' + str(e))
|
<Purpose>
Return a string in PEM format (TraditionalOpenSSL), where the private part
of the RSA key is encrypted using the best available encryption for a given
key's backend. This is a curated (by cryptography.io) encryption choice and
the algorithm may change over time.
c.f. cryptography.io/en/latest/hazmat/primitives/asymmetric/serialization/
#cryptography.hazmat.primitives.serialization.BestAvailableEncryption
>>> public, private = generate_rsa_public_and_private(2048)
>>> passphrase = 'secret'
>>> encrypted_pem = create_rsa_encrypted_pem(private, passphrase)
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(encrypted_pem)
True
<Arguments>
private_key:
The private key string in PEM format.
passphrase:
The passphrase, or password, to encrypt the private part of the RSA
key.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if the passed RSA key cannot be
deserialized by pyca cryptography.
ValueError, if 'private_key' is unset.
<Returns>
A string in PEM format (TraditionalOpenSSL), where the private RSA key is
encrypted. Conforms to 'securesystemslib.formats.PEMRSA_SCHEMA'.
def create_rsa_encrypted_pem(private_key, passphrase):
"""
<Purpose>
Return a string in PEM format (TraditionalOpenSSL), where the private part
of the RSA key is encrypted using the best available encryption for a given
key's backend. This is a curated (by cryptography.io) encryption choice and
the algorithm may change over time.
c.f. cryptography.io/en/latest/hazmat/primitives/asymmetric/serialization/
#cryptography.hazmat.primitives.serialization.BestAvailableEncryption
>>> public, private = generate_rsa_public_and_private(2048)
>>> passphrase = 'secret'
>>> encrypted_pem = create_rsa_encrypted_pem(private, passphrase)
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(encrypted_pem)
True
<Arguments>
private_key:
The private key string in PEM format.
passphrase:
The passphrase, or password, to encrypt the private part of the RSA
key.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if the passed RSA key cannot be
deserialized by pyca cryptography.
ValueError, if 'private_key' is unset.
<Returns>
A string in PEM format (TraditionalOpenSSL), where the private RSA key is
encrypted. Conforms to 'securesystemslib.formats.PEMRSA_SCHEMA'.
"""
# This check will ensure 'private_key' has the appropriate number
# of objects and object types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.PEMRSA_SCHEMA.check_match(private_key)
# Does 'passphrase' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(passphrase)
# 'private_key' may still be a NULL string after the
# 'securesystemslib.formats.PEMRSA_SCHEMA' check, so an additional check is needed.
if len(private_key):
try:
private_key = load_pem_private_key(private_key.encode('utf-8'),
password=None, backend=default_backend())
except ValueError:
raise securesystemslib.exceptions.CryptoError('The private key'
' (in PEM format) could not be deserialized.')
else:
raise ValueError('The required private key is unset.')
encrypted_pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.BestAvailableEncryption(
passphrase.encode('utf-8')))
return encrypted_pem.decode()
|
<Purpose>
Generate public and private RSA keys from an optionally encrypted PEM. The
public and private keys returned conform to
'securesystemslib.formats.PEMRSA_SCHEMA' and have the form:
'-----BEGIN RSA PUBLIC KEY----- ... -----END RSA PUBLIC KEY-----'
and
'-----BEGIN RSA PRIVATE KEY----- ...-----END RSA PRIVATE KEY-----'
The public and private keys are returned as strings in PEM format.
In case the private key part of 'pem' is encrypted pyca/cryptography's
load_pem_private_key() method is passed passphrase. In the default case
here, pyca/cryptography will decrypt with a PBKDF1+MD5
strengthened 'passphrase', and 3DES with CBC mode for encryption/decryption.
Alternatively, key data may be encrypted with AES-CTR-Mode and the
passphrase strengthened with PBKDF2+SHA256, although this method is used
only with TUF encrypted key files.
>>> public, private = generate_rsa_public_and_private(2048)
>>> passphrase = 'secret'
>>> encrypted_pem = create_rsa_encrypted_pem(private, passphrase)
>>> returned_public, returned_private = \
create_rsa_public_and_private_from_pem(encrypted_pem, passphrase)
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(returned_public)
True
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(returned_private)
True
>>> public == returned_public
True
>>> private == returned_private
True
<Arguments>
pem:
A byte string in PEM format, where the private key can be encrypted.
It has the form:
'-----BEGIN RSA PRIVATE KEY-----\n
Proc-Type: 4,ENCRYPTED\nDEK-Info: DES-EDE3-CBC ...'
passphrase: (optional)
The passphrase, or password, to decrypt the private part of the RSA
key. 'passphrase' is not directly used as the encryption key, instead
it is used to derive a stronger symmetric key.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if the public and private RSA keys
cannot be generated from 'pem', or exported in PEM format.
<Side Effects>
pyca/cryptography's 'serialization.load_pem_private_key()' called to
perform the actual conversion from an encrypted RSA private key to
PEM format.
<Returns>
A (public, private) tuple containing the RSA keys in PEM format.
def create_rsa_public_and_private_from_pem(pem, passphrase=None):
"""
<Purpose>
Generate public and private RSA keys from an optionally encrypted PEM. The
public and private keys returned conform to
'securesystemslib.formats.PEMRSA_SCHEMA' and have the form:
'-----BEGIN RSA PUBLIC KEY----- ... -----END RSA PUBLIC KEY-----'
and
'-----BEGIN RSA PRIVATE KEY----- ...-----END RSA PRIVATE KEY-----'
The public and private keys are returned as strings in PEM format.
In case the private key part of 'pem' is encrypted pyca/cryptography's
load_pem_private_key() method is passed passphrase. In the default case
here, pyca/cryptography will decrypt with a PBKDF1+MD5
strengthened 'passphrase', and 3DES with CBC mode for encryption/decryption.
Alternatively, key data may be encrypted with AES-CTR-Mode and the
passphrase strengthened with PBKDF2+SHA256, although this method is used
only with TUF encrypted key files.
>>> public, private = generate_rsa_public_and_private(2048)
>>> passphrase = 'secret'
>>> encrypted_pem = create_rsa_encrypted_pem(private, passphrase)
>>> returned_public, returned_private = \
create_rsa_public_and_private_from_pem(encrypted_pem, passphrase)
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(returned_public)
True
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(returned_private)
True
>>> public == returned_public
True
>>> private == returned_private
True
<Arguments>
pem:
A byte string in PEM format, where the private key can be encrypted.
It has the form:
'-----BEGIN RSA PRIVATE KEY-----\n
Proc-Type: 4,ENCRYPTED\nDEK-Info: DES-EDE3-CBC ...'
passphrase: (optional)
The passphrase, or password, to decrypt the private part of the RSA
key. 'passphrase' is not directly used as the encryption key, instead
it is used to derive a stronger symmetric key.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if the public and private RSA keys
cannot be generated from 'pem', or exported in PEM format.
<Side Effects>
pyca/cryptography's 'serialization.load_pem_private_key()' called to
perform the actual conversion from an encrypted RSA private key to
PEM format.
<Returns>
A (public, private) tuple containing the RSA keys in PEM format.
"""
# Does 'pem' have the correct format?
# This check will ensure 'pem' has the appropriate number
# of objects and object types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.PEMRSA_SCHEMA.check_match(pem)
# If passed, does 'passphrase' have the correct format?
if passphrase is not None:
securesystemslib.formats.PASSWORD_SCHEMA.check_match(passphrase)
passphrase = passphrase.encode('utf-8')
# Generate a pyca/cryptography key object from 'pem'. The generated
# pyca/cryptography key contains the required export methods needed to
# generate the PEM-formatted representations of the public and private RSA
# key.
try:
private_key = load_pem_private_key(pem.encode('utf-8'),
passphrase, backend=default_backend())
# pyca/cryptography's expected exceptions for 'load_pem_private_key()':
# ValueError: If the PEM data could not be decrypted.
# (possibly because the passphrase is wrong)."
# TypeError: If a password was given and the private key was not encrypted.
# Or if the key was encrypted but no password was supplied.
# UnsupportedAlgorithm: If the private key (or if the key is encrypted with
# an unsupported symmetric cipher) is not supported by the backend.
except (ValueError, TypeError, cryptography.exceptions.UnsupportedAlgorithm) as e:
# Raise 'securesystemslib.exceptions.CryptoError' and pyca/cryptography's
# exception message. Avoid propagating pyca/cryptography's exception trace
# to avoid revealing sensitive error information.
raise securesystemslib.exceptions.CryptoError('RSA (public, private) tuple'
' cannot be generated from the encrypted PEM string: ' + str(e))
# Export the public and private halves of the pyca/cryptography RSA key
# object. The (public, private) tuple returned contains the public and
# private RSA keys in PEM format, as strings.
# Extract the public & private halves of the RSA key and generate their
# PEM-formatted representations. Return the key pair as a (public, private)
# tuple, where each RSA is a string in PEM format.
private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
# Need to generate the public key from the private one before serializing
# to PEM format.
public_key = private_key.public_key()
public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
return public_pem.decode(), private_pem.decode()
|
<Purpose>
Return a string containing 'key_object' in encrypted form. Encrypted
strings may be safely saved to a file. The corresponding decrypt_key()
function can be applied to the encrypted string to restore the original key
object. 'key_object' is a TUF key (e.g., RSAKEY_SCHEMA,
ED25519KEY_SCHEMA). This function calls the pyca/cryptography library to
perform the encryption and derive a suitable encryption key.
Whereas an encrypted PEM file uses the Triple Data Encryption Algorithm
(3DES), the Cipher-block chaining (CBC) mode of operation, and the Password
Based Key Derivation Function 1 (PBKDF1) + MD5 to strengthen 'password',
encrypted TUF keys use AES-256-CTR-Mode and passwords strengthened with
PBKDF2-HMAC-SHA256 (100K iterations by default, but may be overridden in
'settings.PBKDF2_ITERATIONS' by the user).
http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
http://en.wikipedia.org/wiki/CTR_mode#Counter_.28CTR.29
https://en.wikipedia.org/wiki/PBKDF2
>>> ed25519_key = {'keytype': 'ed25519', \
'scheme': 'ed25519', \
'keyid': \
'd62247f817883f593cf6c66a5a55292488d457bcf638ae03207dbbba9dbe457d', \
'keyval': {'public': \
'74addb5ad544a4306b34741bc1175a3613a8d7dc69ff64724243efdec0e301ad', \
'private': \
'1f26964cc8d4f7ee5f3c5da2fbb7ab35811169573ac367b860a537e47789f8c4'}}
>>> passphrase = 'secret'
>>> encrypted_key = encrypt_key(ed25519_key, passphrase)
>>> securesystemslib.formats.ENCRYPTEDKEY_SCHEMA.matches(encrypted_key.encode('utf-8'))
True
<Arguments>
key_object:
The TUF key object that should contain the private portion of the ED25519
key.
password:
The password, or passphrase, to encrypt the private part of the RSA
key. 'password' is not used directly as the encryption key, a stronger
encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if any of the arguments are
improperly formatted or 'key_object' does not contain the private portion
of the key.
securesystemslib.exceptions.CryptoError, if an Ed25519 key in encrypted TUF
format cannot be created.
<Side Effects>
pyca/Cryptography cryptographic operations called to perform the actual
encryption of 'key_object'. 'password' used to derive a suitable
encryption key.
<Returns>
An encrypted string in 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA' format.
def encrypt_key(key_object, password):
"""
<Purpose>
Return a string containing 'key_object' in encrypted form. Encrypted
strings may be safely saved to a file. The corresponding decrypt_key()
function can be applied to the encrypted string to restore the original key
object. 'key_object' is a TUF key (e.g., RSAKEY_SCHEMA,
ED25519KEY_SCHEMA). This function calls the pyca/cryptography library to
perform the encryption and derive a suitable encryption key.
Whereas an encrypted PEM file uses the Triple Data Encryption Algorithm
(3DES), the Cipher-block chaining (CBC) mode of operation, and the Password
Based Key Derivation Function 1 (PBKDF1) + MD5 to strengthen 'password',
encrypted TUF keys use AES-256-CTR-Mode and passwords strengthened with
PBKDF2-HMAC-SHA256 (100K iterations by default, but may be overridden in
'settings.PBKDF2_ITERATIONS' by the user).
http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
http://en.wikipedia.org/wiki/CTR_mode#Counter_.28CTR.29
https://en.wikipedia.org/wiki/PBKDF2
>>> ed25519_key = {'keytype': 'ed25519', \
'scheme': 'ed25519', \
'keyid': \
'd62247f817883f593cf6c66a5a55292488d457bcf638ae03207dbbba9dbe457d', \
'keyval': {'public': \
'74addb5ad544a4306b34741bc1175a3613a8d7dc69ff64724243efdec0e301ad', \
'private': \
'1f26964cc8d4f7ee5f3c5da2fbb7ab35811169573ac367b860a537e47789f8c4'}}
>>> passphrase = 'secret'
>>> encrypted_key = encrypt_key(ed25519_key, passphrase)
>>> securesystemslib.formats.ENCRYPTEDKEY_SCHEMA.matches(encrypted_key.encode('utf-8'))
True
<Arguments>
key_object:
The TUF key object that should contain the private portion of the ED25519
key.
password:
The password, or passphrase, to encrypt the private part of the RSA
key. 'password' is not used directly as the encryption key, a stronger
encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if any of the arguments are
improperly formatted or 'key_object' does not contain the private portion
of the key.
securesystemslib.exceptions.CryptoError, if an Ed25519 key in encrypted TUF
format cannot be created.
<Side Effects>
pyca/Cryptography cryptographic operations called to perform the actual
encryption of 'key_object'. 'password' used to derive a suitable
encryption key.
<Returns>
An encrypted string in 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA' format.
"""
# Do the arguments have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.ANYKEY_SCHEMA.check_match(key_object)
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# Ensure the private portion of the key is included in 'key_object'.
if 'private' not in key_object['keyval'] or not key_object['keyval']['private']:
raise securesystemslib.exceptions.FormatError('Key object does not contain'
' a private part.')
# Derive a key (i.e., an appropriate encryption key and not the
# user's password) from the given 'password'. Strengthen 'password' with
# PBKDF2-HMAC-SHA256 (100K iterations by default, but may be overridden in
# 'settings.PBKDF2_ITERATIONS' by the user).
salt, iterations, derived_key = _generate_derived_key(password)
# Store the derived key info in a dictionary, the object expected
# by the non-public _encrypt() routine.
derived_key_information = {'salt': salt, 'iterations': iterations,
'derived_key': derived_key}
# Convert the key object to json string format and encrypt it with the
# derived key.
encrypted_key = _encrypt(json.dumps(key_object), derived_key_information)
return encrypted_key
|
<Purpose>
Return a string containing 'encrypted_key' in non-encrypted form.
The decrypt_key() function can be applied to the encrypted string to restore
the original key object, a TUF key (e.g., RSAKEY_SCHEMA, ED25519KEY_SCHEMA).
This function calls the appropriate cryptography module (i.e.,
pyca_crypto_keys.py) to perform the decryption.
Encrypted TUF keys use AES-256-CTR-Mode and passwords strengthened with
PBKDF2-HMAC-SHA256 (100K iterations by default, but may be overridden in
'settings.py' by the user).
http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
http://en.wikipedia.org/wiki/CTR_mode#Counter_.28CTR.29
https://en.wikipedia.org/wiki/PBKDF2
>>> ed25519_key = {'keytype': 'ed25519', \
'scheme': 'ed25519', \
'keyid': \
'd62247f817883f593cf6c66a5a55292488d457bcf638ae03207dbbba9dbe457d', \
'keyval': {'public': \
'74addb5ad544a4306b34741bc1175a3613a8d7dc69ff64724243efdec0e301ad', \
'private': \
'1f26964cc8d4f7ee5f3c5da2fbb7ab35811169573ac367b860a537e47789f8c4'}}
>>> passphrase = 'secret'
>>> encrypted_key = encrypt_key(ed25519_key, passphrase)
>>> decrypted_key = decrypt_key(encrypted_key.encode('utf-8'), passphrase)
>>> securesystemslib.formats.ED25519KEY_SCHEMA.matches(decrypted_key)
True
>>> decrypted_key == ed25519_key
True
<Arguments>
encrypted_key:
An encrypted TUF key (additional data is also included, such as salt,
number of password iterations used for the derived encryption key, etc)
of the form 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA'.
'encrypted_key' should have been generated with encrypt_key().
password:
The password, or passphrase, to decrypt 'encrypted_key'. 'password' is not
used directly as the encryption key; a stronger
encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if a TUF key cannot be decrypted
from 'encrypted_key'.
securesystemslib.exceptions.Error, if a valid TUF key object is not found in
'encrypted_key'.
<Side Effects>
The pyca/cryptography library is called to perform the actual decryption
of 'encrypted_key'. The key derivation data stored in 'encrypted_key' is
used to re-derive the encryption/decryption key.
<Returns>
The decrypted key object in 'securesystemslib.formats.ANYKEY_SCHEMA' format.
def decrypt_key(encrypted_key, password):
"""
<Purpose>
Return a string containing 'encrypted_key' in non-encrypted form.
The decrypt_key() function can be applied to the encrypted string to restore
the original key object, a TUF key (e.g., RSAKEY_SCHEMA, ED25519KEY_SCHEMA).
This function calls the appropriate cryptography module (i.e.,
pyca_crypto_keys.py) to perform the decryption.
Encrypted TUF keys use AES-256-CTR-Mode and passwords strengthened with
PBKDF2-HMAC-SHA256 (100K iterations by default, but may be overridden in
'settings.py' by the user).
http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
http://en.wikipedia.org/wiki/CTR_mode#Counter_.28CTR.29
https://en.wikipedia.org/wiki/PBKDF2
>>> ed25519_key = {'keytype': 'ed25519', \
'scheme': 'ed25519', \
'keyid': \
'd62247f817883f593cf6c66a5a55292488d457bcf638ae03207dbbba9dbe457d', \
'keyval': {'public': \
'74addb5ad544a4306b34741bc1175a3613a8d7dc69ff64724243efdec0e301ad', \
'private': \
'1f26964cc8d4f7ee5f3c5da2fbb7ab35811169573ac367b860a537e47789f8c4'}}
>>> passphrase = 'secret'
>>> encrypted_key = encrypt_key(ed25519_key, passphrase)
>>> decrypted_key = decrypt_key(encrypted_key.encode('utf-8'), passphrase)
>>> securesystemslib.formats.ED25519KEY_SCHEMA.matches(decrypted_key)
True
>>> decrypted_key == ed25519_key
True
<Arguments>
encrypted_key:
An encrypted TUF key (additional data is also included, such as salt,
number of password iterations used for the derived encryption key, etc)
of the form 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA'.
'encrypted_key' should have been generated with encrypt_key().
password:
The password, or passphrase, to decrypt 'encrypted_key'. 'password' is not
used directly as the decryption key; a stronger key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if a TUF key cannot be decrypted
from 'encrypted_key'.
securesystemslib.exceptions.Error, if a valid TUF key object is not found in
'encrypted_key'.
<Side Effects>
The pyca/cryptography library is called to perform the actual decryption
of 'encrypted_key'. The key derivation data stored in 'encrypted_key' is
used to re-derive the encryption/decryption key.
<Returns>
The decrypted key object in 'securesystemslib.formats.ANYKEY_SCHEMA' format.
"""
# Do the arguments have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.ENCRYPTEDKEY_SCHEMA.check_match(encrypted_key)
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# Decrypt 'encrypted_key', using 'password' (and additional key derivation
# data like salts and password iterations) to re-derive the decryption key.
json_data = _decrypt(encrypted_key, password)
# Raise 'securesystemslib.exceptions.Error' if 'json_data' cannot be
# deserialized to a valid 'securesystemslib.formats.ANYKEY_SCHEMA' key
# object.
key_object = securesystemslib.util.load_json_string(json_data.decode())
return key_object
|
Generate a derived key by feeding 'password' to the Password-Based Key
Derivation Function (PBKDF2). pyca/cryptography's PBKDF2 implementation is
used in this module. 'salt' may be specified so that a previous derived key
may be regenerated, otherwise '_SALT_SIZE' is used by default. 'iterations'
is the number of SHA-256 iterations to perform, otherwise
'_PBKDF2_ITERATIONS' is used by default.
def _generate_derived_key(password, salt=None, iterations=None):
"""
Generate a derived key by feeding 'password' to the Password-Based Key
Derivation Function (PBKDF2). pyca/cryptography's PBKDF2 implementation is
used in this module. 'salt' may be specified so that a previous derived key
may be regenerated, otherwise '_SALT_SIZE' is used by default. 'iterations'
is the number of SHA-256 iterations to perform, otherwise
'_PBKDF2_ITERATIONS' is used by default.
"""
# Use pyca/cryptography's default backend (e.g., openSSL, CommonCrypto, etc.)
# The default backend is not fixed and can be changed by pyca/cryptography
# over time.
backend = default_backend()
# If 'salt' and 'iterations' are unspecified, a new derived key is generated.
# If specified, a deterministic key is derived according to the given
# 'salt' and 'iterations' values.
if salt is None:
salt = os.urandom(_SALT_SIZE)
if iterations is None:
iterations = _PBKDF2_ITERATIONS
# Derive an AES key with PBKDF2. The 'length' is the desired key length of
# the derived key.
pbkdf_object = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt,
iterations=iterations, backend=backend)
derived_key = pbkdf_object.derive(password.encode('utf-8'))
return salt, iterations, derived_key
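A minimal sketch of deterministic re-derivation, using an illustrative password: reusing the salt and iteration count from a first call yields the same symmetric key, which is how _decrypt() regenerates the key stored alongside the ciphertext.
# Illustrative password; salt and iterations are taken from the first call.
salt, iterations, first_key = _generate_derived_key('secret')
junk_salt, junk_iterations, second_key = _generate_derived_key('secret', salt, iterations)
assert first_key == second_key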
|
Encrypt 'key_data' using the Advanced Encryption Standard (AES-256) algorithm.
'derived_key_information' should contain a key strengthened by PBKDF2. The
key size is 256 bits and AES's mode of operation is set to CTR (CounTeR Mode).
The HMAC of the ciphertext is generated to ensure the ciphertext has not been
modified.
'key_data' is the JSON string representation of the key. In the case
of RSA keys, this format would be 'securesystemslib.formats.RSAKEY_SCHEMA':
{'keytype': 'rsa',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
'derived_key_information' is a dictionary of the form:
{'salt': '...',
'derived_key': '...',
'iterations': '...'}
'securesystemslib.exceptions.CryptoError' raised if the encryption fails.
def _encrypt(key_data, derived_key_information):
"""
Encrypt 'key_data' using the Advanced Encryption Standard (AES-256) algorithm.
'derived_key_information' should contain a key strengthened by PBKDF2. The
key size is 256 bits and AES's mode of operation is set to CTR (CounTeR Mode).
The HMAC of the ciphertext is generated to ensure the ciphertext has not been
modified.
'key_data' is the JSON string representation of the key. In the case
of RSA keys, this format would be 'securesystemslib.formats.RSAKEY_SCHEMA':
{'keytype': 'rsa',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
'derived_key_information' is a dictionary of the form:
{'salt': '...',
'derived_key': '...',
'iterations': '...'}
'securesystemslib.exceptions.CryptoError' raised if the encryption fails.
"""
# Generate a random 128-bit Initialization Vector (IV). Random data is needed
# for the salts and IVs used by the encryption algorithms in
# 'pyca_crypto_keys.py'. Follow the provably secure encrypt-then-MAC approach,
# which allows the ciphertext to be verified without decrypting it and
# prevents an attacker from feeding the block cipher malicious data. Modes
# like GCM provide both encryption and authentication, whereas CTR only
# provides encryption.
iv = os.urandom(16)
# Construct an AES-CTR Cipher object with the given key and a randomly
# generated IV.
symmetric_key = derived_key_information['derived_key']
encryptor = Cipher(algorithms.AES(symmetric_key), modes.CTR(iv),
backend=default_backend()).encryptor()
# Encrypt the plaintext and get the associated ciphertext.
# Do we need to check for any exceptions?
ciphertext = encryptor.update(key_data.encode('utf-8')) + encryptor.finalize()
# Generate the hmac of the ciphertext to ensure it has not been modified.
# The decryption routine may verify a ciphertext without having to perform
# a decryption operation.
symmetric_key = derived_key_information['derived_key']
salt = derived_key_information['salt']
hmac_object = \
cryptography.hazmat.primitives.hmac.HMAC(symmetric_key, hashes.SHA256(),
backend=default_backend())
hmac_object.update(ciphertext)
hmac_value = binascii.hexlify(hmac_object.finalize())
# Store the number of PBKDF2 iterations used to derive the symmetric key so
# that the decryption routine can regenerate the symmetric key successfully.
# The PBKDF2 iterations are allowed to vary for the keys loaded and saved.
iterations = derived_key_information['iterations']
# Return the salt, iterations, hmac, initialization vector, and ciphertext
# as a single string. These five values are delimited by
# '_ENCRYPTION_DELIMITER' to make extraction easier. This delimiter is
# arbitrarily chosen and should not occur in the hexadecimal representations
# of the fields it is separating.
return binascii.hexlify(salt).decode() + _ENCRYPTION_DELIMITER + \
str(iterations) + _ENCRYPTION_DELIMITER + \
hmac_value.decode() + _ENCRYPTION_DELIMITER + \
binascii.hexlify(iv).decode() + _ENCRYPTION_DELIMITER + \
binascii.hexlify(ciphertext).decode()
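An illustrative sketch of the output layout (password and plaintext are made-up values): splitting the returned string on _ENCRYPTION_DELIMITER recovers the five fields in the order described above.
# Illustrative values only; any JSON string could stand in for the key data.
salt, iterations, derived_key = _generate_derived_key('secret')
derived_key_information = {'salt': salt, 'iterations': iterations,
  'derived_key': derived_key}
encrypted = _encrypt('{"keytype": "ed25519"}', derived_key_information)
fields = encrypted.split(_ENCRYPTION_DELIMITER)
assert len(fields) == 5  # salt, iterations, hmac, iv, ciphertext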
|
The corresponding decryption routine for _encrypt().
'securesystemslib.exceptions.CryptoError' raised if the decryption fails.
def _decrypt(file_contents, password):
"""
The corresponding decryption routine for _encrypt().
'securesystemslib.exceptions.CryptoError' raised if the decryption fails.
"""
# Extract the salt, iterations, hmac, initialization vector, and ciphertext
# from 'file_contents'. These five values are delimited by
# '_ENCRYPTION_DELIMITER'. This delimiter is arbitrarily chosen and should
# not occur in the hexadecimal representations of the fields it is
# separating. Raise 'securesystemslib.exceptions.CryptoError', if
# 'file_contents' does not contain the expected data layout.
try:
salt, iterations, hmac, iv, ciphertext = \
file_contents.split(_ENCRYPTION_DELIMITER)
except ValueError:
raise securesystemslib.exceptions.CryptoError('Invalid encrypted file.')
# Ensure we have the expected raw data for the delimited cryptographic data.
salt = binascii.unhexlify(salt.encode('utf-8'))
iterations = int(iterations)
iv = binascii.unhexlify(iv.encode('utf-8'))
ciphertext = binascii.unhexlify(ciphertext.encode('utf-8'))
# Generate derived key from 'password'. The salt and iterations are
# specified so that the expected derived key is regenerated correctly.
# Discard the old "salt" and "iterations" values, as we only need the old
# derived key.
junk_old_salt, junk_old_iterations, symmetric_key = \
_generate_derived_key(password, salt, iterations)
# Verify the hmac to ensure the ciphertext is valid and has not been altered.
# See the encryption routine for why we use the encrypt-then-MAC approach.
# The decryption routine may verify a ciphertext without having to perform
# a decryption operation.
generated_hmac_object = \
cryptography.hazmat.primitives.hmac.HMAC(symmetric_key, hashes.SHA256(),
backend=default_backend())
generated_hmac_object.update(ciphertext)
generated_hmac = binascii.hexlify(generated_hmac_object.finalize())
if not securesystemslib.util.digests_are_equal(generated_hmac.decode(), hmac):
raise securesystemslib.exceptions.CryptoError('Decryption failed.')
# Construct a Cipher object, with the key and iv.
decryptor = Cipher(algorithms.AES(symmetric_key), modes.CTR(iv),
backend=default_backend()).decryptor()
# Decryption gets us the authenticated plaintext.
plaintext = decryptor.update(ciphertext) + decryptor.finalize()
return plaintext
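A tamper-detection sketch using the same illustrative values as above: the unmodified string round-trips to the original JSON bytes, while flipping one hex character of the ciphertext invalidates the stored HMAC, so _decrypt() raises CryptoError instead of returning corrupted plaintext.
# Illustrative values only.
salt, iterations, derived_key = _generate_derived_key('secret')
derived_key_information = {'salt': salt, 'iterations': iterations,
  'derived_key': derived_key}
encrypted = _encrypt('{"keytype": "ed25519"}', derived_key_information)
assert _decrypt(encrypted, 'secret') == b'{"keytype": "ed25519"}'
tampered = encrypted[:-1] + ('0' if encrypted[-1] != '0' else '1')
try:
  _decrypt(tampered, 'secret')
except securesystemslib.exceptions.CryptoError:
  pass  # Expected: the HMAC over the modified ciphertext no longer matches.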
|
Creates a new environment in ``home_dir``.
If ``site_packages`` is true (the default) then the global
``site-packages/`` directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
def create_environment(home_dir, site_packages=True, clear=False,
unzip_setuptools=False, use_distribute=False):
"""
Creates a new environment in ``home_dir``.
If ``site_packages`` is true (the default) then the global
``site-packages/`` directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
py_executable = os.path.abspath(install_python(
home_dir, lib_dir, inc_dir, bin_dir,
site_packages=site_packages, clear=clear))
install_distutils(lib_dir, home_dir)
if use_distribute or os.environ.get('VIRTUALENV_USE_DISTRIBUTE'):
install_distribute(py_executable, unzip=unzip_setuptools)
else:
install_setuptools(py_executable, unzip=unzip_setuptools)
install_pip(py_executable)
install_activate(home_dir, bin_dir)
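A hypothetical invocation (the target directory is illustrative): build an isolated environment that ignores the global site-packages directory and clears any previous contents first.
# Hypothetical path; adjust to taste.
create_environment('/tmp/example-venv', site_packages=False, clear=True)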
|
Return the path locations for the environment (where libraries are,
where scripts go, etc)
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
if sys.platform == 'win32':
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
try:
import win32api
except ImportError:
print('Error: the path "%s" has a space in it' % home_dir)
print('To handle these kinds of paths, the win32api module must be installed:')
print(' http://sourceforge.net/projects/pywin32/')
sys.exit(3)
home_dir = win32api.GetShortPathName(home_dir)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
elif is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
else:
lib_dir = join(home_dir, 'lib', py_version)
inc_dir = join(home_dir, 'include', py_version)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir
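Illustrative only: on a POSIX platform the returned tuple is the environment root plus its lib/<py_version>, include/<py_version>, and bin subdirectories; on Windows the Lib, Include, and Scripts directories are used instead.
# Hypothetical path; the exact library/include names depend on py_version.
home_dir, lib_dir, inc_dir, bin_dir = path_locations('/tmp/example-venv')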
|