repository_name (string, 7..55 chars) | func_path_in_repository (string, 4..223 chars) | func_name (string, 1..134 chars) | whole_func_string (string, 75..104k chars) | language (string, 1 class) | func_code_string (string, 75..104k chars) | func_code_tokens (sequence, 19..28.4k tokens) | func_documentation_string (string, 1..46.9k chars) | func_documentation_tokens (sequence, 1..1.97k tokens) | split_name (string, 1 class) | func_code_url (string, 87..315 chars) |
---|---|---|---|---|---|---|---|---|---|---|
KeithSSmith/switcheo-python | switcheo/authenticated_client.py | AuthenticatedClient.execute_order | def execute_order(self, order_params, private_key):
"""
This function executes the order created before it and signs the transaction to be submitted to the blockchain.
Execution of this function is as follows::
execute_order(order_params=create_order, private_key=kp)
The expected return result for this function is as follows::
{
'id': '4e6a59fd-d750-4332-aaf0-f2babfa8ad67',
'blockchain': 'neo',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'side': 'buy',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'offer_amount': '2000000',
'want_amount': '10000000000',
'transfer_amount': '0',
'priority_gas_amount': '0',
'use_native_token': True,
'native_fee_transfer_amount': 0,
'deposit_txn': None,
'created_at': '2018-08-05T10:38:37.714Z',
'status': 'processed',
'fills': [],
'makes': [
{
'id': 'e30a7fdf-779c-4623-8f92-8a961450d843',
'offer_hash': 'b45ddfb97ade5e0363d9e707dac9ad1c530448db263e86494225a0025006f968',
'available_amount': '2000000',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'offer_amount': '2000000',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'want_amount': '10000000000',
'filled_amount': '0.0',
'txn': None,
'cancel_txn': None,
'price': '0.0002',
'status': 'confirming',
'created_at': '2018-08-05T10:38:37.731Z',
'transaction_hash': '5c4cb1e73b9f2e608b6e768e0654649a4d15e08a7fe63fc536c454fa563a2f0f',
'trades': []
}
]
}
:param order_params: Dictionary generated from the create order function.
:type order_params: dict
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary of the transaction on the order book.
"""
order_id = order_params['id']
api_params = self.sign_execute_order_function[self.blockchain](order_params, private_key)
return self.request.post(path='/orders/{}/broadcast'.format(order_id), json_data=api_params) | python | def execute_order(self, order_params, private_key):
"""
This function executes the order created before it and signs the transaction to be submitted to the blockchain.
Execution of this function is as follows::
execute_order(order_params=create_order, private_key=kp)
The expected return result for this function is as follows::
{
'id': '4e6a59fd-d750-4332-aaf0-f2babfa8ad67',
'blockchain': 'neo',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'side': 'buy',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'offer_amount': '2000000',
'want_amount': '10000000000',
'transfer_amount': '0',
'priority_gas_amount': '0',
'use_native_token': True,
'native_fee_transfer_amount': 0,
'deposit_txn': None,
'created_at': '2018-08-05T10:38:37.714Z',
'status': 'processed',
'fills': [],
'makes': [
{
'id': 'e30a7fdf-779c-4623-8f92-8a961450d843',
'offer_hash': 'b45ddfb97ade5e0363d9e707dac9ad1c530448db263e86494225a0025006f968',
'available_amount': '2000000',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'offer_amount': '2000000',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'want_amount': '10000000000',
'filled_amount': '0.0',
'txn': None,
'cancel_txn': None,
'price': '0.0002',
'status': 'confirming',
'created_at': '2018-08-05T10:38:37.731Z',
'transaction_hash': '5c4cb1e73b9f2e608b6e768e0654649a4d15e08a7fe63fc536c454fa563a2f0f',
'trades': []
}
]
}
:param order_params: Dictionary generated from the create order function.
:type order_params: dict
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary of the transaction on the order book.
"""
order_id = order_params['id']
api_params = self.sign_execute_order_function[self.blockchain](order_params, private_key)
return self.request.post(path='/orders/{}/broadcast'.format(order_id), json_data=api_params) | [
"def",
"execute_order",
"(",
"self",
",",
"order_params",
",",
"private_key",
")",
":",
"order_id",
"=",
"order_params",
"[",
"'id'",
"]",
"api_params",
"=",
"self",
".",
"sign_execute_order_function",
"[",
"self",
".",
"blockchain",
"]",
"(",
"order_params",
",",
"private_key",
")",
"return",
"self",
".",
"request",
".",
"post",
"(",
"path",
"=",
"'/orders/{}/broadcast'",
".",
"format",
"(",
"order_id",
")",
",",
"json_data",
"=",
"api_params",
")"
] | This function executes the order created before it and signs the transaction to be submitted to the blockchain.
Execution of this function is as follows::
execute_order(order_params=create_order, private_key=kp)
The expected return result for this function is as follows::
{
'id': '4e6a59fd-d750-4332-aaf0-f2babfa8ad67',
'blockchain': 'neo',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'side': 'buy',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'offer_amount': '2000000',
'want_amount': '10000000000',
'transfer_amount': '0',
'priority_gas_amount': '0',
'use_native_token': True,
'native_fee_transfer_amount': 0,
'deposit_txn': None,
'created_at': '2018-08-05T10:38:37.714Z',
'status': 'processed',
'fills': [],
'makes': [
{
'id': 'e30a7fdf-779c-4623-8f92-8a961450d843',
'offer_hash': 'b45ddfb97ade5e0363d9e707dac9ad1c530448db263e86494225a0025006f968',
'available_amount': '2000000',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'offer_amount': '2000000',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'want_amount': '10000000000',
'filled_amount': '0.0',
'txn': None,
'cancel_txn': None,
'price': '0.0002',
'status': 'confirming',
'created_at': '2018-08-05T10:38:37.731Z',
'transaction_hash': '5c4cb1e73b9f2e608b6e768e0654649a4d15e08a7fe63fc536c454fa563a2f0f',
'trades': []
}
]
}
:param order_params: Dictionary generated from the create order function.
:type order_params: dict
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary of the transaction on the order book. | [
"This",
"function",
"executes",
"the",
"order",
"created",
"before",
"it",
"and",
"signs",
"the",
"transaction",
"to",
"be",
"submitted",
"to",
"the",
"blockchain",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/authenticated_client.py#L595-L651 |
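Illustrative usage sketch (not part of the dataset row above): it shows where the documented `execute_order(order_params, private_key)` call sits in an order flow. The `AuthenticatedClient` constructor arguments, the `create_order` companion call, and the NEO KeyPair construction are assumptions not shown in this row.

```python
# Hedged sketch of the order flow described in the docstring above.
# Assumptions (not shown in this row): AuthenticatedClient constructor arguments,
# the create_order() companion call, and how the NEO KeyPair `kp` is built.
from switcheo.authenticated_client import AuthenticatedClient

# client = AuthenticatedClient(blockchain='neo')           # assumed constructor arguments
# kp = ...                                                 # NEO KeyPair for the signing wallet
# order_params = client.create_order(...)                  # assumed companion call returning the unsigned order
# executed = client.execute_order(order_params=order_params, private_key=kp)
# executed['status']                                       # 'processed' in the sample response above
```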
KeithSSmith/switcheo-python | switcheo/authenticated_client.py | AuthenticatedClient.withdrawal | def withdrawal(self, asset, amount, private_key):
"""
This function is a wrapper function around the create and execute withdrawal functions to help make this
process simpler for the end user by combining these requests in 1 step.
Execution of this function is as follows::
withdrawal(asset="SWTH", amount=1.1, private_key=kp)
The expected return result for this function is the same as the execute_withdrawal function::
{
'event_type': 'withdrawal',
'amount': -100000,
'asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'status': 'confirming',
'id': '96e5f797-435b-40ab-9085-4e95c6749218',
'blockchain': 'neo',
'reason_code': 9,
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'transaction_hash': None,
'created_at': '2018-08-05T10:03:58.885Z',
'updated_at': '2018-08-05T10:03:59.828Z',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82'
}
:param asset: Script Hash of asset ID from the available products.
:type asset: str
:param amount: The amount of coins/tokens to be withdrawn.
:type amount: float
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary with the status of the withdrawal request and blockchain details.
"""
create_withdrawal = self.create_withdrawal(asset=asset, amount=amount, private_key=private_key)
return self.execute_withdrawal(withdrawal_params=create_withdrawal, private_key=private_key) | python | def withdrawal(self, asset, amount, private_key):
"""
This function is a wrapper function around the create and execute withdrawal functions to help make this
process simpler for the end user by combining these requests in 1 step.
Execution of this function is as follows::
withdrawal(asset="SWTH", amount=1.1, private_key=kp)
The expected return result for this function is the same as the execute_withdrawal function::
{
'event_type': 'withdrawal',
'amount': -100000,
'asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'status': 'confirming',
'id': '96e5f797-435b-40ab-9085-4e95c6749218',
'blockchain': 'neo',
'reason_code': 9,
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'transaction_hash': None,
'created_at': '2018-08-05T10:03:58.885Z',
'updated_at': '2018-08-05T10:03:59.828Z',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82'
}
:param asset: Script Hash of asset ID from the available products.
:type asset: str
:param amount: The amount of coins/tokens to be withdrawn.
:type amount: float
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary with the status of the withdrawal request and blockchain details.
"""
create_withdrawal = self.create_withdrawal(asset=asset, amount=amount, private_key=private_key)
return self.execute_withdrawal(withdrawal_params=create_withdrawal, private_key=private_key) | [
"def",
"withdrawal",
"(",
"self",
",",
"asset",
",",
"amount",
",",
"private_key",
")",
":",
"create_withdrawal",
"=",
"self",
".",
"create_withdrawal",
"(",
"asset",
"=",
"asset",
",",
"amount",
"=",
"amount",
",",
"private_key",
"=",
"private_key",
")",
"return",
"self",
".",
"execute_withdrawal",
"(",
"withdrawal_params",
"=",
"create_withdrawal",
",",
"private_key",
"=",
"private_key",
")"
] | This function is a wrapper function around the create and execute withdrawal functions to help make this
process simpler for the end user by combining these requests in 1 step.
Execution of this function is as follows::
withdrawal(asset="SWTH", amount=1.1, private_key=kp)
The expected return result for this function is the same as the execute_withdrawal function::
{
'event_type': 'withdrawal',
'amount': -100000,
'asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'status': 'confirming',
'id': '96e5f797-435b-40ab-9085-4e95c6749218',
'blockchain': 'neo',
'reason_code': 9,
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'transaction_hash': None,
'created_at': '2018-08-05T10:03:58.885Z',
'updated_at': '2018-08-05T10:03:59.828Z',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82'
}
:param asset: Script Hash of asset ID from the available products.
:type asset: str
:param amount: The amount of coins/tokens to be withdrawn.
:type amount: float
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary with the status of the withdrawal request and blockchain details. | [
"This",
"function",
"is",
"a",
"wrapper",
"function",
"around",
"the",
"create",
"and",
"execute",
"withdrawal",
"functions",
"to",
"help",
"make",
"this",
"processes",
"simpler",
"for",
"the",
"end",
"user",
"by",
"combining",
"these",
"requests",
"in",
"1",
"step",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/authenticated_client.py#L653-L687 |
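Illustrative usage sketch (not part of the dataset row): the one-step withdrawal described above. Only `withdrawal(asset, amount, private_key)` is documented in this row; the client constructor arguments and the NEO KeyPair construction are assumptions.

```python
# Hedged sketch of the one-step withdrawal wrapper described above.
# The client constructor arguments and the NEO KeyPair construction are assumptions.
from switcheo.authenticated_client import AuthenticatedClient

# client = AuthenticatedClient(blockchain='neo')           # assumed constructor arguments
# kp = ...                                                 # NEO KeyPair for the withdrawing wallet
# result = client.withdrawal(asset="SWTH", amount=1.1, private_key=kp)
# result['status']                                         # 'confirming' in the sample response above
```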
KeithSSmith/switcheo-python | switcheo/authenticated_client.py | AuthenticatedClient.create_withdrawal | def create_withdrawal(self, asset, amount, private_key):
"""
Function to create a withdrawal request by generating a withdrawal ID request from the Switcheo API.
Execution of this function is as follows::
create_withdrawal(asset="SWTH", amount=1.1, private_key=kp)
The expected return result for this function is as follows::
{
'id': 'a5a4d396-fa9f-4191-bf50-39a3d06d5e0d'
}
:param asset: Script Hash of asset ID from the available products.
:type asset: str
:param amount: The amount of coins/tokens to be withdrawn.
:type amount: float
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary with the withdrawal ID generated by the Switcheo API.
"""
signable_params = {
'blockchain': self.blockchain,
'asset_id': asset,
'amount': str(self.blockchain_amount[self.blockchain](amount)),
'timestamp': get_epoch_milliseconds(),
'contract_hash': self.contract_hash
}
api_params = self.sign_create_withdrawal_function[self.blockchain](signable_params, private_key)
return self.request.post(path='/withdrawals', json_data=api_params) | python | def create_withdrawal(self, asset, amount, private_key):
"""
Function to create a withdrawal request by generating a withdrawal ID request from the Switcheo API.
Execution of this function is as follows::
create_withdrawal(asset="SWTH", amount=1.1, private_key=kp)
The expected return result for this function is as follows::
{
'id': 'a5a4d396-fa9f-4191-bf50-39a3d06d5e0d'
}
:param asset: Script Hash of asset ID from the available products.
:type asset: str
:param amount: The amount of coins/tokens to be withdrawn.
:type amount: float
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary with the withdrawal ID generated by the Switcheo API.
"""
signable_params = {
'blockchain': self.blockchain,
'asset_id': asset,
'amount': str(self.blockchain_amount[self.blockchain](amount)),
'timestamp': get_epoch_milliseconds(),
'contract_hash': self.contract_hash
}
api_params = self.sign_create_withdrawal_function[self.blockchain](signable_params, private_key)
return self.request.post(path='/withdrawals', json_data=api_params) | [
"def",
"create_withdrawal",
"(",
"self",
",",
"asset",
",",
"amount",
",",
"private_key",
")",
":",
"signable_params",
"=",
"{",
"'blockchain'",
":",
"self",
".",
"blockchain",
",",
"'asset_id'",
":",
"asset",
",",
"'amount'",
":",
"str",
"(",
"self",
".",
"blockchain_amount",
"[",
"self",
".",
"blockchain",
"]",
"(",
"amount",
")",
")",
",",
"'timestamp'",
":",
"get_epoch_milliseconds",
"(",
")",
",",
"'contract_hash'",
":",
"self",
".",
"contract_hash",
"}",
"api_params",
"=",
"self",
".",
"sign_create_withdrawal_function",
"[",
"self",
".",
"blockchain",
"]",
"(",
"signable_params",
",",
"private_key",
")",
"return",
"self",
".",
"request",
".",
"post",
"(",
"path",
"=",
"'/withdrawals'",
",",
"json_data",
"=",
"api_params",
")"
] | Function to create a withdrawal request by generating a withdrawal ID request from the Switcheo API.
Execution of this function is as follows::
create_withdrawal(asset="SWTH", amount=1.1, private_key=kp)
The expected return result for this function is as follows::
{
'id': 'a5a4d396-fa9f-4191-bf50-39a3d06d5e0d'
}
:param asset: Script Hash of asset ID from the available products.
:type asset: str
:param amount: The amount of coins/tokens to be withdrawn.
:type amount: float
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary with the withdrawal ID generated by the Switcheo API. | [
"Function",
"to",
"create",
"a",
"withdrawal",
"request",
"by",
"generating",
"a",
"withdrawal",
"ID",
"request",
"from",
"the",
"Switcheo",
"API",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/authenticated_client.py#L689-L718 |
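Illustrative sketch (not part of the dataset row): `create_withdrawal` only reserves a withdrawal ID; its return value feeds `execute_withdrawal` in the next row. The client construction and the NEO KeyPair are assumptions not shown here.

```python
# Hedged sketch: create_withdrawal() reserves a withdrawal ID on the exchange; the
# returned dict is what execute_withdrawal() (next row) signs and broadcasts.
# Client construction and the NEO KeyPair are assumptions not shown in this row.
from switcheo.authenticated_client import AuthenticatedClient

# client = AuthenticatedClient(blockchain='neo')           # assumed constructor arguments
# pending = client.create_withdrawal(asset="SWTH", amount=1.1, private_key=kp)
# pending                                                  # {'id': 'a5a4d396-...'} per the sample response
```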
KeithSSmith/switcheo-python | switcheo/authenticated_client.py | AuthenticatedClient.execute_withdrawal | def execute_withdrawal(self, withdrawal_params, private_key):
"""
This function is to sign the message generated from the create withdrawal function and submit it to the
blockchain for transfer from the smart contract to the owner's address.
Execution of this function is as follows::
execute_withdrawal(withdrawal_params=create_withdrawal, private_key=kp)
The expected return result for this function is as follows::
{
'event_type': 'withdrawal',
'amount': -100000,
'asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'status': 'confirming',
'id': '96e5f797-435b-40ab-9085-4e95c6749218',
'blockchain': 'neo',
'reason_code': 9,
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'transaction_hash': None,
'created_at': '2018-08-05T10:03:58.885Z',
'updated_at': '2018-08-05T10:03:59.828Z',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82'
}
:param withdrawal_params: Dictionary from the create withdrawal function to sign and submit to the blockchain.
:type withdrawal_params: dict
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary with the status of the withdrawal request and blockchain transaction details.
"""
withdrawal_id = withdrawal_params['id']
api_params = self.sign_execute_withdrawal_function[self.blockchain](withdrawal_params, private_key)
return self.request.post(path='/withdrawals/{}/broadcast'.format(withdrawal_id), json_data=api_params) | python | def execute_withdrawal(self, withdrawal_params, private_key):
"""
This function is to sign the message generated from the create withdrawal function and submit it to the
blockchain for transfer from the smart contract to the owner's address.
Execution of this function is as follows::
execute_withdrawal(withdrawal_params=create_withdrawal, private_key=kp)
The expected return result for this function is as follows::
{
'event_type': 'withdrawal',
'amount': -100000,
'asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'status': 'confirming',
'id': '96e5f797-435b-40ab-9085-4e95c6749218',
'blockchain': 'neo',
'reason_code': 9,
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'transaction_hash': None,
'created_at': '2018-08-05T10:03:58.885Z',
'updated_at': '2018-08-05T10:03:59.828Z',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82'
}
:param withdrawal_params: Dictionary from the create withdrawal function to sign and submit to the blockchain.
:type withdrawal_params: dict
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary with the status of the withdrawal request and blockchain transaction details.
"""
withdrawal_id = withdrawal_params['id']
api_params = self.sign_execute_withdrawal_function[self.blockchain](withdrawal_params, private_key)
return self.request.post(path='/withdrawals/{}/broadcast'.format(withdrawal_id), json_data=api_params) | [
"def",
"execute_withdrawal",
"(",
"self",
",",
"withdrawal_params",
",",
"private_key",
")",
":",
"withdrawal_id",
"=",
"withdrawal_params",
"[",
"'id'",
"]",
"api_params",
"=",
"self",
".",
"sign_execute_withdrawal_function",
"[",
"self",
".",
"blockchain",
"]",
"(",
"withdrawal_params",
",",
"private_key",
")",
"return",
"self",
".",
"request",
".",
"post",
"(",
"path",
"=",
"'/withdrawals/{}/broadcast'",
".",
"format",
"(",
"withdrawal_id",
")",
",",
"json_data",
"=",
"api_params",
")"
] | This function is to sign the message generated from the create withdrawal function and submit it to the
blockchain for transfer from the smart contract to the owner's address.
Execution of this function is as follows::
execute_withdrawal(withdrawal_params=create_withdrawal, private_key=kp)
The expected return result for this function is as follows::
{
'event_type': 'withdrawal',
'amount': -100000,
'asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'status': 'confirming',
'id': '96e5f797-435b-40ab-9085-4e95c6749218',
'blockchain': 'neo',
'reason_code': 9,
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'transaction_hash': None,
'created_at': '2018-08-05T10:03:58.885Z',
'updated_at': '2018-08-05T10:03:59.828Z',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82'
}
:param withdrawal_params: Dictionary from the create withdrawal function to sign and submit to the blockchain.
:type withdrawal_params: dict
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary with the status of the withdrawal request and blockchain transaction details. | [
"This",
"function",
"is",
"to",
"sign",
"the",
"message",
"generated",
"from",
"the",
"create",
"withdrawal",
"function",
"and",
"submit",
"it",
"to",
"the",
"blockchain",
"for",
"transfer",
"from",
"the",
"smart",
"contract",
"to",
"the",
"owners",
"address",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/authenticated_client.py#L720-L753 |
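Illustrative sketch (not part of the dataset row): the two-step flow that the `withdrawal` wrapper combines, again with the client construction and the NEO KeyPair treated as assumptions.

```python
# Hedged sketch of the two-step withdrawal that the withdrawal() wrapper combines:
# create the withdrawal, then sign and broadcast it. Client construction and the
# NEO KeyPair are assumptions not shown in this row.
from switcheo.authenticated_client import AuthenticatedClient

# client = AuthenticatedClient(blockchain='neo')           # assumed constructor arguments
# kp = ...                                                 # NEO KeyPair for the withdrawing wallet
# pending = client.create_withdrawal(asset="SWTH", amount=1.1, private_key=kp)
# result = client.execute_withdrawal(withdrawal_params=pending, private_key=kp)
# result['status']                                         # 'confirming' in the sample response above
```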
MFreidank/ARSpy | arspy/ars.py | adaptive_rejection_sampling | def adaptive_rejection_sampling(logpdf: callable,
a: float, b: float,
domain: Tuple[float, float],
n_samples: int,
random_stream=None):
"""
Adaptive rejection sampling samples exactly (all samples are i.i.d.) and efficiently from any univariate log-concave distribution. The basic idea is to successively determine an envelope of straight-line segments to construct an increasingly accurate approximation of the logarithm of the target density.
It does not require any normalization of the target distribution.
Parameters
----------
logpdf: callable
Univariate function that computes :math:`log(f(u))`
for a given :math:`u`, where :math:`f(u)` is proportional
to the target density to sample from.
a: float
Lower starting point used to initialize the hulls.
Must lie in the domain of the logpdf and it
must hold: :math:`a < b`.
b: float
Upper starting point used to initialize the hulls.
Must lie in the domain of the logpdf and it
must hold: :math:`a < b`.
domain : Tuple[float, float]
Domain of `logpdf`.
May be unbounded on either or both sides,
in which case `(float("-inf"), float("inf"))`
would be passed.
If this domain is unbounded to the left,
the derivative of the logpdf
for x<= a must be positive.
If this domain is unbounded to the right the derivative of the logpdf for x>=b
must be negative.
n_samples: int
Number of samples to draw.
random_stream : RandomState, optional
Seeded random number generator object with same interface as a NumPy
RandomState object. Defaults to `None` in which case a NumPy
RandomState seeded from `/dev/urandom` if available or the clock if not
will be used.
Returns
----------
samples : list
A list of samples drawn from the
target distribution :math:`f`
with the given `logpdf`.
Examples
----------
Sampling from a simple gaussian, adaptive rejection sampling style.
We use the logpdf of a standard gaussian and this small code snippet
demonstrates that our sample approximation accurately approximates the mean:
>>> from math import isclose
>>> from numpy import log, exp, mean
>>> gaussian_logpdf = lambda x, sigma=1: log(exp(-x ** 2 / sigma))
>>> a, b = -2, 2 # a < b must hold
>>> domain = (float("-inf"), float("inf"))
>>> n_samples = 10000
>>> samples = adaptive_rejection_sampling(logpdf=gaussian_logpdf, a=a, b=b, domain=domain, n_samples=n_samples)
>>> isclose(mean(samples), 0.0, abs_tol=1e-02)
True
"""
assert(hasattr(logpdf, "__call__"))
assert(len(domain) == 2), "Domain must be two-element iterable."
assert(domain[1] >= domain[0]), "Invalid domain, it must hold: domain[1] >= domain[0]."
assert(n_samples >= 0), "Number of samples must be >= 0."
if random_stream is None:
random_stream = RandomState()
if a >= b or isinf(a) or isinf(b) or a < domain[0] or b > domain[1]:
raise ValueError("invalid a and b")
n_derivative_steps = 1e-3 * (b - a)
S = (a, a + n_derivative_steps, b - n_derivative_steps, b)
if domain[0] == float("-inf"):
# ensure positive derivative at 'a'
derivative_sign = sign(logpdf(a + n_derivative_steps) - logpdf(a))
positive_derivative = derivative_sign > 0
assert(positive_derivative), "derivative at 'a' must be positive, since the domain is unbounded to the left"
if domain[1] == float("inf"):
# ensure negative derivative at 'b'
derivative_sign = sign(logpdf(b) - logpdf(b - n_derivative_steps))
negative_derivative = derivative_sign < 0
assert(negative_derivative), "derivative at 'b' must be negative, since the domain is unbounded to the right"
# initialize a mesh on which to create upper & lower hulls
n_initial_mesh_points = 3
S = unique(
(S[0], *(linspace(S[1], S[2], num=n_initial_mesh_points + 2)), S[3])
)
fS = tuple(logpdf(s) for s in S)
lower_hull, upper_hull = compute_hulls(S=S, fS=fS, domain=domain)
samples = []
while len(samples) < n_samples:
mesh_changed = False
x = sample_upper_hull(upper_hull, random_stream=random_stream)
lh_val, uh_val = evaluate_hulls(x, lower_hull, upper_hull)
U = random_stream.rand()
if log(U) <= lh_val - uh_val:
# accept u is below lower bound
samples.append(x)
elif log(U) <= logpdf(x) - uh_val:
# accept, u is between lower bound and f
samples.append(x)
mesh_changed = True
else:
# reject, u is between f and upper_bound
mesh_changed = True
if mesh_changed:
S = sorted([*S, x])
fS = tuple(logpdf(s) for s in S)
lower_hull, upper_hull = compute_hulls(S=S, fS=fS, domain=domain)
return samples | python | def adaptive_rejection_sampling(logpdf: callable,
a: float, b: float,
domain: Tuple[float, float],
n_samples: int,
random_stream=None):
"""
Adaptive rejection sampling samples exactly (all samples are i.i.d.) and efficiently from any univariate log-concave distribution. The basic idea is to successively determine an envelope of straight-line segments to construct an increasingly accurate approximation of the logarithm of the target density.
It does not require any normalization of the target distribution.
Parameters
----------
logpdf: callable
Univariate function that computes :math:`log(f(u))`
for a given :math:`u`, where :math:`f(u)` is proportional
to the target density to sample from.
a: float
Lower starting point used to initialize the hulls.
Must lie in the domain of the logpdf and it
must hold: :math:`a < b`.
b: float
Upper starting point used to initialize the hulls.
Must lie in the domain of the logpdf and it
must hold: :math:`a < b`.
domain : Tuple[float, float]
Domain of `logpdf`.
May be unbounded on either or both sides,
in which case `(float("-inf"), float("inf"))`
would be passed.
If this domain is unbounded to the left,
the derivative of the logpdf
for x<= a must be positive.
If this domain is unbounded to the right the derivative of the logpdf for x>=b
must be negative.
n_samples: int
Number of samples to draw.
random_stream : RandomState, optional
Seeded random number generator object with same interface as a NumPy
RandomState object. Defaults to `None` in which case a NumPy
RandomState seeded from `/dev/urandom` if available or the clock if not
will be used.
Returns
----------
samples : list
A list of samples drawn from the
target distribution :math:`f`
with the given `logpdf`.
Examples
----------
Sampling from a simple gaussian, adaptive rejection sampling style.
We use the logpdf of a standard gaussian and this small code snippet
demonstrates that our sample approximation accurately approximates the mean:
>>> from math import isclose
>>> from numpy import log, exp, mean
>>> gaussian_logpdf = lambda x, sigma=1: log(exp(-x ** 2 / sigma))
>>> a, b = -2, 2 # a < b must hold
>>> domain = (float("-inf"), float("inf"))
>>> n_samples = 10000
>>> samples = adaptive_rejection_sampling(logpdf=gaussian_logpdf, a=a, b=b, domain=domain, n_samples=n_samples)
>>> isclose(mean(samples), 0.0, abs_tol=1e-02)
True
"""
assert(hasattr(logpdf, "__call__"))
assert(len(domain) == 2), "Domain must be two-element iterable."
assert(domain[1] >= domain[0]), "Invalid domain, it must hold: domain[1] >= domain[0]."
assert(n_samples >= 0), "Number of samples must be >= 0."
if random_stream is None:
random_stream = RandomState()
if a >= b or isinf(a) or isinf(b) or a < domain[0] or b > domain[1]:
raise ValueError("invalid a and b")
n_derivative_steps = 1e-3 * (b - a)
S = (a, a + n_derivative_steps, b - n_derivative_steps, b)
if domain[0] == float("-inf"):
# ensure positive derivative at 'a'
derivative_sign = sign(logpdf(a + n_derivative_steps) - logpdf(a))
positive_derivative = derivative_sign > 0
assert(positive_derivative), "derivative at 'a' must be positive, since the domain is unbounded to the left"
if domain[1] == float("inf"):
# ensure negative derivative at 'b'
derivative_sign = sign(logpdf(b) - logpdf(b - n_derivative_steps))
negative_derivative = derivative_sign < 0
assert(negative_derivative), "derivative at 'b' must be negative, since the domain is unbounded to the right"
# initialize a mesh on which to create upper & lower hulls
n_initial_mesh_points = 3
S = unique(
(S[0], *(linspace(S[1], S[2], num=n_initial_mesh_points + 2)), S[3])
)
fS = tuple(logpdf(s) for s in S)
lower_hull, upper_hull = compute_hulls(S=S, fS=fS, domain=domain)
samples = []
while len(samples) < n_samples:
mesh_changed = False
x = sample_upper_hull(upper_hull, random_stream=random_stream)
lh_val, uh_val = evaluate_hulls(x, lower_hull, upper_hull)
U = random_stream.rand()
if log(U) <= lh_val - uh_val:
# accept u is below lower bound
samples.append(x)
elif log(U) <= logpdf(x) - uh_val:
# accept, u is between lower bound and f
samples.append(x)
mesh_changed = True
else:
# reject, u is between f and upper_bound
mesh_changed = True
if mesh_changed:
S = sorted([*S, x])
fS = tuple(logpdf(s) for s in S)
lower_hull, upper_hull = compute_hulls(S=S, fS=fS, domain=domain)
return samples | [
"def",
"adaptive_rejection_sampling",
"(",
"logpdf",
":",
"callable",
",",
"a",
":",
"float",
",",
"b",
":",
"float",
",",
"domain",
":",
"Tuple",
"[",
"float",
",",
"float",
"]",
",",
"n_samples",
":",
"int",
",",
"random_stream",
"=",
"None",
")",
":",
"assert",
"(",
"hasattr",
"(",
"logpdf",
",",
"\"__call__\"",
")",
")",
"assert",
"(",
"len",
"(",
"domain",
")",
"==",
"2",
")",
",",
"\"Domain must be two-element iterable.\"",
"assert",
"(",
"domain",
"[",
"1",
"]",
">=",
"domain",
"[",
"0",
"]",
")",
",",
"\"Invalid domain, it must hold: domain[1] >= domain[0].\"",
"assert",
"(",
"n_samples",
">=",
"0",
")",
",",
"\"Number of samples must be >= 0.\"",
"if",
"random_stream",
"is",
"None",
":",
"random_stream",
"=",
"RandomState",
"(",
")",
"if",
"a",
">=",
"b",
"or",
"isinf",
"(",
"a",
")",
"or",
"isinf",
"(",
"b",
")",
"or",
"a",
"<",
"domain",
"[",
"0",
"]",
"or",
"b",
">",
"domain",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"invalid a and b\"",
")",
"n_derivative_steps",
"=",
"1e-3",
"*",
"(",
"b",
"-",
"a",
")",
"S",
"=",
"(",
"a",
",",
"a",
"+",
"n_derivative_steps",
",",
"b",
"-",
"n_derivative_steps",
",",
"b",
")",
"if",
"domain",
"[",
"0",
"]",
"==",
"float",
"(",
"\"-inf\"",
")",
":",
"# ensure positive derivative at 'a'",
"derivative_sign",
"=",
"sign",
"(",
"logpdf",
"(",
"a",
"+",
"n_derivative_steps",
")",
"-",
"logpdf",
"(",
"a",
")",
")",
"positive_derivative",
"=",
"derivative_sign",
">",
"0",
"assert",
"(",
"positive_derivative",
")",
",",
"\"derivative at 'a' must be positive, since the domain is unbounded to the left\"",
"if",
"domain",
"[",
"1",
"]",
"==",
"float",
"(",
"\"inf\"",
")",
":",
"# ensure negative derivative at 'b'",
"derivative_sign",
"=",
"sign",
"(",
"logpdf",
"(",
"b",
")",
"-",
"logpdf",
"(",
"b",
"-",
"n_derivative_steps",
")",
")",
"negative_derivative",
"=",
"derivative_sign",
"<",
"0",
"assert",
"(",
"negative_derivative",
")",
",",
"\"derivative at 'b' must be negative, since the domain is unbounded to the right\"",
"# initialize a mesh on which to create upper & lower hulls",
"n_initial_mesh_points",
"=",
"3",
"S",
"=",
"unique",
"(",
"(",
"S",
"[",
"0",
"]",
",",
"*",
"(",
"linspace",
"(",
"S",
"[",
"1",
"]",
",",
"S",
"[",
"2",
"]",
",",
"num",
"=",
"n_initial_mesh_points",
"+",
"2",
")",
")",
",",
"S",
"[",
"3",
"]",
")",
")",
"fS",
"=",
"tuple",
"(",
"logpdf",
"(",
"s",
")",
"for",
"s",
"in",
"S",
")",
"lower_hull",
",",
"upper_hull",
"=",
"compute_hulls",
"(",
"S",
"=",
"S",
",",
"fS",
"=",
"fS",
",",
"domain",
"=",
"domain",
")",
"samples",
"=",
"[",
"]",
"while",
"len",
"(",
"samples",
")",
"<",
"n_samples",
":",
"mesh_changed",
"=",
"False",
"x",
"=",
"sample_upper_hull",
"(",
"upper_hull",
",",
"random_stream",
"=",
"random_stream",
")",
"lh_val",
",",
"uh_val",
"=",
"evaluate_hulls",
"(",
"x",
",",
"lower_hull",
",",
"upper_hull",
")",
"U",
"=",
"random_stream",
".",
"rand",
"(",
")",
"if",
"log",
"(",
"U",
")",
"<=",
"lh_val",
"-",
"uh_val",
":",
"# accept u is below lower bound",
"samples",
".",
"append",
"(",
"x",
")",
"elif",
"log",
"(",
"U",
")",
"<=",
"logpdf",
"(",
"x",
")",
"-",
"uh_val",
":",
"# accept, u is between lower bound and f",
"samples",
".",
"append",
"(",
"x",
")",
"mesh_changed",
"=",
"True",
"else",
":",
"# reject, u is between f and upper_bound",
"mesh_changed",
"=",
"True",
"if",
"mesh_changed",
":",
"S",
"=",
"sorted",
"(",
"[",
"*",
"S",
",",
"x",
"]",
")",
"fS",
"=",
"tuple",
"(",
"logpdf",
"(",
"s",
")",
"for",
"s",
"in",
"S",
")",
"lower_hull",
",",
"upper_hull",
"=",
"compute_hulls",
"(",
"S",
"=",
"S",
",",
"fS",
"=",
"fS",
",",
"domain",
"=",
"domain",
")",
"return",
"samples"
] | Adaptive rejection sampling samples exactly (all samples are i.i.d.) and efficiently from any univariate log-concave distribution. The basic idea is to successively determine an envelope of straight-line segments to construct an increasingly accurate approximation of the logarithm of the target density.
It does not require any normalization of the target distribution.
Parameters
----------
logpdf: callable
Univariate function that computes :math:`log(f(u))`
for a given :math:`u`, where :math:`f(u)` is proportional
to the target density to sample from.
a: float
Lower starting point used to initialize the hulls.
Must lie in the domain of the logpdf and it
must hold: :math:`a < b`.
b: float
Upper starting point used to initialize the hulls.
Must lie in the domain of the logpdf and it
must hold: :math:`a < b`.
domain : Tuple[float, float]
Domain of `logpdf`.
May be unbounded on either or both sides,
in which case `(float("-inf"), float("inf"))`
would be passed.
If this domain is unbounded to the left,
the derivative of the logpdf
for x<= a must be positive.
If this domain is unbounded to the right the derivative of the logpdf for x>=b
must be negative.
n_samples: int
Number of samples to draw.
random_stream : RandomState, optional
Seeded random number generator object with same interface as a NumPy
RandomState object. Defaults to `None` in which case a NumPy
RandomState seeded from `/dev/urandom` if available or the clock if not
will be used.
Returns
----------
samples : list
A list of samples drawn from the
target distribution :math:`f`
with the given `logpdf`.
Examples
----------
Sampling from a simple gaussian, adaptive rejection sampling style.
We use the logpdf of a standard gaussian and this small code snippet
demonstrates that our sample approximation accurately approximates the mean:
>>> from math import isclose
>>> from numpy import log, exp, mean
>>> gaussian_logpdf = lambda x, sigma=1: log(exp(-x ** 2 / sigma))
>>> a, b = -2, 2 # a < b must hold
>>> domain = (float("-inf"), float("inf"))
>>> n_samples = 10000
>>> samples = adaptive_rejection_sampling(logpdf=gaussian_logpdf, a=a, b=b, domain=domain, n_samples=n_samples)
>>> isclose(mean(samples), 0.0, abs_tol=1e-02)
True | [
"Adaptive",
"rejection",
"sampling",
"samples",
"exactly",
"(",
"all",
"samples",
"are",
"i",
".",
"i",
".",
"d",
")",
"and",
"efficiently",
"from",
"any",
"univariate",
"log",
"-",
"concave",
"distribution",
".",
"The",
"basic",
"idea",
"is",
"to",
"successively",
"determine",
"an",
"envelope",
"of",
"straight",
"-",
"line",
"segments",
"to",
"construct",
"an",
"increasingly",
"accurate",
"approximation",
"of",
"the",
"logarithm",
".",
"It",
"does",
"not",
"require",
"any",
"normalization",
"of",
"the",
"target",
"distribution",
"."
] | train | https://github.com/MFreidank/ARSpy/blob/866885071b43e36a529f2fecf584ceef5248d800/arspy/ars.py#L29-L168 |
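Illustrative sketch (not part of the dataset row): the docstring's doctest repeated with a seeded `RandomState` so the draw is reproducible; the seed value (42) is arbitrary and only for illustration.

```python
# Hedged sketch: the docstring's example with an explicitly seeded RandomState so the
# draw is reproducible; the seed value is arbitrary.
from math import isclose
from numpy import log, exp, mean
from numpy.random import RandomState
from arspy.ars import adaptive_rejection_sampling

gaussian_logpdf = lambda x, sigma=1: log(exp(-x ** 2 / sigma))  # log-concave target, up to a constant
samples = adaptive_rejection_sampling(
    logpdf=gaussian_logpdf,
    a=-2, b=2,                                   # a < b, both inside the domain
    domain=(float("-inf"), float("inf")),
    n_samples=10000,
    random_stream=RandomState(42),               # seeded generator for reproducibility
)
assert isclose(mean(samples), 0.0, abs_tol=1e-02)  # sample mean close to the true mean of 0
```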
KeithSSmith/switcheo-python | switcheo/neo/signatures.py | sign_create_cancellation | def sign_create_cancellation(cancellation_params, key_pair):
"""
Function to sign the parameters required to create a cancellation request from the Switcheo Exchange.
Execution of this function is as follows::
sign_create_cancellation(cancellation_params=signable_params, key_pair=key_pair)
The expected return result for this function is as follows::
{
'order_id': 'aa647b95-d546-4d29-961e-bd62b18b07bf',
'timestamp': 1542092600331,
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'signature': '475bc3ecd2310201a3b5357b52b1866aaf5a5618932500e43503ebb....'
}
:param cancellation_params: Dictionary with Order ID and timestamp to sign for creating the cancellation.
:type cancellation_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary of signed message to send to the Switcheo API.
"""
encoded_message = encode_message(cancellation_params)
create_params = cancellation_params.copy()
create_params['address'] = neo_get_scripthash_from_private_key(private_key=key_pair.PrivateKey).ToString()
create_params['signature'] = sign_message(encoded_message=encoded_message,
private_key_hex=private_key_to_hex(key_pair=key_pair))
return create_params | python | def sign_create_cancellation(cancellation_params, key_pair):
"""
Function to sign the parameters required to create a cancellation request from the Switcheo Exchange.
Execution of this function is as follows::
sign_create_cancellation(cancellation_params=signable_params, key_pair=key_pair)
The expected return result for this function is as follows::
{
'order_id': 'aa647b95-d546-4d29-961e-bd62b18b07bf',
'timestamp': 1542092600331,
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'signature': '475bc3ecd2310201a3b5357b52b1866aaf5a5618932500e43503ebb....'
}
:param cancellation_params: Dictionary with Order ID and timestamp to sign for creating the cancellation.
:type cancellation_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary of signed message to send to the Switcheo API.
"""
encoded_message = encode_message(cancellation_params)
create_params = cancellation_params.copy()
create_params['address'] = neo_get_scripthash_from_private_key(private_key=key_pair.PrivateKey).ToString()
create_params['signature'] = sign_message(encoded_message=encoded_message,
private_key_hex=private_key_to_hex(key_pair=key_pair))
return create_params | [
"def",
"sign_create_cancellation",
"(",
"cancellation_params",
",",
"key_pair",
")",
":",
"encoded_message",
"=",
"encode_message",
"(",
"cancellation_params",
")",
"create_params",
"=",
"cancellation_params",
".",
"copy",
"(",
")",
"create_params",
"[",
"'address'",
"]",
"=",
"neo_get_scripthash_from_private_key",
"(",
"private_key",
"=",
"key_pair",
".",
"PrivateKey",
")",
".",
"ToString",
"(",
")",
"create_params",
"[",
"'signature'",
"]",
"=",
"sign_message",
"(",
"encoded_message",
"=",
"encoded_message",
",",
"private_key_hex",
"=",
"private_key_to_hex",
"(",
"key_pair",
"=",
"key_pair",
")",
")",
"return",
"create_params"
] | Function to sign the parameters required to create a cancellation request from the Switcheo Exchange.
Execution of this function is as follows::
sign_create_cancellation(cancellation_params=signable_params, key_pair=key_pair)
The expected return result for this function is as follows::
{
'order_id': 'aa647b95-d546-4d29-961e-bd62b18b07bf',
'timestamp': 1542092600331,
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'signature': '475bc3ecd2310201a3b5357b52b1866aaf5a5618932500e43503ebb....'
}
:param cancellation_params: Dictionary with Order ID and timestamp to sign for creating the cancellation.
:type cancellation_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary of signed message to send to the Switcheo API. | [
"Function",
"to",
"sign",
"the",
"parameters",
"required",
"to",
"create",
"a",
"cancellation",
"request",
"from",
"the",
"Switcheo",
"Exchange",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/neo/signatures.py#L15-L42 |
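Illustrative sketch (not part of the dataset row): signing cancellation parameters shaped like the sample response above. The KeyPair construction is an assumption not shown in this row.

```python
# Hedged sketch of signing cancellation parameters. Field values mirror the sample
# response in the docstring above; the NEO KeyPair construction is an assumption.
from switcheo.neo.signatures import sign_create_cancellation

cancellation_params = {
    'order_id': 'aa647b95-d546-4d29-961e-bd62b18b07bf',  # order to cancel
    'timestamp': 1542092600331,                          # epoch milliseconds
}
# key_pair = ...  # NEO KeyPair for the wallet that placed the order
# signed = sign_create_cancellation(cancellation_params=cancellation_params, key_pair=key_pair)
# signed adds 'address' and 'signature' to the dict, as in the sample response above
```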
KeithSSmith/switcheo-python | switcheo/neo/signatures.py | sign_execute_cancellation | def sign_execute_cancellation(cancellation_params, key_pair):
"""
Function to sign the parameters required to execute a cancellation request on the Switcheo Exchange.
Execution of this function is as follows::
sign_execute_cancellation(cancellation_params=signable_params, key_pair=key_pair)
The expected return result for this function is as follows::
{
'signature': '6a40d6c011b7517f8fd3f2d0de32dd486adfd1d424d06d56c80eb....'
}
:param cancellation_params: Parameters the Switcheo Exchange returns from the create cancellation.
:type cancellation_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary of signed message to send to the Switcheo API.
"""
signature = sign_transaction(transaction=cancellation_params['transaction'],
private_key_hex=private_key_to_hex(key_pair=key_pair))
return {'signature': signature} | python | def sign_execute_cancellation(cancellation_params, key_pair):
"""
Function to sign the parameters required to execute a cancellation request on the Switcheo Exchange.
Execution of this function is as follows::
sign_execute_cancellation(cancellation_params=signable_params, key_pair=key_pair)
The expected return result for this function is as follows::
{
'signature': '6a40d6c011b7517f8fd3f2d0de32dd486adfd1d424d06d56c80eb....'
}
:param cancellation_params: Parameters the Switcheo Exchange returns from the create cancellation.
:type cancellation_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary of signed message to send to the Switcheo API.
"""
signature = sign_transaction(transaction=cancellation_params['transaction'],
private_key_hex=private_key_to_hex(key_pair=key_pair))
return {'signature': signature} | [
"def",
"sign_execute_cancellation",
"(",
"cancellation_params",
",",
"key_pair",
")",
":",
"signature",
"=",
"sign_transaction",
"(",
"transaction",
"=",
"cancellation_params",
"[",
"'transaction'",
"]",
",",
"private_key_hex",
"=",
"private_key_to_hex",
"(",
"key_pair",
"=",
"key_pair",
")",
")",
"return",
"{",
"'signature'",
":",
"signature",
"}"
] | Function to sign the parameters required to execute a cancellation request on the Switcheo Exchange.
Execution of this function is as follows::
sign_execute_cancellation(cancellation_params=signable_params, key_pair=key_pair)
The expected return result for this function is as follows::
{
'signature': '6a40d6c011b7517f8fd3f2d0de32dd486adfd1d424d06d56c80eb....'
}
:param cancellation_params: Parameters the Switcheo Exchange returns from the create cancellation.
:type cancellation_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary of signed message to send to the Switcheo API. | [
"Function",
"to",
"sign",
"the",
"parameters",
"required",
"to",
"execute",
"a",
"cancellation",
"request",
"on",
"the",
"Switcheo",
"Exchange",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/neo/signatures.py#L45-L66 |
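Illustrative sketch (not part of the dataset row): the dict passed here is the exchange's create-cancellation response and must carry a `transaction` entry; that shape and the KeyPair construction are assumptions beyond what this row shows.

```python
# Hedged sketch: sign_execute_cancellation() signs the 'transaction' returned by the
# exchange's create-cancellation call. The response shape and the KeyPair construction
# are assumptions beyond what this row shows.
from switcheo.neo.signatures import sign_execute_cancellation

# create_cancellation = {..., 'transaction': {...}}       # response of the create-cancellation API call
# signed = sign_execute_cancellation(cancellation_params=create_cancellation, key_pair=key_pair)
# signed == {'signature': '...'}                          # per the sample response above
```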
KeithSSmith/switcheo-python | switcheo/neo/signatures.py | sign_create_deposit | def sign_create_deposit(deposit_params, key_pair):
"""
Function to sign the parameters required to create a deposit request on the Switcheo Exchange.
Execution of this function is as follows::
sign_create_deposit(deposit_details=create_deposit, key_pair=key_pair)
The expected return result for this function is as follows::
{
'blockchain': 'neo',
'asset_id': 'SWTH',
'amount': '100',
'timestamp': 1542091927575,
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'signature': '24ef6c63964988a2efe5fe67f04f46fdc2f1504fb5....'
}
:param deposit_params: The parameters generated by the create deposit function that now requires signature.
:type deposit_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary response of signed deposit request that is ready to be executed on the NEO blockchain.
"""
encoded_message = encode_message(deposit_params)
create_params = deposit_params.copy()
create_params['address'] = neo_get_scripthash_from_private_key(private_key=key_pair.PrivateKey).ToString()
create_params['signature'] = sign_message(encoded_message=encoded_message,
private_key_hex=private_key_to_hex(key_pair=key_pair))
return create_params | python | def sign_create_deposit(deposit_params, key_pair):
"""
Function to sign the parameters required to create a deposit request on the Switcheo Exchange.
Execution of this function is as follows::
sign_create_deposit(deposit_details=create_deposit, key_pair=key_pair)
The expected return result for this function is as follows::
{
'blockchain': 'neo',
'asset_id': 'SWTH',
'amount': '100',
'timestamp': 1542091927575,
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'signature': '24ef6c63964988a2efe5fe67f04f46fdc2f1504fb5....'
}
:param deposit_params: The parameters generated by the create deposit function that now requires signature.
:type deposit_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary response of signed deposit request that is ready to be executed on the NEO blockchain.
"""
encoded_message = encode_message(deposit_params)
create_params = deposit_params.copy()
create_params['address'] = neo_get_scripthash_from_private_key(private_key=key_pair.PrivateKey).ToString()
create_params['signature'] = sign_message(encoded_message=encoded_message,
private_key_hex=private_key_to_hex(key_pair=key_pair))
return create_params | [
"def",
"sign_create_deposit",
"(",
"deposit_params",
",",
"key_pair",
")",
":",
"encoded_message",
"=",
"encode_message",
"(",
"deposit_params",
")",
"create_params",
"=",
"deposit_params",
".",
"copy",
"(",
")",
"create_params",
"[",
"'address'",
"]",
"=",
"neo_get_scripthash_from_private_key",
"(",
"private_key",
"=",
"key_pair",
".",
"PrivateKey",
")",
".",
"ToString",
"(",
")",
"create_params",
"[",
"'signature'",
"]",
"=",
"sign_message",
"(",
"encoded_message",
"=",
"encoded_message",
",",
"private_key_hex",
"=",
"private_key_to_hex",
"(",
"key_pair",
"=",
"key_pair",
")",
")",
"return",
"create_params"
] | Function to sign the parameters required to create a deposit request on the Switcheo Exchange.
Execution of this function is as follows::
sign_create_deposit(deposit_details=create_deposit, key_pair=key_pair)
The expected return result for this function is as follows::
{
'blockchain': 'neo',
'asset_id': 'SWTH',
'amount': '100',
'timestamp': 1542091927575,
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'signature': '24ef6c63964988a2efe5fe67f04f46fdc2f1504fb5....'
}
:param deposit_params: The parameters generated by the create deposit function that now requires signature.
:type deposit_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary response of signed deposit request that is ready to be executed on the NEO blockchain. | [
"Function",
"to",
"create",
"a",
"deposit",
"request",
"by",
"generating",
"a",
"transaction",
"request",
"from",
"the",
"Switcheo",
"API",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/neo/signatures.py#L69-L99 |
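Illustrative sketch (not part of the dataset row): signing deposit-creation parameters shaped like the sample response above. The KeyPair construction is an assumption not shown in this row.

```python
# Hedged sketch of signing deposit-creation parameters. Field values mirror the sample
# response in the docstring above; the NEO KeyPair construction is an assumption.
from switcheo.neo.signatures import sign_create_deposit

deposit_params = {
    'blockchain': 'neo',
    'asset_id': 'SWTH',
    'amount': '100',
    'timestamp': 1542091927575,                           # epoch milliseconds
    'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
}
# key_pair = ...  # NEO KeyPair for the depositing wallet
# signed = sign_create_deposit(deposit_params=deposit_params, key_pair=key_pair)
# signed adds 'address' and 'signature', as in the sample response above
```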
KeithSSmith/switcheo-python | switcheo/neo/signatures.py | sign_execute_deposit | def sign_execute_deposit(deposit_params, key_pair):
"""
Function to execute the deposit request by signing the transaction generated by the create deposit function.
Execution of this function is as follows::
sign_execute_deposit(deposit_details=create_deposit, key_pair=key_pair)
The expected return result for this function is as follows::
{
'signature': '3cc4a5cb7b7d50383e799add2ba35382b6f2f1b2e3b97802....'
}
:param deposit_params: The parameters generated by the create deposit function that now requires signature.
:type deposit_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary with the result status of the deposit attempt.
"""
signature = sign_transaction(transaction=deposit_params['transaction'],
private_key_hex=private_key_to_hex(key_pair=key_pair))
return {'signature': signature} | python | def sign_execute_deposit(deposit_params, key_pair):
"""
Function to execute the deposit request by signing the transaction generated by the create deposit function.
Execution of this function is as follows::
sign_execute_deposit(deposit_details=create_deposit, key_pair=key_pair)
The expected return result for this function is as follows::
{
'signature': '3cc4a5cb7b7d50383e799add2ba35382b6f2f1b2e3b97802....'
}
:param deposit_params: The parameters generated by the create deposit function that now requires signature.
:type deposit_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary with the result status of the deposit attempt.
"""
signature = sign_transaction(transaction=deposit_params['transaction'],
private_key_hex=private_key_to_hex(key_pair=key_pair))
return {'signature': signature} | [
"def",
"sign_execute_deposit",
"(",
"deposit_params",
",",
"key_pair",
")",
":",
"signature",
"=",
"sign_transaction",
"(",
"transaction",
"=",
"deposit_params",
"[",
"'transaction'",
"]",
",",
"private_key_hex",
"=",
"private_key_to_hex",
"(",
"key_pair",
"=",
"key_pair",
")",
")",
"return",
"{",
"'signature'",
":",
"signature",
"}"
] | Function to execute the deposit request by signing the transaction generated by the create deposit function.
Execution of this function is as follows::
sign_execute_deposit(deposit_details=create_deposit, key_pair=key_pair)
The expected return result for this function is as follows::
{
'signature': '3cc4a5cb7b7d50383e799add2ba35382b6f2f1b2e3b97802....'
}
:param deposit_params: The parameters generated by the create deposit function that now requires signature.
:type deposit_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary with the result status of the deposit attempt. | [
"Function",
"to",
"execute",
"the",
"deposit",
"request",
"by",
"signing",
"the",
"transaction",
"generated",
"by",
"the",
"create",
"deposit",
"function",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/neo/signatures.py#L102-L123 |
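Illustrative sketch (not part of the dataset row): the dict passed here is the create-deposit response and must carry a `transaction` entry; that shape and the KeyPair construction are assumptions beyond what this row shows.

```python
# Hedged sketch: sign_execute_deposit() signs the 'transaction' contained in the
# create-deposit response. The response shape and the KeyPair construction are
# assumptions beyond what this row shows.
from switcheo.neo.signatures import sign_execute_deposit

# create_deposit = {..., 'transaction': {...}}            # response of the create-deposit API call
# signed = sign_execute_deposit(deposit_params=create_deposit, key_pair=key_pair)
# signed == {'signature': '...'}                          # per the sample response above
```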
KeithSSmith/switcheo-python | switcheo/neo/signatures.py | sign_create_order | def sign_create_order(order_params, key_pair):
"""
Function to sign the create order parameters and send to the Switcheo API.
Execution of this function is as follows::
sign_create_order(order_params=signable_params, key_pair=key_pair)
The expected return result for this function is as follows::
{
'blockchain': 'neo',
'pair': 'SWTH_NEO',
'side': 'buy',
'price': '0.00001000',
'want_amount': '1000000000000',
'use_native_tokens': True,
'order_type': 'limit',
'timestamp': 1542091535839,
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'signature': '88e93c14a7d3c2cf30dec012ad5cb69f5ff26fe2a....'
}
:param order_params: Parameters to create an order to be submitted to the Switcheo Order Book.
:type order_params: dict
:param key_pair: The NEO key pair to be used to sign messages for the NEO Blockchain.
:type key_pair: KeyPair
:return: Dictionary of signed message to send to the Switcheo API.
"""
encoded_message = encode_message(order_params)
create_params = order_params.copy()
create_params['address'] = neo_get_scripthash_from_private_key(private_key=key_pair.PrivateKey).ToString()
create_params['signature'] = sign_message(encoded_message=encoded_message,
private_key_hex=private_key_to_hex(key_pair=key_pair))
return create_params | python | def sign_create_order(order_params, key_pair):
"""
python | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/neo/signatures.py#L126-L160
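A minimal usage sketch for sign_create_order (not part of the dataset row): it assumes an unsigned order dictionary `signable_params` shaped like the example above and an already-constructed neocore KeyPair `kp`; both names are illustrative only.

# Hedged sketch: `signable_params` and `kp` are assumed to exist already.
signed_order = sign_create_order(order_params=signable_params, key_pair=kp)
# The call adds the script-hash 'address' and hex 'signature' fields expected by the Switcheo API.
print(signed_order['address'], signed_order['signature'])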
KeithSSmith/switcheo-python | switcheo/neo/signatures.py | sign_execute_order

def sign_execute_order(order_params, key_pair):
    """
    Function to execute the order request by signing the transaction generated from the create order function.

    Execution of this function is as follows::

        sign_execute_order(order_params=signable_params, key_pair=key_pair)

    The expected return result for this function is as follows::

        {
            'signatures': {
                'fill_groups': {},
                'fills': {},
                'makes': {
                    '952defd3-ad8a-4db3-bbd1-27d58ff6c7bd': '3f5aa331a731a808fe260502421cbb06ae3d5ea5ddfb1....'
                }
            }
        }

    :param order_params: The parameters generated by the create function that now require signing.
    :type order_params: dict
    :param key_pair: The NEO key pair to be used to sign messages for the NEO Blockchain.
    :type key_pair: KeyPair
    :return: Dictionary of the signed transaction to place an order on the Switcheo Order Book.
    """
    execute_params = {
        'signatures': {
            'fill_groups': {},
            'fills': sign_txn_array(messages=order_params['fills'],
                                    private_key_hex=private_key_to_hex(key_pair=key_pair)),
            'makes': sign_txn_array(messages=order_params['makes'],
                                    private_key_hex=private_key_to_hex(key_pair=key_pair))
        }
    }
    return execute_params

python | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/neo/signatures.py#L163-L197
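A hedged usage sketch (illustrative names only): `created_order` is assumed to be the dictionary returned by the create-order step, carrying the 'fills' and 'makes' lists that this function signs, and `kp` an existing neocore KeyPair.

signed = sign_execute_order(order_params=created_order, key_pair=kp)
# signed['signatures']['makes'] maps each make id to its hex signature, ready for the broadcast request.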
KeithSSmith/switcheo-python | switcheo/neo/signatures.py | sign_create_withdrawal

def sign_create_withdrawal(withdrawal_params, key_pair):
    """
    Function to create the withdrawal request by signing the parameters necessary for withdrawal.

    Execution of this function is as follows::

        sign_create_withdrawal(withdrawal_params=signable_params, key_pair=key_pair)

    The expected return result for this function is as follows::

        {
            'blockchain': 'neo',
            'asset_id': 'SWTH',
            'amount': '100',
            'timestamp': 1542090737236,
            'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
            'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
            'signature': 'f66d604c0a80940bf70ce9e13c0fd47bc79de....'
        }

    :param withdrawal_params: Dictionary specifications for withdrawal from the Switcheo Smart Contract.
    :type withdrawal_params: dict
    :param key_pair: The NEO key pair to be used to sign messages for the NEO Blockchain.
    :type key_pair: KeyPair
    :return: Dictionary of parameters to be sent to the Switcheo API.
    """
    encoded_message = encode_message(withdrawal_params)
    create_params = withdrawal_params.copy()
    create_params['address'] = neo_get_scripthash_from_private_key(private_key=key_pair.PrivateKey).ToString()
    create_params['signature'] = sign_message(encoded_message=encoded_message,
                                              private_key_hex=private_key_to_hex(key_pair=key_pair))
    return create_params

python | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/neo/signatures.py#L200-L230
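A hedged sketch of building and signing a withdrawal request; the field values mirror the docstring example and `kp` is assumed to be an existing neocore KeyPair.

signable_params = {
    'blockchain': 'neo',
    'asset_id': 'SWTH',
    'amount': '100',
    'timestamp': get_epoch_milliseconds(),
    'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
}
withdrawal_request = sign_create_withdrawal(withdrawal_params=signable_params, key_pair=kp)
# The returned dict gains 'address' and 'signature' and can be posted to the create-withdrawal endpoint.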
KeithSSmith/switcheo-python | switcheo/neo/signatures.py | sign_execute_withdrawal

def sign_execute_withdrawal(withdrawal_params, key_pair):
    """
    Function to execute the withdrawal request by signing the transaction generated from the create withdrawal function.

    Execution of this function is as follows::

        sign_execute_withdrawal(withdrawal_params=signable_params, key_pair=key_pair)

    The expected return result for this function is as follows::

        {
            'id': '3e1c0802-b44e-4681-a94d-29c1dec2f518',
            'timestamp': 1542090738192,
            'signature': 'e05a7b7bd30eb85959d75ea634cee06ad35d96502a763ae40....'
        }

    :param withdrawal_params: Parameters passed from the create withdrawal function to be signed and confirmed.
    :type withdrawal_params: dict
    :param key_pair: The NEO key pair to be used to sign messages for the NEO Blockchain.
    :type key_pair: KeyPair
    :return: Dictionary of parameters to be sent to the Switcheo API.
    """
    withdrawal_id = withdrawal_params['id']
    signable_params = {
        'id': withdrawal_id,
        'timestamp': get_epoch_milliseconds()
    }
    encoded_message = encode_message(signable_params)
    execute_params = signable_params.copy()
    execute_params['signature'] = sign_message(encoded_message=encoded_message,
                                               private_key_hex=private_key_to_hex(key_pair=key_pair))
    return execute_params

python | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/neo/signatures.py#L233-L263
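A hedged sketch (illustrative names): `withdrawal` is assumed to be the API response of the create-withdrawal step, which carries the 'id' that this function re-signs together with a fresh timestamp.

confirmation = sign_execute_withdrawal(withdrawal_params=withdrawal, key_pair=kp)
# confirmation contains 'id', 'timestamp' (milliseconds), and 'signature' for the execute-withdrawal endpoint.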
runfalk/spans | spans/settypes.py | MetaRangeSet.register

def register(cls, range_mixin):
    """
    Decorator for registering range set mixins for global use. This works
    the same as :meth:`~spans.settypes.MetaRangeSet.add`

    :param range_mixin: A :class:`~spans.types.Range` mixin class to
                        register a decorated range set mixin class for
    :return: A decorator to use on a range set mixin class
    """
    def decorator(range_set_mixin):
        cls.add(range_mixin, range_set_mixin)
        return range_set_mixin
    return decorator

python | train | https://github.com/runfalk/spans/blob/59ed73407a569c3be86cfdb4b8f438cb8c794540/spans/settypes.py#L60-L73
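A hedged sketch with hypothetical class names, assuming the method is invoked through the metaclass as the docstring's decorator wording suggests; it has the same effect as calling MetaRangeSet.add directly.

@MetaRangeSet.register(MyRangeMixin)      # MyRangeMixin stands in for a spans.types.Range mixin
class MyRangeSetMixin(object):
    """Behaviour shared by range sets built from MyRangeMixin ranges."""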
deshima-dev/decode | decode/models/functions.py | pca

def pca(onarray, offarray, n=10, exchs=None, pc=False, mode='mean'):
    """Apply Principal Component Analysis (PCA) method to estimate baselines at each time.

    Args:
        onarray (decode.array): Decode array of on-point observations.
        offarray (decode.array): Decode array of off-point observations.
        n (int): The number of principal components.
        pc (bool): When True, this function also returns eigen vectors and their coefficients.
        mode (None or str): The way of correcting offsets.
            'mean': Mean.
            'median': Median.
            None: No correction.

    Returns:
        filtered (decode.array): Baseline-subtracted array.
        When pc is True:
            Ps (list(np.ndarray)): Eigen vectors.
            Cs (list(np.ndarray)): Coefficients.
    """
    logger = getLogger('decode.models.pca')
    logger.info('n_components exchs mode')
    if exchs is None:
        exchs = [16, 44, 46]
    logger.info('{} {} {}'.format(n, exchs, mode))

    offid = np.unique(offarray.scanid)
    onid = np.unique(onarray.scanid)

    onarray = onarray.copy()  # Xarray
    onarray[:, exchs] = 0
    onvalues = onarray.values
    onscanid = onarray.scanid.values
    offarray = offarray.copy()  # Xarray
    offarray[:, exchs] = 0
    offvalues = offarray.values
    offscanid = offarray.scanid.values

    Ps, Cs = [], []
    Xatm = dc.full_like(onarray, onarray)
    Xatmvalues = Xatm.values
    model = TruncatedSVD(n_components=n)
    for i in onid:
        leftid = np.searchsorted(offid, i) - 1
        rightid = np.searchsorted(offid, i)

        Xon = onvalues[onscanid == i]
        if leftid == -1:
            Xoff = offvalues[offscanid == offid[rightid]]
            Xoff_m = getattr(np, 'nan'+mode)(Xoff, axis=0) if mode is not None else 0
            Xon_m = Xoff_m
            model.fit(Xoff - Xoff_m)
        elif rightid == len(offid):
            Xoff = offvalues[offscanid == offid[leftid]]
            Xoff_m = getattr(np, 'nan'+mode)(Xoff, axis=0) if mode is not None else 0
            Xon_m = Xoff_m
            model.fit(Xoff - Xoff_m)
        else:
            Xoff_l = offvalues[offscanid == offid[leftid]]
            Xoff_lm = getattr(np, 'nan'+mode)(Xoff_l, axis=0) if mode is not None else 0
            Xoff_r = offvalues[offscanid == offid[rightid]]
            Xoff_rm = getattr(np, 'nan'+mode)(Xoff_r, axis=0) if mode is not None else 0
            Xon_m = getattr(np, 'nan'+mode)(np.vstack([Xoff_l, Xoff_r]), axis=0) if mode is not None else 0
            model.fit(np.vstack([Xoff_l - Xoff_lm, Xoff_r - Xoff_rm]))
        P = model.components_
        C = model.transform(Xon - Xon_m)

        Xatmvalues[onscanid == i] = C @ P + Xon_m
        # Xatms.append(dc.full_like(Xon, C @ P + Xon_m.values))
        Ps.append(P)
        Cs.append(C)

    if pc:
        return Xatm, Ps, Cs
    else:
        return Xatm

python | train | https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/models/functions.py#L21-L95
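A hedged usage sketch (variable names illustrative): `scan_on` and `scan_off` are assumed to be decode arrays already split into on-point and off-point samples. In the code above, the returned Xatm is the off-point component projected onto each on-point sample, so subtracting it is the usual next step.

atm, eigvecs, coeffs = pca(scan_on, scan_off, n=10, mode='mean', pc=True)
cleaned = scan_on - atm   # remove the correlated (atmospheric) baseline from the on-point data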
deshima-dev/decode | decode/models/functions.py | r_division

def r_division(onarray, offarray, rarray, mode='mean'):
    """Apply R division.

    Args:
        onarray (decode.array): Decode array of on-point observations.
        offarray (decode.array): Decode array of off-point observations.
        rarray (decode.array): Decode array of R observations.
        mode (str): Method for the selection of nominal R value.
            'mean': Mean.
            'median': Median.

    Returns:
        onarray_cal (decode.array): Calibrated array of on-point observations.
        offarray_cal (decode.array): Calibrated array of off-point observations.
    """
    logger = getLogger('decode.models.r_division')
    logger.info('mode')
    logger.info('{}'.format(mode))

    offid = np.unique(offarray.scanid)
    onid = np.unique(onarray.scanid)
    rid = np.unique(rarray.scanid)

    onarray = onarray.copy()  # Xarray
    onvalues = onarray.values
    onscanid = onarray.scanid.values
    offarray = offarray.copy()  # Xarray
    offvalues = offarray.values
    offscanid = offarray.scanid.values
    rarray = rarray.copy()  # Xarray
    rvalues = rarray.values
    rscanid = rarray.scanid.values

    for i in onid:
        rleftid = np.searchsorted(rid, i) - 1
        rrightid = np.searchsorted(rid, i)
        if rleftid == -1:
            Xr = rvalues[rscanid == rid[rrightid]]
            Xr_m = getattr(np, 'nan'+mode)(Xr, axis=0)
        elif rrightid == len(rid):
            Xr = rvalues[rscanid == rid[rleftid]]
            Xr_m = getattr(np, 'nan'+mode)(Xr, axis=0)
        else:
            Xr_l = rvalues[rscanid == rid[rleftid]]
            Xr_r = rvalues[rscanid == rid[rrightid]]
            Xr_m = getattr(np, 'nan'+mode)(np.vstack([Xr_l, Xr_r]), axis=0)
        onvalues[onscanid == i] /= Xr_m

    for j in offid:
        rleftid = np.searchsorted(rid, j) - 1
        rrightid = np.searchsorted(rid, j)
        Xoff_m = getattr(np, 'nan'+mode)(offvalues[offscanid == j], axis=0)
        if rleftid == -1:
            Xr = rvalues[rscanid == rid[rrightid]]
            Xr_m = getattr(np, 'nan'+mode)(Xr, axis=0)
        elif rrightid == len(rid):
            Xr = rvalues[rscanid == rid[rleftid]]
            Xr_m = getattr(np, 'nan'+mode)(Xr, axis=0)
        else:
            Xr_l = rvalues[rscanid == rid[rleftid]]
            Xr_r = rvalues[rscanid == rid[rrightid]]
            Xr_m = getattr(np, 'nan'+mode)(np.vstack([Xr_l, Xr_r]), axis=0)
        offvalues[offscanid == j] /= Xr_m

    Xon_rdiv = dc.full_like(onarray, onarray)
    Xoff_rdiv = dc.full_like(offarray, offarray)
    Xonoff_rdiv = dc.concat([Xon_rdiv, Xoff_rdiv], dim='t')
    Xonoff_rdiv_sorted = Xonoff_rdiv[np.argsort(Xonoff_rdiv.time.values)]

    scantype = Xonoff_rdiv_sorted.scantype.values
    newscanid = np.cumsum(np.hstack([False, scantype[1:] != scantype[:-1]]))
    onmask = np.in1d(Xonoff_rdiv_sorted.scanid, onid)
    offmask = np.in1d(Xonoff_rdiv_sorted.scanid, offid)
    Xon_rdiv = Xonoff_rdiv_sorted[onmask]
    Xoff_rdiv = Xonoff_rdiv_sorted[offmask]
    Xon_rdiv.coords.update({'scanid': ('t', newscanid[onmask])})
    Xoff_rdiv.coords.update({'scanid': ('t', newscanid[offmask])})

    return Xon_rdiv, Xoff_rdiv

python | train | https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/models/functions.py#L139-L218
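A hedged usage sketch (illustrative names): `scan_on`, `scan_off`, and `scan_r` are assumed to be decode arrays holding the on-point, off-point, and R (calibrator) measurements of the same observation.

on_cal, off_cal = r_division(scan_on, scan_off, scan_r, mode='median')
# Both outputs are divided by the nominal R spectrum and relabelled with fresh consecutive scan ids.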
deshima-dev/decode | decode/models/functions.py | gauss_fit

def gauss_fit(map_data, chs=None, mode='deg', amplitude=1, x_mean=0, y_mean=0,
              x_stddev=None, y_stddev=None, theta=None, cov_matrix=None, noise=0, **kwargs):
    """Make a 2D Gaussian model and fit the observed data with the model.

    Args:
        map_data (xarray.Dataarray): Dataarray of cube or single chs.
        chs (list of int): in prep.
        mode (str): Coordinates for the fitting.
            'pix'
            'deg'
        amplitude (float or None): Initial amplitude value of Gaussian fitting.
        x_mean (float): Initial value of mean of the fitting Gaussian in x.
        y_mean (float): Initial value of mean of the fitting Gaussian in y.
        x_stddev (float or None): Standard deviation of the Gaussian in x before rotating by theta.
        y_stddev (float or None): Standard deviation of the Gaussian in y before rotating by theta.
        theta (float, optional or None): Rotation angle in radians.
        cov_matrix (ndarray, optional): A 2x2 covariance matrix. If specified, overrides the
            ``x_stddev``, ``y_stddev``, and ``theta`` defaults.

    Returns:
        decode cube (xarray cube) with fitting results in array and attrs.
    """
    if chs is None:
        chs = np.ogrid[0:63]  # the number of channels would be changed

    if len(chs) > 1:
        for n, ch in enumerate(chs):
            subdata = np.transpose(np.full_like(map_data[:, :, ch], map_data.values[:, :, ch]))
            subdata[np.isnan(subdata)] = 0

            if mode == 'deg':
                mX, mY = np.meshgrid(map_data.x, map_data.y)
            elif mode == 'pix':
                mX, mY = np.mgrid[0:len(map_data.y), 0:len(map_data.x)]

            g_init = models.Gaussian2D(amplitude=np.nanmax(subdata),
                                       x_mean=x_mean, y_mean=y_mean,
                                       x_stddev=x_stddev, y_stddev=y_stddev, theta=theta,
                                       cov_matrix=cov_matrix, **kwargs) + models.Const2D(noise)
            fit_g = fitting.LevMarLSQFitter()
            g = fit_g(g_init, mX, mY, subdata)

            g_init2 = models.Gaussian2D(amplitude=np.nanmax(subdata - g.amplitude_1),
                                        x_mean=x_mean, y_mean=y_mean,
                                        x_stddev=x_stddev, y_stddev=y_stddev, theta=theta,
                                        cov_matrix=cov_matrix, **kwargs)
            fit_g2 = fitting.LevMarLSQFitter()
            g2 = fit_g2(g_init2, mX, mY, subdata)

            if n == 0:
                results = np.array([g2(mX, mY)])
                peaks = np.array([g2.amplitude.value])
                x_means = np.array([g2.x_mean.value])
                y_means = np.array([g2.y_mean.value])
                x_stddevs = np.array([g2.x_stddev.value])
                y_stddevs = np.array([g2.y_stddev.value])
                thetas = np.array([g2.theta.value])
                if fit_g2.fit_info['param_cov'] is None:
                    uncerts = np.array([0])
                else:
                    error = np.diag(fit_g2.fit_info['param_cov'])**0.5
                    uncerts = np.array([error[0]])
            else:
                results = np.append(results, [g2(mX, mY)], axis=0)
                peaks = np.append(peaks, [g2.amplitude.value], axis=0)
                x_means = np.append(x_means, [g2.x_mean.value], axis=0)
                y_means = np.append(y_means, [g2.y_mean.value], axis=0)
                x_stddevs = np.append(x_stddevs, [g2.x_stddev.value], axis=0)
                y_stddevs = np.append(y_stddevs, [g2.y_stddev.value], axis=0)
                thetas = np.append(thetas, [g2.theta.value], axis=0)
                if fit_g2.fit_info['param_cov'] is None:
                    uncerts = np.append(uncerts, [0], axis=0)
                else:
                    error = np.diag(fit_g2.fit_info['param_cov'])**0.5
                    uncerts = np.append(uncerts, [error[0]], axis=0)

        result = map_data.copy()
        result.values = np.transpose(results)
        result.attrs.update({'peak': peaks, 'x_mean': x_means, 'y_mean': y_means,
                             'x_stddev': x_stddevs, 'y_stddev': y_stddevs,
                             'theta': thetas, 'uncert': uncerts})
    else:
        subdata = np.transpose(np.full_like(map_data[:, :, 0], map_data.values[:, :, 0]))
        subdata[np.isnan(subdata)] = 0

        if mode == 'deg':
            mX, mY = np.meshgrid(map_data.x, map_data.y)
        elif mode == 'pix':
            mX, mY = np.mgrid[0:len(map_data.y), 0:len(map_data.x)]

        g_init = models.Gaussian2D(amplitude=np.nanmax(subdata),
                                   x_mean=x_mean, y_mean=y_mean,
                                   x_stddev=x_stddev, y_stddev=y_stddev, theta=theta,
                                   cov_matrix=cov_matrix, **kwargs) + models.Const2D(noise)
        fit_g = fitting.LevMarLSQFitter()
        g = fit_g(g_init, mX, mY, subdata)

        g_init2 = models.Gaussian2D(amplitude=np.nanmax(subdata - g.amplitude_1),
                                    x_mean=x_mean, y_mean=y_mean,
                                    x_stddev=x_stddev, y_stddev=y_stddev, theta=theta,
                                    cov_matrix=cov_matrix, **kwargs)
        fit_g2 = fitting.LevMarLSQFitter()
        g2 = fit_g2(g_init2, mX, mY, subdata)

        results = np.array([g2(mX, mY)])
        peaks = np.array([g2.amplitude.value])
        x_means = np.array([g2.x_mean.value])
        y_means = np.array([g2.y_mean.value])
        x_stddevs = np.array([g2.x_stddev.value])
        y_stddevs = np.array([g2.y_stddev.value])
        thetas = np.array([g2.theta.value])
        error = np.diag(fit_g2.fit_info['param_cov'])**0.5
        uncerts = np.array(error[0])

        result = map_data.copy()
        result.values = np.transpose(results)
        result.attrs.update({'peak': peaks, 'x_mean': x_means, 'y_mean': y_means,
                             'x_stddev': x_stddevs, 'y_stddev': y_stddevs,
                             'theta': thetas, 'uncert': uncerts})

    return result

python | train | https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/models/functions.py#L221-L345
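A hedged usage sketch: `cube` is assumed to be a decode cube with (x, y, ch) axes, and the initial-guess values are illustrative only.

fitted = gauss_fit(cube, chs=list(range(63)), mode='deg',
                   x_mean=0, y_mean=0, x_stddev=0.005, y_stddev=0.005, theta=0)
# Per-channel fit results live in the attributes of the returned cube.
print(fitted.attrs['peak'][:3], fitted.attrs['uncert'][:3])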
facelessuser/pyspelling | pyspelling/filters/url.py | URLFilter.setup

def setup(self):
    """Setup."""

    self.emails = self.config['emails']
    self.urls = self.config['urls']

python | train | https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/url.py#L49-L53
facelessuser/pyspelling | pyspelling/filters/url.py | URLFilter._filter

def _filter(self, text):
    """Filter out the URL and email addresses."""

    if self.urls:
        text = RE_LINK.sub('', text)
    if self.emails:
        text = RE_MAIL.sub('', text)
    return text

python | train | https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/url.py#L55-L62
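A hedged sketch: `url_filter` is assumed to be a URLFilter instance already built by the pyspelling pipeline with both options enabled; the exact output depends on the RE_LINK and RE_MAIL patterns defined elsewhere in the module.

cleaned = url_filter._filter('Mail someone@example.com or visit https://example.com/docs')
# Both the address and the URL are stripped before the text reaches the spell checker.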
JMSwag/dsdev-utils | dsdev_utils/paths.py | get_mac_dot_app_dir

def get_mac_dot_app_dir(directory):
    """Returns parent directory of mac .app

    Args:
        directory (str): Current directory

    Returns:
        (str): Parent directory of mac .app
    """
    return os.path.dirname(os.path.dirname(os.path.dirname(directory)))

python | train | https://github.com/JMSwag/dsdev-utils/blob/5adbf9b3fd9fff92d1dd714423b08e26a5038e14/dsdev_utils/paths.py#L36-L47
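A small illustration (hypothetical path): a frozen macOS app usually runs from <bundle>.app/Contents/MacOS, so stepping up three directory levels yields the folder containing the .app bundle.

app_exe_dir = '/Applications/MyApp.app/Contents/MacOS'
print(get_mac_dot_app_dir(app_exe_dir))   # -> '/Applications'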
deshima-dev/decode | decode/utils/misc/functions.py | copy_function

def copy_function(func, name=None):
    """Copy a function object with different name.

    Args:
        func (function): Function to be copied.
        name (string, optional): Name of the new function.
            If not specified, the same name of `func` will be used.

    Returns:
        newfunc (function): New function with different name.
    """
    code = func.__code__
    newname = name or func.__name__
    newcode = CodeType(
        code.co_argcount,
        code.co_kwonlyargcount,
        code.co_nlocals,
        code.co_stacksize,
        code.co_flags,
        code.co_code,
        code.co_consts,
        code.co_names,
        code.co_varnames,
        code.co_filename,
        newname,
        code.co_firstlineno,
        code.co_lnotab,
        code.co_freevars,
        code.co_cellvars,
    )
    newfunc = FunctionType(
        newcode,
        func.__globals__,
        newname,
        func.__defaults__,
        func.__closure__,
    )
    newfunc.__dict__.update(func.__dict__)
    return newfunc

Args:
func (function): Function to be copied.
name (string, optional): Name of the new function.
If not spacified, the same name of `func` will be used.
Returns:
newfunc (function): New function with different name.
"""
code = func.__code__
newname = name or func.__name__
newcode = CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
newname,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
newfunc = FunctionType(
newcode,
func.__globals__,
newname,
func.__defaults__,
func.__closure__,
)
newfunc.__dict__.update(func.__dict__)
return newfunc | [
"def",
"copy_function",
"(",
"func",
",",
"name",
"=",
"None",
")",
":",
"code",
"=",
"func",
".",
"__code__",
"newname",
"=",
"name",
"or",
"func",
".",
"__name__",
"newcode",
"=",
"CodeType",
"(",
"code",
".",
"co_argcount",
",",
"code",
".",
"co_kwonlyargcount",
",",
"code",
".",
"co_nlocals",
",",
"code",
".",
"co_stacksize",
",",
"code",
".",
"co_flags",
",",
"code",
".",
"co_code",
",",
"code",
".",
"co_consts",
",",
"code",
".",
"co_names",
",",
"code",
".",
"co_varnames",
",",
"code",
".",
"co_filename",
",",
"newname",
",",
"code",
".",
"co_firstlineno",
",",
"code",
".",
"co_lnotab",
",",
"code",
".",
"co_freevars",
",",
"code",
".",
"co_cellvars",
",",
")",
"newfunc",
"=",
"FunctionType",
"(",
"newcode",
",",
"func",
".",
"__globals__",
",",
"newname",
",",
"func",
".",
"__defaults__",
",",
"func",
".",
"__closure__",
",",
")",
"newfunc",
".",
"__dict__",
".",
"update",
"(",
"func",
".",
"__dict__",
")",
"return",
"newfunc"
] | Copy a function object with different name.
Args:
func (function): Function to be copied.
name (string, optional): Name of the new function.
If not specified, the same name as `func` will be used.
Returns:
newfunc (function): New function with different name. | [
"Copy",
"a",
"function",
"object",
"with",
"different",
"name",
"."
] | train | https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/utils/misc/functions.py#L61-L100 |
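A minimal usage sketch for copy_function; the import path is inferred from the file location in the listing and assumes the decode package is installed. Note that the CodeType constructor gained a co_posonlyargcount argument in Python 3.8, so the listing as written targets the older constructor signature.

from decode.utils.misc.functions import copy_function  # path inferred from the listing above

def greet():
    """Say hello."""
    return 'hello'

# Same code object and behaviour, different __name__.
greet_copy = copy_function(greet, name='greet_copy')
print(greet_copy.__name__)  # -> greet_copy
print(greet_copy())         # -> hello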
deshima-dev/decode | decode/utils/misc/functions.py | one_thread_per_process | def one_thread_per_process():
"""Return a context manager where only one thread is allocated to a process.
This function is intended to be used as a with statement like::
>>> with one_thread_per_process():
... do_something() # one thread per process
Notes:
This function only works when MKL (Intel Math Kernel Library)
is installed and used in, for example, NumPy and SciPy.
Otherwise this function does nothing.
"""
try:
import mkl
is_mkl = True
except ImportError:
is_mkl = False
if is_mkl:
n_threads = mkl.get_max_threads()
mkl.set_num_threads(1)
try:
# block nested in the with statement
yield
finally:
# revert to the original value
mkl.set_num_threads(n_threads)
else:
yield | python | def one_thread_per_process():
"""Return a context manager where only one thread is allocated to a process.
This function is intended to be used as a with statement like::
>>> with one_thread_per_process():
... do_something() # one thread per process
Notes:
This function only works when MKL (Intel Math Kernel Library)
is installed and used in, for example, NumPy and SciPy.
Otherwise this function does nothing.
"""
try:
import mkl
is_mkl = True
except ImportError:
is_mkl = False
if is_mkl:
n_threads = mkl.get_max_threads()
mkl.set_num_threads(1)
try:
# block nested in the with statement
yield
finally:
# revert to the original value
mkl.set_num_threads(n_threads)
else:
yield | [
"def",
"one_thread_per_process",
"(",
")",
":",
"try",
":",
"import",
"mkl",
"is_mkl",
"=",
"True",
"except",
"ImportError",
":",
"is_mkl",
"=",
"False",
"if",
"is_mkl",
":",
"n_threads",
"=",
"mkl",
".",
"get_max_threads",
"(",
")",
"mkl",
".",
"set_num_threads",
"(",
"1",
")",
"try",
":",
"# block nested in the with statement",
"yield",
"finally",
":",
"# revert to the original value",
"mkl",
".",
"set_num_threads",
"(",
"n_threads",
")",
"else",
":",
"yield"
] | Return a context manager where only one thread is allocated to a process.
This function is intended to be used as a with statement like::
>>> with one_thread_per_process():
... do_something() # one thread per process
Notes:
This function only works when MKL (Intel Math Kernel Library)
is installed and used in, for example, NumPy and SciPy.
Otherwise this function does nothing. | [
"Return",
"a",
"context",
"manager",
"where",
"only",
"one",
"thread",
"is",
"allocated",
"to",
"a",
"process",
"."
] | train | https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/utils/misc/functions.py#L104-L134 |
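one_thread_per_process is written as a generator, so in the source it is presumably wrapped with contextlib.contextmanager (decorators are not captured in the listing). A minimal sketch of the intended usage, assuming the decode package and NumPy are installed:

import numpy as np

from decode.utils.misc.functions import one_thread_per_process  # path inferred from the listing

with one_thread_per_process():
    # With MKL present, the linear algebra below runs on one thread per process,
    # which avoids oversubscription when combined with multiprocessing.
    a = np.random.rand(500, 500)
    _ = a @ a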
MFreidank/ARSpy | arspy/hull.py | compute_hulls | def compute_hulls(S, fS, domain):
"""
(Re-)compute upper and lower hull given
the segment points `S` with function values
`fS` and the `domain` of the logpdf.
Parameters
----------
S : np.ndarray (N, 1)
Straight-line segment points accumulated thus far.
fS : tuple
Value of the `logpdf` under sampling for each
of the given segment points in `S`.
domain : Tuple[float, float]
Domain of `logpdf`.
May be unbounded on either or both sides,
in which case `(float("-inf"), float("inf"))`
would be passed.
If this domain is unbounded to the left,
the derivative of the logpdf
for x<= a must be positive.
If this domain is unbounded to the right the derivative of the logpdf for x>=b
must be negative.
Returns
----------
lower_hull: List[arspy.hull.HullNode]
upper_hull: List[arspy.hull.HullNode]
"""
assert(len(S) == len(fS))
assert(len(domain) == 2)
lower_hull = []
for li in range(len(S) - 1):
m = (fS[li + 1] - fS[li]) / (S[li + 1] - S[li])
b = fS[li] - m * S[li]
left = S[li]
right = S[li + 1]
lower_hull.append(HullNode(m=m, b=b, left=left, right=right))
# compute upper piecewise-linear hull
# expected final length of upper hull after full computation
n_upper_segments = 2 * (len(S) - 2) + isinf(domain[0]) + isinf(domain[1])
upper_hull = []
if isinf(domain[0]):
# first line (from -infinity)
m = (fS[1] - fS[0]) / (S[1] - S[0])
b = fS[0] - m * S[0]
pr = compute_segment_log_prob(float("-inf"), S[0], m, b)
upper_hull.append(
HullNode(m=m, b=b, pr=pr, left=float("-inf"), right=S[0])
)
# second line
m = (fS[2] - fS[1]) / (S[2] - S[1])
b = fS[1] - m * S[1]
pr = compute_segment_log_prob(S[0], S[1], m, b)
upper_hull.append(HullNode(m=m, b=b, pr=pr, left=S[0], right=S[1]))
# interior lines
# there are two lines between each abscissa
for li in range(1, len(S) - 2):
m1 = (fS[li] - fS[li - 1]) / (S[li] - S[li - 1])
b1 = fS[li] - m1 * S[li]
m2 = (fS[li + 2] - fS[li + 1]) / (S[li + 2] - S[li + 1])
b2 = fS[li + 1] - m2 * S[li + 1]
if isinf(m1) and isinf(m2):
raise ValueError("both hull slopes are infinite")
dx1 = S[li] - S[li - 1]
df1 = fS[li] - fS[li - 1]
dx2 = S[li + 2] - S[li + 1]
df2 = fS[li + 2] - fS[li + 1]
f1 = fS[li]
f2 = fS[li + 1]
x1 = S[li]
x2 = S[li + 1]
# more numerically stable than above
ix = ((f1 * dx1 - df1 * x1) * dx2 - (f2 * dx2 - df2 * x2) * dx1) / (df2 * dx1 - df1 * dx2)
if isinf(m1) or abs(m1 - m2) < 10.0 ** 8 * eps(m1):
ix = S[li]
pr1 = float("-inf")
pr2 = compute_segment_log_prob(ix, S[li + 1], m2, b2)
elif isinf(m2):
ix = S[li + 1]
pr1 = compute_segment_log_prob(S[li], ix, m1, b1)
pr2 = float("-inf")
else:
if isinf(ix):
raise ValueError("Non finite intersection")
if abs(ix - S[li]) < 10.0 ** 12 * eps(S[li]):
ix = S[li]
elif abs(ix - S[li + 1]) < 10.0**12 * eps(S[li + 1]):
ix = S[li + 1]
if ix < S[li] or ix > S[li + 1]:
raise ValueError("Intersection out of bounds -- logpdf is not concave")
pr1 = compute_segment_log_prob(S[li], ix, m1, b1)
pr2 = compute_segment_log_prob(ix, S[li + 1], m2, b2)
upper_hull.append(HullNode(m=m1, b=b1, pr=pr1, left=S[li], right=ix))
upper_hull.append(HullNode(m=m2, b=b2, pr=pr2, left=ix, right=S[li + 1]))
# second last line
m = (fS[-2] - fS[-3]) / float(S[-2] - S[-3])
b = fS[-2] - m * S[-2]
pr = compute_segment_log_prob(S[-2], S[-1], m, b)
upper_hull.append(HullNode(m=m, b=b, pr=pr, left=S[-2], right=S[-1]))
if isinf(domain[1]):
# last line (to infinity)
m = (fS[-1] - fS[-2]) / (S[-1] - S[-2])
b = fS[-1] - m * S[-1]
pr = compute_segment_log_prob(S[-1], float("inf"), m, b)
upper_hull.append(
HullNode(m=m, b=b, pr=pr, left=S[-1], right=float("inf"))
)
# normalize probabilities
normalized_probabilities = exp_normalize(
asarray([node.pr for node in upper_hull])
)
for node, probability in zip(upper_hull, normalized_probabilities):
node.pr = probability
assert(len(lower_hull) == len(S) - 1)
assert(len(upper_hull) == n_upper_segments)
return lower_hull, upper_hull | python | def compute_hulls(S, fS, domain):
"""
(Re-)compute upper and lower hull given
the segment points `S` with function values
`fS` and the `domain` of the logpdf.
Parameters
----------
S : np.ndarray (N, 1)
Straight-line segment points accumulated thus far.
fS : tuple
Value of the `logpdf` under sampling for each
of the given segment points in `S`.
domain : Tuple[float, float]
Domain of `logpdf`.
May be unbounded on either or both sides,
in which case `(float("-inf"), float("inf"))`
would be passed.
If this domain is unbounded to the left,
the derivative of the logpdf
for x<= a must be positive.
If this domain is unbounded to the right the derivative of the logpdf for x>=b
must be negative.
Returns
----------
lower_hull: List[arspy.hull.HullNode]
upper_hull: List[arspy.hull.HullNode]
"""
assert(len(S) == len(fS))
assert(len(domain) == 2)
lower_hull = []
for li in range(len(S) - 1):
m = (fS[li + 1] - fS[li]) / (S[li + 1] - S[li])
b = fS[li] - m * S[li]
left = S[li]
right = S[li + 1]
lower_hull.append(HullNode(m=m, b=b, left=left, right=right))
# compute upper piecewise-linear hull
# expected final length of upper hull after full computation
n_upper_segments = 2 * (len(S) - 2) + isinf(domain[0]) + isinf(domain[1])
upper_hull = []
if isinf(domain[0]):
# first line (from -infinity)
m = (fS[1] - fS[0]) / (S[1] - S[0])
b = fS[0] - m * S[0]
pr = compute_segment_log_prob(float("-inf"), S[0], m, b)
upper_hull.append(
HullNode(m=m, b=b, pr=pr, left=float("-inf"), right=S[0])
)
# second line
m = (fS[2] - fS[1]) / (S[2] - S[1])
b = fS[1] - m * S[1]
pr = compute_segment_log_prob(S[0], S[1], m, b)
upper_hull.append(HullNode(m=m, b=b, pr=pr, left=S[0], right=S[1]))
# interior lines
# there are two lines between each abscissa
for li in range(1, len(S) - 2):
m1 = (fS[li] - fS[li - 1]) / (S[li] - S[li - 1])
b1 = fS[li] - m1 * S[li]
m2 = (fS[li + 2] - fS[li + 1]) / (S[li + 2] - S[li + 1])
b2 = fS[li + 1] - m2 * S[li + 1]
if isinf(m1) and isinf(m2):
raise ValueError("both hull slopes are infinite")
dx1 = S[li] - S[li - 1]
df1 = fS[li] - fS[li - 1]
dx2 = S[li + 2] - S[li + 1]
df2 = fS[li + 2] - fS[li + 1]
f1 = fS[li]
f2 = fS[li + 1]
x1 = S[li]
x2 = S[li + 1]
# more numerically stable than above
ix = ((f1 * dx1 - df1 * x1) * dx2 - (f2 * dx2 - df2 * x2) * dx1) / (df2 * dx1 - df1 * dx2)
if isinf(m1) or abs(m1 - m2) < 10.0 ** 8 * eps(m1):
ix = S[li]
pr1 = float("-inf")
pr2 = compute_segment_log_prob(ix, S[li + 1], m2, b2)
elif isinf(m2):
ix = S[li + 1]
pr1 = compute_segment_log_prob(S[li], ix, m1, b1)
pr2 = float("-inf")
else:
if isinf(ix):
raise ValueError("Non finite intersection")
if abs(ix - S[li]) < 10.0 ** 12 * eps(S[li]):
ix = S[li]
elif abs(ix - S[li + 1]) < 10.0**12 * eps(S[li + 1]):
ix = S[li + 1]
if ix < S[li] or ix > S[li + 1]:
raise ValueError("Intersection out of bounds -- logpdf is not concave")
pr1 = compute_segment_log_prob(S[li], ix, m1, b1)
pr2 = compute_segment_log_prob(ix, S[li + 1], m2, b2)
upper_hull.append(HullNode(m=m1, b=b1, pr=pr1, left=S[li], right=ix))
upper_hull.append(HullNode(m=m2, b=b2, pr=pr2, left=ix, right=S[li + 1]))
# second last line
m = (fS[-2] - fS[-3]) / float(S[-2] - S[-3])
b = fS[-2] - m * S[-2]
pr = compute_segment_log_prob(S[-2], S[-1], m, b)
upper_hull.append(HullNode(m=m, b=b, pr=pr, left=S[-2], right=S[-1]))
if isinf(domain[1]):
# last line (to infinity)
m = (fS[-1] - fS[-2]) / (S[-1] - S[-2])
b = fS[-1] - m * S[-1]
pr = compute_segment_log_prob(S[-1], float("inf"), m, b)
upper_hull.append(
HullNode(m=m, b=b, pr=pr, left=S[-1], right=float("inf"))
)
# normalize probabilities
normalized_probabilities = exp_normalize(
asarray([node.pr for node in upper_hull])
)
for node, probability in zip(upper_hull, normalized_probabilities):
node.pr = probability
assert(len(lower_hull) == len(S) - 1)
assert(len(upper_hull) == n_upper_segments)
return lower_hull, upper_hull | [
"def",
"compute_hulls",
"(",
"S",
",",
"fS",
",",
"domain",
")",
":",
"assert",
"(",
"len",
"(",
"S",
")",
"==",
"len",
"(",
"fS",
")",
")",
"assert",
"(",
"len",
"(",
"domain",
")",
"==",
"2",
")",
"lower_hull",
"=",
"[",
"]",
"for",
"li",
"in",
"range",
"(",
"len",
"(",
"S",
")",
"-",
"1",
")",
":",
"m",
"=",
"(",
"fS",
"[",
"li",
"+",
"1",
"]",
"-",
"fS",
"[",
"li",
"]",
")",
"/",
"(",
"S",
"[",
"li",
"+",
"1",
"]",
"-",
"S",
"[",
"li",
"]",
")",
"b",
"=",
"fS",
"[",
"li",
"]",
"-",
"m",
"*",
"S",
"[",
"li",
"]",
"left",
"=",
"S",
"[",
"li",
"]",
"right",
"=",
"S",
"[",
"li",
"+",
"1",
"]",
"lower_hull",
".",
"append",
"(",
"HullNode",
"(",
"m",
"=",
"m",
",",
"b",
"=",
"b",
",",
"left",
"=",
"left",
",",
"right",
"=",
"right",
")",
")",
"# compute upper piecewise-linear hull",
"# expected final length of upper hull after full computation",
"n_upper_segments",
"=",
"2",
"*",
"(",
"len",
"(",
"S",
")",
"-",
"2",
")",
"+",
"isinf",
"(",
"domain",
"[",
"0",
"]",
")",
"+",
"isinf",
"(",
"domain",
"[",
"1",
"]",
")",
"upper_hull",
"=",
"[",
"]",
"if",
"isinf",
"(",
"domain",
"[",
"0",
"]",
")",
":",
"# first line (from -infinity)",
"m",
"=",
"(",
"fS",
"[",
"1",
"]",
"-",
"fS",
"[",
"0",
"]",
")",
"/",
"(",
"S",
"[",
"1",
"]",
"-",
"S",
"[",
"0",
"]",
")",
"b",
"=",
"fS",
"[",
"0",
"]",
"-",
"m",
"*",
"S",
"[",
"0",
"]",
"pr",
"=",
"compute_segment_log_prob",
"(",
"float",
"(",
"\"-inf\"",
")",
",",
"S",
"[",
"0",
"]",
",",
"m",
",",
"b",
")",
"upper_hull",
".",
"append",
"(",
"HullNode",
"(",
"m",
"=",
"m",
",",
"b",
"=",
"b",
",",
"pr",
"=",
"pr",
",",
"left",
"=",
"float",
"(",
"\"-inf\"",
")",
",",
"right",
"=",
"S",
"[",
"0",
"]",
")",
")",
"# second line",
"m",
"=",
"(",
"fS",
"[",
"2",
"]",
"-",
"fS",
"[",
"1",
"]",
")",
"/",
"(",
"S",
"[",
"2",
"]",
"-",
"S",
"[",
"1",
"]",
")",
"b",
"=",
"fS",
"[",
"1",
"]",
"-",
"m",
"*",
"S",
"[",
"1",
"]",
"pr",
"=",
"compute_segment_log_prob",
"(",
"S",
"[",
"0",
"]",
",",
"S",
"[",
"1",
"]",
",",
"m",
",",
"b",
")",
"upper_hull",
".",
"append",
"(",
"HullNode",
"(",
"m",
"=",
"m",
",",
"b",
"=",
"b",
",",
"pr",
"=",
"pr",
",",
"left",
"=",
"S",
"[",
"0",
"]",
",",
"right",
"=",
"S",
"[",
"1",
"]",
")",
")",
"# interior lines",
"# there are two lines between each abscissa",
"for",
"li",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"S",
")",
"-",
"2",
")",
":",
"m1",
"=",
"(",
"fS",
"[",
"li",
"]",
"-",
"fS",
"[",
"li",
"-",
"1",
"]",
")",
"/",
"(",
"S",
"[",
"li",
"]",
"-",
"S",
"[",
"li",
"-",
"1",
"]",
")",
"b1",
"=",
"fS",
"[",
"li",
"]",
"-",
"m1",
"*",
"S",
"[",
"li",
"]",
"m2",
"=",
"(",
"fS",
"[",
"li",
"+",
"2",
"]",
"-",
"fS",
"[",
"li",
"+",
"1",
"]",
")",
"/",
"(",
"S",
"[",
"li",
"+",
"2",
"]",
"-",
"S",
"[",
"li",
"+",
"1",
"]",
")",
"b2",
"=",
"fS",
"[",
"li",
"+",
"1",
"]",
"-",
"m2",
"*",
"S",
"[",
"li",
"+",
"1",
"]",
"if",
"isinf",
"(",
"m1",
")",
"and",
"isinf",
"(",
"m2",
")",
":",
"raise",
"ValueError",
"(",
"\"both hull slopes are infinite\"",
")",
"dx1",
"=",
"S",
"[",
"li",
"]",
"-",
"S",
"[",
"li",
"-",
"1",
"]",
"df1",
"=",
"fS",
"[",
"li",
"]",
"-",
"fS",
"[",
"li",
"-",
"1",
"]",
"dx2",
"=",
"S",
"[",
"li",
"+",
"2",
"]",
"-",
"S",
"[",
"li",
"+",
"1",
"]",
"df2",
"=",
"fS",
"[",
"li",
"+",
"2",
"]",
"-",
"fS",
"[",
"li",
"+",
"1",
"]",
"f1",
"=",
"fS",
"[",
"li",
"]",
"f2",
"=",
"fS",
"[",
"li",
"+",
"1",
"]",
"x1",
"=",
"S",
"[",
"li",
"]",
"x2",
"=",
"S",
"[",
"li",
"+",
"1",
"]",
"# more numerically stable than above",
"ix",
"=",
"(",
"(",
"f1",
"*",
"dx1",
"-",
"df1",
"*",
"x1",
")",
"*",
"dx2",
"-",
"(",
"f2",
"*",
"dx2",
"-",
"df2",
"*",
"x2",
")",
"*",
"dx1",
")",
"/",
"(",
"df2",
"*",
"dx1",
"-",
"df1",
"*",
"dx2",
")",
"if",
"isinf",
"(",
"m1",
")",
"or",
"abs",
"(",
"m1",
"-",
"m2",
")",
"<",
"10.0",
"**",
"8",
"*",
"eps",
"(",
"m1",
")",
":",
"ix",
"=",
"S",
"[",
"li",
"]",
"pr1",
"=",
"float",
"(",
"\"-inf\"",
")",
"pr2",
"=",
"compute_segment_log_prob",
"(",
"ix",
",",
"S",
"[",
"li",
"+",
"1",
"]",
",",
"m2",
",",
"b2",
")",
"elif",
"isinf",
"(",
"m2",
")",
":",
"ix",
"=",
"S",
"[",
"li",
"+",
"1",
"]",
"pr1",
"=",
"compute_segment_log_prob",
"(",
"S",
"[",
"li",
"]",
",",
"ix",
",",
"m1",
",",
"b1",
")",
"pr2",
"=",
"float",
"(",
"\"-inf\"",
")",
"else",
":",
"if",
"isinf",
"(",
"ix",
")",
":",
"raise",
"ValueError",
"(",
"\"Non finite intersection\"",
")",
"if",
"abs",
"(",
"ix",
"-",
"S",
"[",
"li",
"]",
")",
"<",
"10.0",
"**",
"12",
"*",
"eps",
"(",
"S",
"[",
"li",
"]",
")",
":",
"ix",
"=",
"S",
"[",
"li",
"]",
"elif",
"abs",
"(",
"ix",
"-",
"S",
"[",
"li",
"+",
"1",
"]",
")",
"<",
"10.0",
"**",
"12",
"*",
"eps",
"(",
"S",
"[",
"li",
"+",
"1",
"]",
")",
":",
"ix",
"=",
"S",
"[",
"li",
"+",
"1",
"]",
"if",
"ix",
"<",
"S",
"[",
"li",
"]",
"or",
"ix",
">",
"S",
"[",
"li",
"+",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"Intersection out of bounds -- logpdf is not concave\"",
")",
"pr1",
"=",
"compute_segment_log_prob",
"(",
"S",
"[",
"li",
"]",
",",
"ix",
",",
"m1",
",",
"b1",
")",
"pr2",
"=",
"compute_segment_log_prob",
"(",
"ix",
",",
"S",
"[",
"li",
"+",
"1",
"]",
",",
"m2",
",",
"b2",
")",
"upper_hull",
".",
"append",
"(",
"HullNode",
"(",
"m",
"=",
"m1",
",",
"b",
"=",
"b1",
",",
"pr",
"=",
"pr1",
",",
"left",
"=",
"S",
"[",
"li",
"]",
",",
"right",
"=",
"ix",
")",
")",
"upper_hull",
".",
"append",
"(",
"HullNode",
"(",
"m",
"=",
"m2",
",",
"b",
"=",
"b2",
",",
"pr",
"=",
"pr2",
",",
"left",
"=",
"ix",
",",
"right",
"=",
"S",
"[",
"li",
"+",
"1",
"]",
")",
")",
"# second last line",
"m",
"=",
"(",
"fS",
"[",
"-",
"2",
"]",
"-",
"fS",
"[",
"-",
"3",
"]",
")",
"/",
"float",
"(",
"S",
"[",
"-",
"2",
"]",
"-",
"S",
"[",
"-",
"3",
"]",
")",
"b",
"=",
"fS",
"[",
"-",
"2",
"]",
"-",
"m",
"*",
"S",
"[",
"-",
"2",
"]",
"pr",
"=",
"compute_segment_log_prob",
"(",
"S",
"[",
"-",
"2",
"]",
",",
"S",
"[",
"-",
"1",
"]",
",",
"m",
",",
"b",
")",
"upper_hull",
".",
"append",
"(",
"HullNode",
"(",
"m",
"=",
"m",
",",
"b",
"=",
"b",
",",
"pr",
"=",
"pr",
",",
"left",
"=",
"S",
"[",
"-",
"2",
"]",
",",
"right",
"=",
"S",
"[",
"-",
"1",
"]",
")",
")",
"if",
"isinf",
"(",
"domain",
"[",
"1",
"]",
")",
":",
"# last line (to infinity)",
"m",
"=",
"(",
"fS",
"[",
"-",
"1",
"]",
"-",
"fS",
"[",
"-",
"2",
"]",
")",
"/",
"(",
"S",
"[",
"-",
"1",
"]",
"-",
"S",
"[",
"-",
"2",
"]",
")",
"b",
"=",
"fS",
"[",
"-",
"1",
"]",
"-",
"m",
"*",
"S",
"[",
"-",
"1",
"]",
"pr",
"=",
"compute_segment_log_prob",
"(",
"S",
"[",
"-",
"1",
"]",
",",
"float",
"(",
"\"inf\"",
")",
",",
"m",
",",
"b",
")",
"upper_hull",
".",
"append",
"(",
"HullNode",
"(",
"m",
"=",
"m",
",",
"b",
"=",
"b",
",",
"pr",
"=",
"pr",
",",
"left",
"=",
"S",
"[",
"-",
"1",
"]",
",",
"right",
"=",
"float",
"(",
"\"inf\"",
")",
")",
")",
"# normalize probabilities",
"normalized_probabilities",
"=",
"exp_normalize",
"(",
"asarray",
"(",
"[",
"node",
".",
"pr",
"for",
"node",
"in",
"upper_hull",
"]",
")",
")",
"for",
"node",
",",
"probability",
"in",
"zip",
"(",
"upper_hull",
",",
"normalized_probabilities",
")",
":",
"node",
".",
"pr",
"=",
"probability",
"assert",
"(",
"len",
"(",
"lower_hull",
")",
"==",
"len",
"(",
"S",
")",
"-",
"1",
")",
"assert",
"(",
"len",
"(",
"upper_hull",
")",
"==",
"n_upper_segments",
")",
"return",
"lower_hull",
",",
"upper_hull"
] | (Re-)compute upper and lower hull given
the segment points `S` with function values
`fS` and the `domain` of the logpdf.
Parameters
----------
S : np.ndarray (N, 1)
Straight-line segment points accumulated thus far.
fS : tuple
Value of the `logpdf` under sampling for each
of the given segment points in `S`.
domain : Tuple[float, float]
Domain of `logpdf`.
May be unbounded on either or both sides,
in which case `(float("-inf"), float("inf"))`
would be passed.
If this domain is unbounded to the left,
the derivative of the logpdf
for x<= a must be positive.
If this domain is unbounded to the right the derivative of the logpdf for x>=b
must be negative.
Returns
----------
lower_hull: List[arspy.hull.HullNode]
upper_hull: List[arspy.hull.HullNode] | [
"(",
"Re",
"-",
")",
"compute",
"upper",
"and",
"lower",
"hull",
"given",
"the",
"segment",
"points",
"S",
"with",
"function",
"values",
"fS",
"and",
"the",
"domain",
"of",
"the",
"logpdf",
"."
] | train | https://github.com/MFreidank/ARSpy/blob/866885071b43e36a529f2fecf584ceef5248d800/arspy/hull.py#L44-L193 |
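A hedged sketch of compute_hulls on a log-concave target (a standard normal log-density). It assumes the arspy package is installed so the function is importable from arspy.hull, the module named in the listing.

from math import log, pi

from arspy.hull import compute_hulls

def logpdf(x):
    # Standard normal log-density (log-concave).
    return -0.5 * x * x - 0.5 * log(2 * pi)

# Three starting abscissae straddling the mode, on an unbounded domain.
S = (-2.0, 0.5, 2.0)
fS = tuple(logpdf(x) for x in S)

lower_hull, upper_hull = compute_hulls(S, fS, domain=(float("-inf"), float("inf")))

# len(S) - 1 lower segments; 2 * (len(S) - 2) + 2 upper segments when both ends are unbounded.
print(len(lower_hull), len(upper_hull))  # -> 2 4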
MFreidank/ARSpy | arspy/hull.py | sample_upper_hull | def sample_upper_hull(upper_hull, random_stream):
"""
Return a single value randomly sampled from
the given `upper_hull`.
Parameters
----------
upper_hull : List[pyars.hull.HullNode]
Upper hull to evaluate.
random_stream : numpy.random.RandomState
(Seeded) stream of random values to use during sampling.
Returns
----------
sample : float
Single value randomly sampled from `upper_hull`.
"""
cdf = cumsum([node.pr for node in upper_hull])
# randomly choose a line segment
U = random_stream.rand()
node = next(
(node for node, cdf_value in zip(upper_hull, cdf) if U < cdf_value),
upper_hull[-1] # default is last line segment
)
# sample along that line segment
U = random_stream.rand()
m, left, right = node.m, node.left, node.right
M = max(m * right, m * left)
x = (log(U * (exp(m * right - M) - exp(m * left - M)) + exp(m * left - M)) + M) / m
assert(x >= left and x <= right)
if isinf(x) or isnan(x):
raise ValueError("sampled an infinite or 'nan' x")
return x | python | def sample_upper_hull(upper_hull, random_stream):
"""
Return a single value randomly sampled from
the given `upper_hull`.
Parameters
----------
upper_hull : List[pyars.hull.HullNode]
Upper hull to evaluate.
random_stream : numpy.random.RandomState
(Seeded) stream of random values to use during sampling.
Returns
----------
sample : float
Single value randomly sampled from `upper_hull`.
"""
cdf = cumsum([node.pr for node in upper_hull])
# randomly choose a line segment
U = random_stream.rand()
node = next(
(node for node, cdf_value in zip(upper_hull, cdf) if U < cdf_value),
upper_hull[-1] # default is last line segment
)
# sample along that line segment
U = random_stream.rand()
m, left, right = node.m, node.left, node.right
M = max(m * right, m * left)
x = (log(U * (exp(m * right - M) - exp(m * left - M)) + exp(m * left - M)) + M) / m
assert(x >= left and x <= right)
if isinf(x) or isnan(x):
raise ValueError("sampled an infinite or 'nan' x")
return x | [
"def",
"sample_upper_hull",
"(",
"upper_hull",
",",
"random_stream",
")",
":",
"cdf",
"=",
"cumsum",
"(",
"[",
"node",
".",
"pr",
"for",
"node",
"in",
"upper_hull",
"]",
")",
"# randomly choose a line segment",
"U",
"=",
"random_stream",
".",
"rand",
"(",
")",
"node",
"=",
"next",
"(",
"(",
"node",
"for",
"node",
",",
"cdf_value",
"in",
"zip",
"(",
"upper_hull",
",",
"cdf",
")",
"if",
"U",
"<",
"cdf_value",
")",
",",
"upper_hull",
"[",
"-",
"1",
"]",
"# default is last line segment",
")",
"# sample along that line segment",
"U",
"=",
"random_stream",
".",
"rand",
"(",
")",
"m",
",",
"left",
",",
"right",
"=",
"node",
".",
"m",
",",
"node",
".",
"left",
",",
"node",
".",
"right",
"M",
"=",
"max",
"(",
"m",
"*",
"right",
",",
"m",
"*",
"left",
")",
"x",
"=",
"(",
"log",
"(",
"U",
"*",
"(",
"exp",
"(",
"m",
"*",
"right",
"-",
"M",
")",
"-",
"exp",
"(",
"m",
"*",
"left",
"-",
"M",
")",
")",
"+",
"exp",
"(",
"m",
"*",
"left",
"-",
"M",
")",
")",
"+",
"M",
")",
"/",
"m",
"assert",
"(",
"x",
">=",
"left",
"and",
"x",
"<=",
"right",
")",
"if",
"isinf",
"(",
"x",
")",
"or",
"isnan",
"(",
"x",
")",
":",
"raise",
"ValueError",
"(",
"\"sampled an infinite or 'nan' x\"",
")",
"return",
"x"
] | Return a single value randomly sampled from
the given `upper_hull`.
Parameters
----------
upper_hull : List[pyars.hull.HullNode]
Upper hull to evaluate.
random_stream : numpy.random.RandomState
(Seeded) stream of random values to use during sampling.
Returns
----------
sample : float
Single value randomly sampled from `upper_hull`. | [
"Return",
"a",
"single",
"value",
"randomly",
"sampled",
"from",
"the",
"given",
"upper_hull",
"."
] | train | https://github.com/MFreidank/ARSpy/blob/866885071b43e36a529f2fecf584ceef5248d800/arspy/hull.py#L207-L249 |
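Continuing the sketch above, a single proposal can then be drawn from the piecewise-exponential envelope; a seeded numpy RandomState provides the random stream the function expects.

import numpy as np

from arspy.hull import compute_hulls, sample_upper_hull

S = (-2.0, 0.5, 2.0)
fS = tuple(-0.5 * x * x for x in S)  # unnormalised standard normal log-density
_, upper_hull = compute_hulls(S, fS, domain=(float("-inf"), float("inf")))

rng = np.random.RandomState(0)
proposal = sample_upper_hull(upper_hull, rng)
print(proposal)  # one draw from the envelope; accept/reject happens elsewhere in ARS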
mrcagney/make_gtfs | make_gtfs/validators.py | check_for_required_columns | def check_for_required_columns(problems, table, df):
"""
Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the columns required by
the ProtoFeed spec
and append to the problems list one error for each column
missing.
"""
r = cs.PROTOFEED_REF
req_columns = r.loc[(r['table'] == table) & r['column_required'],
'column'].values
for col in req_columns:
if col not in df.columns:
problems.append(['error', 'Missing column {!s}'.format(col),
table, []])
return problems | python | def check_for_required_columns(problems, table, df):
"""
Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the columns required by
the ProtoFeed spec
and append to the problems list one error for each column
missing.
"""
r = cs.PROTOFEED_REF
req_columns = r.loc[(r['table'] == table) & r['column_required'],
'column'].values
for col in req_columns:
if col not in df.columns:
problems.append(['error', 'Missing column {!s}'.format(col),
table, []])
return problems | [
"def",
"check_for_required_columns",
"(",
"problems",
",",
"table",
",",
"df",
")",
":",
"r",
"=",
"cs",
".",
"PROTOFEED_REF",
"req_columns",
"=",
"r",
".",
"loc",
"[",
"(",
"r",
"[",
"'table'",
"]",
"==",
"table",
")",
"&",
"r",
"[",
"'column_required'",
"]",
",",
"'column'",
"]",
".",
"values",
"for",
"col",
"in",
"req_columns",
":",
"if",
"col",
"not",
"in",
"df",
".",
"columns",
":",
"problems",
".",
"append",
"(",
"[",
"'error'",
",",
"'Missing column {!s}'",
".",
"format",
"(",
"col",
")",
",",
"table",
",",
"[",
"]",
"]",
")",
"return",
"problems"
] | Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the columns required by
the ProtoFeed spec
and append to the problems list one error for each column
missing. | [
"Check",
"that",
"the",
"given",
"ProtoFeed",
"table",
"has",
"the",
"required",
"columns",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L24-L66 |
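A small illustrative run of the column check, assuming make_gtfs and pandas are installed. Which columns count as required comes from PROTOFEED_REF, so treating 'agency_name' as a required column of the 'meta' table is an assumption made for this sketch.

import pandas as pd

from make_gtfs import validators  # module named in the listing above

# Deliberately incomplete meta table: 'agency_name' is assumed to be required.
meta = pd.DataFrame([{
    'agency_url': 'http://example.com',
    'agency_timezone': 'Pacific/Auckland',
}])

problems = validators.check_for_required_columns([], 'meta', meta)
print(problems)  # expected: one ['error', 'Missing column ...', 'meta', []] entry per missing column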
mrcagney/make_gtfs | make_gtfs/validators.py | check_for_invalid_columns | def check_for_invalid_columns(problems, table, df):
"""
Check for invalid columns in the given ProtoFeed DataFrame.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or
``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
ProtoFeed and append to the problems list one warning for each extra
column.
"""
r = cs.PROTOFEED_REF
valid_columns = r.loc[r['table'] == table, 'column'].values
for col in df.columns:
if col not in valid_columns:
problems.append(['warning',
'Unrecognized column {!s}'.format(col),
table, []])
return problems | python | def check_for_invalid_columns(problems, table, df):
"""
Check for invalid columns in the given ProtoFeed DataFrame.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or
``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
ProtoFeed and append to the problems list one warning for each extra
column.
"""
r = cs.PROTOFEED_REF
valid_columns = r.loc[r['table'] == table, 'column'].values
for col in df.columns:
if col not in valid_columns:
problems.append(['warning',
'Unrecognized column {!s}'.format(col),
table, []])
return problems | [
"def",
"check_for_invalid_columns",
"(",
"problems",
",",
"table",
",",
"df",
")",
":",
"r",
"=",
"cs",
".",
"PROTOFEED_REF",
"valid_columns",
"=",
"r",
".",
"loc",
"[",
"r",
"[",
"'table'",
"]",
"==",
"table",
",",
"'column'",
"]",
".",
"values",
"for",
"col",
"in",
"df",
".",
"columns",
":",
"if",
"col",
"not",
"in",
"valid_columns",
":",
"problems",
".",
"append",
"(",
"[",
"'warning'",
",",
"'Unrecognized column {!s}'",
".",
"format",
"(",
"col",
")",
",",
"table",
",",
"[",
"]",
"]",
")",
"return",
"problems"
] | Check for invalid columns in the given ProtoFeed DataFrame.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or
``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
ProtoFeed and append to the problems list one warning for each extra
column. | [
"Check",
"for",
"invalid",
"columns",
"in",
"the",
"given",
"ProtoFeed",
"DataFrame",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L68-L110 |
mrcagney/make_gtfs | make_gtfs/validators.py | check_frequencies | def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
"""
Check that ``pfeed.frequencies`` follows the ProtoFeed spec.
Return a list of problems of the form described in
:func:`gt.check_table`;
the list will be empty if no problems are found.
"""
table = 'frequencies'
problems = []
# Preliminary checks
if pfeed.frequencies is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_short_name and route_long_name
for column in ['route_short_name', 'route_long_name']:
problems = gt.check_column(problems, table, f, column, gt.valid_str,
column_required=False)
cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
problems = gt.check_table(problems, table, f, cond,
'route_short_name and route_long_name both empty')
# Check route_type
v = lambda x: x in range(8)
problems = gt.check_column(problems, table, f, 'route_type', v)
# Check service window ID
problems = gt.check_column_linked_id(problems, table, f,
'service_window_id', pfeed.service_windows)
# Check direction
v = lambda x: x in range(3)
problems = gt.check_column(problems, table, f, 'direction', v)
# Check frequency
v = lambda x: isinstance(x, int)
problems = gt.check_column(problems, table, f, 'frequency', v)
# Check speed
problems = gt.check_column(problems, table, f, 'speed', valid_speed,
column_required=False)
# Check shape ID
problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
pfeed.shapes)
return gt.format_problems(problems, as_df=as_df) | python | def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
"""
Check that ``pfeed.frequencies`` follows the ProtoFeed spec.
Return a list of problems of the form described in
:func:`gt.check_table`;
the list will be empty if no problems are found.
"""
table = 'frequencies'
problems = []
# Preliminary checks
if pfeed.frequencies is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_short_name and route_long_name
for column in ['route_short_name', 'route_long_name']:
problems = gt.check_column(problems, table, f, column, gt.valid_str,
column_required=False)
cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
problems = gt.check_table(problems, table, f, cond,
'route_short_name and route_long_name both empty')
# Check route_type
v = lambda x: x in range(8)
problems = gt.check_column(problems, table, f, 'route_type', v)
# Check service window ID
problems = gt.check_column_linked_id(problems, table, f,
'service_window_id', pfeed.service_windows)
# Check direction
v = lambda x: x in range(3)
problems = gt.check_column(problems, table, f, 'direction', v)
# Check frequency
v = lambda x: isinstance(x, int)
problems = gt.check_column(problems, table, f, 'frequency', v)
# Check speed
problems = gt.check_column(problems, table, f, 'speed', valid_speed,
column_required=False)
# Check shape ID
problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
pfeed.shapes)
return gt.format_problems(problems, as_df=as_df) | [
"def",
"check_frequencies",
"(",
"pfeed",
",",
"*",
",",
"as_df",
"=",
"False",
",",
"include_warnings",
"=",
"False",
")",
":",
"table",
"=",
"'frequencies'",
"problems",
"=",
"[",
"]",
"# Preliminary checks",
"if",
"pfeed",
".",
"frequencies",
"is",
"None",
":",
"problems",
".",
"append",
"(",
"[",
"'error'",
",",
"'Missing table'",
",",
"table",
",",
"[",
"]",
"]",
")",
"else",
":",
"f",
"=",
"pfeed",
".",
"frequencies",
".",
"copy",
"(",
")",
"problems",
"=",
"check_for_required_columns",
"(",
"problems",
",",
"table",
",",
"f",
")",
"if",
"problems",
":",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")",
"if",
"include_warnings",
":",
"problems",
"=",
"check_for_invalid_columns",
"(",
"problems",
",",
"table",
",",
"f",
")",
"# Check route_short_name and route_long_name",
"for",
"column",
"in",
"[",
"'route_short_name'",
",",
"'route_long_name'",
"]",
":",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"column",
",",
"gt",
".",
"valid_str",
",",
"column_required",
"=",
"False",
")",
"cond",
"=",
"~",
"(",
"f",
"[",
"'route_short_name'",
"]",
".",
"notnull",
"(",
")",
"|",
"f",
"[",
"'route_long_name'",
"]",
".",
"notnull",
"(",
")",
")",
"problems",
"=",
"gt",
".",
"check_table",
"(",
"problems",
",",
"table",
",",
"f",
",",
"cond",
",",
"'route_short_name and route_long_name both empty'",
")",
"# Check route_type",
"v",
"=",
"lambda",
"x",
":",
"x",
"in",
"range",
"(",
"8",
")",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'route_type'",
",",
"v",
")",
"# Check service window ID",
"problems",
"=",
"gt",
".",
"check_column_linked_id",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'service_window_id'",
",",
"pfeed",
".",
"service_windows",
")",
"# Check direction",
"v",
"=",
"lambda",
"x",
":",
"x",
"in",
"range",
"(",
"3",
")",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'direction'",
",",
"v",
")",
"# Check frequency",
"v",
"=",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"int",
")",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'frequency'",
",",
"v",
")",
"# Check speed",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'speed'",
",",
"valid_speed",
",",
"column_required",
"=",
"False",
")",
"# Check shape ID",
"problems",
"=",
"gt",
".",
"check_column_linked_id",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'shape_id'",
",",
"pfeed",
".",
"shapes",
")",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")"
] | Check that ``pfeed.frequencies`` follows the ProtoFeed spec.
Return a list of problems of the form described in
:func:`gt.check_table`;
the list will be empty if no problems are found. | [
"Check",
"that",
"pfeed",
".",
"frequency",
"follows",
"the",
"ProtoFeed",
"spec",
".",
"Return",
"a",
"list",
"of",
"problems",
"of",
"the",
"form",
"described",
"in",
":",
"func",
":",
"gt",
".",
"check_table",
";",
"the",
"list",
"will",
"be",
"empty",
"if",
"no",
"problems",
"are",
"found",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L112-L167 |
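The checks above spell out what a frequencies row must contain. Below is a sketch of a single row that should satisfy them, assuming the owning ProtoFeed has a service_windows table containing 'weekday_peak' and a shapes table containing 'shape_1' (both identifiers are made up for illustration).

import pandas as pd

frequencies = pd.DataFrame([{
    'route_short_name': '1',               # at least one of short/long name must be non-empty
    'route_long_name': 'City Loop',
    'route_type': 3,                       # must lie in range(8)
    'service_window_id': 'weekday_peak',   # must match a service_windows row
    'direction': 2,                        # must lie in range(3)
    'frequency': 4,                        # must be an int
    'speed': 25,                           # optional; validated by valid_speed
    'shape_id': 'shape_1',                 # must match a shapes row
}])
print(frequencies.T)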
mrcagney/make_gtfs | make_gtfs/validators.py | check_meta | def check_meta(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.meta``
"""
table = 'meta'
problems = []
# Preliminary checks
if pfeed.meta is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.meta.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
if f.shape[0] > 1:
problems.append(['error', 'Meta must have only one row',
table, list(range(1, f.shape[0]))])
# Check agency_name
problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str)
# Check agency_url
problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url)
# Check agency_timezone
problems = gt.check_column(problems, table, f, 'agency_timezone',
gt.valid_timezone)
# Check start_date and end_date
for col in ['start_date', 'end_date']:
problems = gt.check_column(problems, table, f, col, gt.valid_date)
# Check default_route_speed
problems = gt.check_column(problems, table, f, 'default_route_speed',
valid_speed)
return gt.format_problems(problems, as_df=as_df) | python | def check_meta(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.meta``
"""
table = 'meta'
problems = []
# Preliminary checks
if pfeed.meta is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.meta.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
if f.shape[0] > 1:
problems.append(['error', 'Meta must have only one row',
table, list(range(1, f.shape[0]))])
# Check agency_name
problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str)
# Check agency_url
problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url)
# Check agency_timezone
problems = gt.check_column(problems, table, f, 'agency_timezone',
gt.valid_timezone)
# Check start_date and end_date
for col in ['start_date', 'end_date']:
problems = gt.check_column(problems, table, f, col, gt.valid_date)
# Check default_route_speed
problems = gt.check_column(problems, table, f, 'default_route_speed',
valid_speed)
return gt.format_problems(problems, as_df=as_df) | [
"def",
"check_meta",
"(",
"pfeed",
",",
"*",
",",
"as_df",
"=",
"False",
",",
"include_warnings",
"=",
"False",
")",
":",
"table",
"=",
"'meta'",
"problems",
"=",
"[",
"]",
"# Preliminary checks",
"if",
"pfeed",
".",
"meta",
"is",
"None",
":",
"problems",
".",
"append",
"(",
"[",
"'error'",
",",
"'Missing table'",
",",
"table",
",",
"[",
"]",
"]",
")",
"else",
":",
"f",
"=",
"pfeed",
".",
"meta",
".",
"copy",
"(",
")",
"problems",
"=",
"check_for_required_columns",
"(",
"problems",
",",
"table",
",",
"f",
")",
"if",
"problems",
":",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")",
"if",
"include_warnings",
":",
"problems",
"=",
"check_for_invalid_columns",
"(",
"problems",
",",
"table",
",",
"f",
")",
"if",
"f",
".",
"shape",
"[",
"0",
"]",
">",
"1",
":",
"problems",
".",
"append",
"(",
"[",
"'error'",
",",
"'Meta must have only one row'",
",",
"table",
",",
"list",
"(",
"range",
"(",
"1",
",",
"f",
".",
"shape",
"[",
"0",
"]",
")",
")",
"]",
")",
"# Check agency_name",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'agency_name'",
",",
"gt",
".",
"valid_str",
")",
"# Check agency_url",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'agency_url'",
",",
"gt",
".",
"valid_url",
")",
"# Check agency_timezone",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'agency_timezone'",
",",
"gt",
".",
"valid_timezone",
")",
"# Check start_date and end_date",
"for",
"col",
"in",
"[",
"'start_date'",
",",
"'end_date'",
"]",
":",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"col",
",",
"gt",
".",
"valid_date",
")",
"# Check default_route_speed",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'default_route_speed'",
",",
"valid_speed",
")",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")"
] | Analog of :func:`check_frequencies` for ``pfeed.meta`` | [
"Analog",
"of",
":",
"func",
":",
"check_frequencies",
"for",
"pfeed",
".",
"meta"
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L169-L210 |
mrcagney/make_gtfs | make_gtfs/validators.py | check_service_windows | def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.service_windows``
"""
table = 'service_windows'
problems = []
# Preliminary checks
if pfeed.service_windows is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.service_windows.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service window ID
problems = gt.check_column_id(problems, table, f,
'service_window_id')
# Check start_time and end_time
for column in ['start_time', 'end_time']:
problems = gt.check_column(problems, table, f, column, gt.valid_time)
# Check weekday columns
v = lambda x: x in range(2)
for col in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
problems = gt.check_column(problems, table, f, col, v)
return gt.format_problems(problems, as_df=as_df) | python | def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.service_windows``
"""
table = 'service_windows'
problems = []
# Preliminary checks
if pfeed.service_windows is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.service_windows.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service window ID
problems = gt.check_column_id(problems, table, f,
'service_window_id')
# Check start_time and end_time
for column in ['start_time', 'end_time']:
problems = gt.check_column(problems, table, f, column, gt.valid_time)
# Check weekday columns
v = lambda x: x in range(2)
for col in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
problems = gt.check_column(problems, table, f, col, v)
return gt.format_problems(problems, as_df=as_df) | [
"def",
"check_service_windows",
"(",
"pfeed",
",",
"*",
",",
"as_df",
"=",
"False",
",",
"include_warnings",
"=",
"False",
")",
":",
"table",
"=",
"'service_windows'",
"problems",
"=",
"[",
"]",
"# Preliminary checks",
"if",
"pfeed",
".",
"service_windows",
"is",
"None",
":",
"problems",
".",
"append",
"(",
"[",
"'error'",
",",
"'Missing table'",
",",
"table",
",",
"[",
"]",
"]",
")",
"else",
":",
"f",
"=",
"pfeed",
".",
"service_windows",
".",
"copy",
"(",
")",
"problems",
"=",
"check_for_required_columns",
"(",
"problems",
",",
"table",
",",
"f",
")",
"if",
"problems",
":",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")",
"if",
"include_warnings",
":",
"problems",
"=",
"check_for_invalid_columns",
"(",
"problems",
",",
"table",
",",
"f",
")",
"# Check service window ID",
"problems",
"=",
"gt",
".",
"check_column_id",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'service_window_id'",
")",
"# Check start_time and end_time",
"for",
"column",
"in",
"[",
"'start_time'",
",",
"'end_time'",
"]",
":",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"column",
",",
"gt",
".",
"valid_time",
")",
"# Check weekday columns",
"v",
"=",
"lambda",
"x",
":",
"x",
"in",
"range",
"(",
"2",
")",
"for",
"col",
"in",
"[",
"'monday'",
",",
"'tuesday'",
",",
"'wednesday'",
",",
"'thursday'",
",",
"'friday'",
",",
"'saturday'",
",",
"'sunday'",
"]",
":",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"col",
",",
"v",
")",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")"
] | Analog of :func:`check_frequencies` for ``pfeed.service_windows`` | [
"Analog",
"of",
":",
"func",
":",
"check_frequencies",
"for",
"pfeed",
".",
"service_windows"
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L212-L245 |
mrcagney/make_gtfs | make_gtfs/validators.py | check_shapes | def check_shapes(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.shapes``
"""
table = 'shapes'
problems = []
# Preliminary checks
if pfeed.shapes is None:
return problems
f = pfeed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = gt.check_column(problems, table, f, 'shape_id', gt.valid_str)
# Check geometry
v = lambda x: isinstance(x, sg.LineString) and not x.is_empty
problems = gt.check_column(problems, table, f, 'geometry', v)
return gt.format_problems(problems, as_df=as_df) | python | def check_shapes(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.shapes``
"""
table = 'shapes'
problems = []
# Preliminary checks
if pfeed.shapes is None:
return problems
f = pfeed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = gt.check_column(problems, table, f, 'shape_id', gt.valid_str)
# Check geometry
v = lambda x: isinstance(x, sg.LineString) and not x.is_empty
problems = gt.check_column(problems, table, f, 'geometry', v)
return gt.format_problems(problems, as_df=as_df) | [
"def",
"check_shapes",
"(",
"pfeed",
",",
"*",
",",
"as_df",
"=",
"False",
",",
"include_warnings",
"=",
"False",
")",
":",
"table",
"=",
"'shapes'",
"problems",
"=",
"[",
"]",
"# Preliminary checks",
"if",
"pfeed",
".",
"shapes",
"is",
"None",
":",
"return",
"problems",
"f",
"=",
"pfeed",
".",
"shapes",
".",
"copy",
"(",
")",
"problems",
"=",
"check_for_required_columns",
"(",
"problems",
",",
"table",
",",
"f",
")",
"if",
"problems",
":",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")",
"if",
"include_warnings",
":",
"problems",
"=",
"check_for_invalid_columns",
"(",
"problems",
",",
"table",
",",
"f",
")",
"# Check shape_id",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'shape_id'",
",",
"gt",
".",
"valid_str",
")",
"# Check geometry",
"v",
"=",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"sg",
".",
"LineString",
")",
"and",
"not",
"x",
".",
"is_empty",
"problems",
"=",
"gt",
".",
"check_column",
"(",
"problems",
",",
"table",
",",
"f",
",",
"'geometry'",
",",
"v",
")",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")"
] | Analog of :func:`check_frequencies` for ``pfeed.shapes`` | [
"Analog",
"of",
":",
"func",
":",
"check_frequencies",
"for",
"pfeed",
".",
"shapes"
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L247-L273 |
mrcagney/make_gtfs | make_gtfs/validators.py | check_stops | def check_stops(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.stops``
"""
# Use gtfstk's stop validator
if pfeed.stops is not None:
stop_times = pd.DataFrame(columns=['stop_id'])
feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
dist_units='km')
return gt.check_stops(feed, as_df=as_df, include_warnings=False) | python | def check_stops(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.stops``
"""
# Use gtfstk's stop validator
if pfeed.stops is not None:
stop_times = pd.DataFrame(columns=['stop_id'])
feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
dist_units='km')
return gt.check_stops(feed, as_df=as_df, include_warnings=False) | [
"def",
"check_stops",
"(",
"pfeed",
",",
"*",
",",
"as_df",
"=",
"False",
",",
"include_warnings",
"=",
"False",
")",
":",
"# Use gtfstk's stop validator",
"if",
"pfeed",
".",
"stops",
"is",
"not",
"None",
":",
"stop_times",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"[",
"'stop_id'",
"]",
")",
"feed",
"=",
"gt",
".",
"Feed",
"(",
"stops",
"=",
"pfeed",
".",
"stops",
",",
"stop_times",
"=",
"stop_times",
",",
"dist_units",
"=",
"'km'",
")",
"return",
"gt",
".",
"check_stops",
"(",
"feed",
",",
"as_df",
"=",
"as_df",
",",
"include_warnings",
"=",
"False",
")"
] | Analog of :func:`check_frequencies` for ``pfeed.stops`` | [
"Analog",
"of",
":",
"func",
":",
"check_frequencies",
"for",
"pfeed",
".",
"stops"
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L275-L284 |
mrcagney/make_gtfs | make_gtfs/validators.py | validate | def validate(pfeed, *, as_df=True, include_warnings=True):
"""
Check whether the given pfeed satisfies the ProtoFeed spec.
Parameters
----------
pfeed : ProtoFeed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_frequencies`,
:func:`check_meta`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
spec is violated; 'warning' means there is a problem but it's
not a ProtoFeed spec violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the pfeed is missing required tables or required
columns.
"""
problems = []
# Check for invalid columns and check the required tables
checkers = [
'check_frequencies',
'check_meta',
'check_service_windows',
'check_shapes',
'check_stops',
]
for checker in checkers:
problems.extend(globals()[checker](pfeed,
include_warnings=include_warnings))
return gt.format_problems(problems, as_df=as_df) | python | def validate(pfeed, *, as_df=True, include_warnings=True):
"""
Check whether the given pfeed satisfies the ProtoFeed spec.
Parameters
----------
pfeed : ProtoFeed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_frequencies`,
:func:`check_meta`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
spec is violated; 'warning' means there is a problem but it's
not a ProtoFeed spec violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the pfeed is missing required tables or required
columns.
"""
problems = []
# Check for invalid columns and check the required tables
checkers = [
'check_frequencies',
'check_meta',
'check_service_windows',
'check_shapes',
'check_stops',
]
for checker in checkers:
problems.extend(globals()[checker](pfeed,
include_warnings=include_warnings))
return gt.format_problems(problems, as_df=as_df) | [
"def",
"validate",
"(",
"pfeed",
",",
"*",
",",
"as_df",
"=",
"True",
",",
"include_warnings",
"=",
"True",
")",
":",
"problems",
"=",
"[",
"]",
"# Check for invalid columns and check the required tables",
"checkers",
"=",
"[",
"'check_frequencies'",
",",
"'check_meta'",
",",
"'check_service_windows'",
",",
"'check_shapes'",
",",
"'check_stops'",
",",
"]",
"for",
"checker",
"in",
"checkers",
":",
"problems",
".",
"extend",
"(",
"globals",
"(",
")",
"[",
"checker",
"]",
"(",
"pfeed",
",",
"include_warnings",
"=",
"include_warnings",
")",
")",
"return",
"gt",
".",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")"
] | Check whether the given pfeed satisfies the ProtoFeed spec.
Parameters
----------
pfeed : ProtoFeed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_frequencies`,
:func:`check_meta`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
spec is violated; 'warning' means there is a problem but it's
not a ProtoFeed spec violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the pfeed is missing required tables or required
columns. | [
"Check",
"whether",
"the",
"given",
"pfeed",
"satisfies",
"the",
"ProtoFeed",
"spec",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L286-L336 |
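A hedged usage sketch for the validator above; `pfeed` is assumed to be a ProtoFeed built elsewhere (for example with make_gtfs's ProtoFeed reader), so the snippet illustrates the call rather than running standalone.

report = validate(pfeed, as_df=True, include_warnings=True)  # pfeed assumed to exist
if len(report):
    print(report[['type', 'message', 'table']])
else:
    print('ProtoFeed looks valid')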
deshima-dev/decode | decode/joke/functions.py | youtube | def youtube(keyword=None):
"""Open youtube.
Args:
keyword (optional): Search word.
"""
if keyword is None:
web.open('https://www.youtube.com/watch?v=L_mBVT2jBFw')
else:
web.open(quote('https://www.youtube.com/results?search_query={}'.format(keyword), RESERVED)) | python | def youtube(keyword=None):
"""Open youtube.
Args:
keyword (optional): Search word.
"""
if keyword is None:
web.open('https://www.youtube.com/watch?v=L_mBVT2jBFw')
else:
web.open(quote('https://www.youtube.com/results?search_query={}'.format(keyword), RESERVED)) | [
"def",
"youtube",
"(",
"keyword",
"=",
"None",
")",
":",
"if",
"keyword",
"is",
"None",
":",
"web",
".",
"open",
"(",
"'https://www.youtube.com/watch?v=L_mBVT2jBFw'",
")",
"else",
":",
"web",
".",
"open",
"(",
"quote",
"(",
"'https://www.youtube.com/results?search_query={}'",
".",
"format",
"(",
"keyword",
")",
",",
"RESERVED",
")",
")"
] | Open youtube.
Args:
keyword (optional): Search word. | [
"Open",
"youtube",
"."
] | train | https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/joke/functions.py#L21-L30 |
facelessuser/pyspelling | pyspelling/filters/javascript.py | JavaScriptFilter.setup | def setup(self):
"""Setup."""
self.blocks = self.config['block_comments']
self.lines = self.config['line_comments']
self.group_comments = self.config['group_comments']
self.jsdocs = self.config['jsdocs']
self.decode_escapes = self.config['decode_escapes']
self.strings = self.config['strings']
self.prefix = 'js' | python | def setup(self):
"""Setup."""
self.blocks = self.config['block_comments']
self.lines = self.config['line_comments']
self.group_comments = self.config['group_comments']
self.jsdocs = self.config['jsdocs']
self.decode_escapes = self.config['decode_escapes']
self.strings = self.config['strings']
self.prefix = 'js' | [
"def",
"setup",
"(",
"self",
")",
":",
"self",
".",
"blocks",
"=",
"self",
".",
"config",
"[",
"'block_comments'",
"]",
"self",
".",
"lines",
"=",
"self",
".",
"config",
"[",
"'line_comments'",
"]",
"self",
".",
"group_comments",
"=",
"self",
".",
"config",
"[",
"'group_comments'",
"]",
"self",
".",
"jsdocs",
"=",
"self",
".",
"config",
"[",
"'jsdocs'",
"]",
"self",
".",
"decode_escapes",
"=",
"self",
".",
"config",
"[",
"'decode_escapes'",
"]",
"self",
".",
"strings",
"=",
"self",
".",
"config",
"[",
"'strings'",
"]",
"self",
".",
"prefix",
"=",
"'js'"
] | Setup. | [
"Setup",
"."
] | train | https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/javascript.py#L71-L80 |
facelessuser/pyspelling | pyspelling/filters/javascript.py | JavaScriptFilter.replace_escapes | def replace_escapes(self, m):
"""Replace escapes."""
groups = m.groupdict()
esc = m.group(0)
if groups.get('special'):
value = BACK_SLASH_TRANSLATION[esc]
elif groups.get('char'):
try:
if esc.endswith('}'):
value = chr(int(esc[3:-1], 16))
else:
value = chr(int(esc[2:], 16))
except Exception:
value = esc
elif groups.get('oct'):
# JavaScript only supports hex range.
# So \400 would be \40 + '0'
value = int(esc[1:], 8)
overflow = ''
if value > 255:
                value = int(esc[1:-1], 8)
                overflow = esc[-1]
            value = chr(value) + overflow
        elif groups.get('other'):
value = esc[1:]
return value.replace('\x00', '\n') | python | def replace_escapes(self, m):
"""Replace escapes."""
groups = m.groupdict()
esc = m.group(0)
if groups.get('special'):
value = BACK_SLASH_TRANSLATION[esc]
elif groups.get('char'):
try:
if esc.endswith('}'):
value = chr(int(esc[3:-1], 16))
else:
value = chr(int(esc[2:], 16))
except Exception:
value = esc
elif groups.get('oct'):
# JavaScript only supports hex range.
# So \400 would be \40 + '0'
value = int(esc[1:], 8)
overflow = ''
if value > 255:
                value = int(esc[1:-1], 8)
                overflow = esc[-1]
            value = chr(value) + overflow
        elif groups.get('other'):
value = esc[1:]
return value.replace('\x00', '\n') | [
"def",
"replace_escapes",
"(",
"self",
",",
"m",
")",
":",
"groups",
"=",
"m",
".",
"groupdict",
"(",
")",
"esc",
"=",
"m",
".",
"group",
"(",
"0",
")",
"if",
"groups",
".",
"get",
"(",
"'special'",
")",
":",
"value",
"=",
"BACK_SLASH_TRANSLATION",
"[",
"esc",
"]",
"elif",
"groups",
".",
"get",
"(",
"'char'",
")",
":",
"try",
":",
"if",
"esc",
".",
"endswith",
"(",
"'}'",
")",
":",
"value",
"=",
"chr",
"(",
"int",
"(",
"esc",
"[",
"3",
":",
"-",
"1",
"]",
",",
"16",
")",
")",
"else",
":",
"value",
"=",
"chr",
"(",
"int",
"(",
"esc",
"[",
"2",
":",
"]",
",",
"16",
")",
")",
"except",
"Exception",
":",
"value",
"=",
"esc",
"elif",
"groups",
".",
"get",
"(",
"'oct'",
")",
":",
"# JavaScript only supports hex range.",
"# So \\400 would be \\40 + '0'",
"value",
"=",
"int",
"(",
"esc",
"[",
"1",
":",
"]",
",",
"8",
")",
"overflow",
"=",
"''",
"if",
"value",
">",
"255",
":",
"value",
"=",
"esc",
"[",
"1",
":",
"-",
"1",
"]",
"overflow",
"+",
"esc",
"[",
"-",
"1",
"]",
"value",
"=",
"chr",
"(",
"value",
")",
"+",
"overflow",
"elif",
"(",
"'other'",
")",
":",
"value",
"=",
"esc",
"[",
"1",
":",
"]",
"return",
"value",
".",
"replace",
"(",
"'\\x00'",
",",
"'\\n'",
")"
] | Replace escapes. | [
"Replace",
"escapes",
"."
] | train | https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/javascript.py#L82-L108 |
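A standalone sketch of the octal-overflow rule noted in the comment above: JavaScript caps octal escapes at \377, so \400 is read as \40 followed by a literal '0' (pure Python, no pyspelling imports).

esc = "\\400"                       # the four characters \400
value = int(esc[1:], 8)             # 256, outside the 0-255 range
overflow = ''
if value > 255:
    value = int(esc[1:-1], 8)       # 0o40 == 32, a space
    overflow = esc[-1]              # the trailing '0' stays literal
print(repr(chr(value) + overflow))  # prints ' 0'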
facelessuser/pyspelling | pyspelling/filters/javascript.py | JavaScriptFilter.replace_surrogates | def replace_surrogates(self, m):
"""Replace surrogates."""
high, low = ord(m.group(1)), ord(m.group(2))
return chr((high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000) | python | def replace_surrogates(self, m):
"""Replace surrogates."""
high, low = ord(m.group(1)), ord(m.group(2))
return chr((high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000) | [
"def",
"replace_surrogates",
"(",
"self",
",",
"m",
")",
":",
"high",
",",
"low",
"=",
"ord",
"(",
"m",
".",
"group",
"(",
"1",
")",
")",
",",
"ord",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
"return",
"chr",
"(",
"(",
"high",
"-",
"0xD800",
")",
"*",
"0x400",
"+",
"low",
"-",
"0xDC00",
"+",
"0x10000",
")"
] | Replace surrogates. | [
"Replace",
"surrogates",
"."
] | train | https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/javascript.py#L110-L114 |
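A standalone check of the surrogate-pair arithmetic above: the high/low pair U+D83D and U+DE00 combines to U+1F600 (pure Python, no pyspelling imports).

high, low = 0xD83D, 0xDE00
code_point = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000
print(hex(code_point), chr(code_point))  # 0x1f600 and the grinning-face emoji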
facelessuser/pyspelling | pyspelling/filters/javascript.py | JavaScriptFilter.evaluate_strings | def evaluate_strings(self, string, temp=False):
"""Evaluate strings."""
value = ''
if self.strings:
if self.decode_escapes:
value = RE_SURROGATES.sub(
self.replace_surrogates,
(RE_TEMP_ESC if temp else RE_ESC).sub(self.replace_escapes, string)
)
else:
value = string
if not temp:
self.quoted_strings.append([value, self.line_num, 'utf-8'])
return value | python | def evaluate_strings(self, string, temp=False):
"""Evaluate strings."""
value = ''
if self.strings:
if self.decode_escapes:
value = RE_SURROGATES.sub(
self.replace_surrogates,
(RE_TEMP_ESC if temp else RE_ESC).sub(self.replace_escapes, string)
)
else:
value = string
if not temp:
self.quoted_strings.append([value, self.line_num, 'utf-8'])
return value | [
"def",
"evaluate_strings",
"(",
"self",
",",
"string",
",",
"temp",
"=",
"False",
")",
":",
"value",
"=",
"''",
"if",
"self",
".",
"strings",
":",
"if",
"self",
".",
"decode_escapes",
":",
"value",
"=",
"RE_SURROGATES",
".",
"sub",
"(",
"self",
".",
"replace_surrogates",
",",
"(",
"RE_TEMP_ESC",
"if",
"temp",
"else",
"RE_ESC",
")",
".",
"sub",
"(",
"self",
".",
"replace_escapes",
",",
"string",
")",
")",
"else",
":",
"value",
"=",
"string",
"if",
"not",
"temp",
":",
"self",
".",
"quoted_strings",
".",
"append",
"(",
"[",
"value",
",",
"self",
".",
"line_num",
",",
"'utf-8'",
"]",
")",
"return",
"value"
] | Evaluate strings. | [
"Evaluate",
"strings",
"."
] | train | https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/javascript.py#L116-L130 |
facelessuser/pyspelling | pyspelling/filters/javascript.py | JavaScriptFilter.evaluate_block | def evaluate_block(self, comments):
"""Evaluate block comments."""
if self.jsdocs:
m1 = RE_JSDOC.match(comments)
if m1:
lines = []
for line in m1.group(1).splitlines(True):
l = line.lstrip()
lines.append(l[1:] if l.startswith('*') else l)
self.jsdoc_comments.append([''.join(lines), self.line_num, self.current_encoding])
elif self.blocks:
self.block_comments.append([comments[2:-2], self.line_num, self.current_encoding])
elif self.blocks:
self.block_comments.append([comments[2:-2], self.line_num, self.current_encoding]) | python | def evaluate_block(self, comments):
"""Evaluate block comments."""
if self.jsdocs:
m1 = RE_JSDOC.match(comments)
if m1:
lines = []
for line in m1.group(1).splitlines(True):
l = line.lstrip()
lines.append(l[1:] if l.startswith('*') else l)
self.jsdoc_comments.append([''.join(lines), self.line_num, self.current_encoding])
elif self.blocks:
self.block_comments.append([comments[2:-2], self.line_num, self.current_encoding])
elif self.blocks:
self.block_comments.append([comments[2:-2], self.line_num, self.current_encoding]) | [
"def",
"evaluate_block",
"(",
"self",
",",
"comments",
")",
":",
"if",
"self",
".",
"jsdocs",
":",
"m1",
"=",
"RE_JSDOC",
".",
"match",
"(",
"comments",
")",
"if",
"m1",
":",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"m1",
".",
"group",
"(",
"1",
")",
".",
"splitlines",
"(",
"True",
")",
":",
"l",
"=",
"line",
".",
"lstrip",
"(",
")",
"lines",
".",
"append",
"(",
"l",
"[",
"1",
":",
"]",
"if",
"l",
".",
"startswith",
"(",
"'*'",
")",
"else",
"l",
")",
"self",
".",
"jsdoc_comments",
".",
"append",
"(",
"[",
"''",
".",
"join",
"(",
"lines",
")",
",",
"self",
".",
"line_num",
",",
"self",
".",
"current_encoding",
"]",
")",
"elif",
"self",
".",
"blocks",
":",
"self",
".",
"block_comments",
".",
"append",
"(",
"[",
"comments",
"[",
"2",
":",
"-",
"2",
"]",
",",
"self",
".",
"line_num",
",",
"self",
".",
"current_encoding",
"]",
")",
"elif",
"self",
".",
"blocks",
":",
"self",
".",
"block_comments",
".",
"append",
"(",
"[",
"comments",
"[",
"2",
":",
"-",
"2",
"]",
",",
"self",
".",
"line_num",
",",
"self",
".",
"current_encoding",
"]",
")"
] | Evaluate block comments. | [
"Evaluate",
"block",
"comments",
"."
] | train | https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/javascript.py#L157-L171 |
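A standalone sketch of the leading-asterisk stripping applied to JSDoc bodies above; the RE_JSDOC pattern is not reproduced here, so the body string is a hand-made example.

body = " * Adds two numbers.\n * @param {number} a\n * @param {number} b\n"
lines = []
for line in body.splitlines(True):
    stripped = line.lstrip()
    lines.append(stripped[1:] if stripped.startswith('*') else stripped)
print(''.join(lines))  # asterisks removed, text and tags kept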
facelessuser/pyspelling | pyspelling/filters/javascript.py | JavaScriptFilter.find_content | def find_content(self, text, index=0, backtick=False):
"""Find content."""
curly_count = 0
last = '\n'
self.lines_num = 1
length = len(text)
while index < length:
start_index = index
c = text[index]
if c == '{' and backtick:
curly_count += 1
elif c == '}' and backtick:
if curly_count:
curly_count -= 1
else:
index += 1
return index
elif c == '`':
done = False
first = True
backtick_content = []
start_index = index
while not done:
m = (RE_TEMPLATE_START if first else RE_TEMPLATE_MIDDLE_END).match(text, index)
first = False
if m:
self.line_num += m.group(0).count('\n')
content = self.evaluate_strings(m.group(1), True)
if content:
backtick_content.append(content)
index = m.end(0)
if m.group(2) == '${':
index = self.find_content(text, index, True)
else:
done = True
else:
done = True
if backtick_content:
self.quoted_strings.append([' '.join(backtick_content), self.line_num, 'utf-8'])
elif c in ('\'', '"'):
m = RE_STRING.match(text, index)
if m:
self.evaluate_strings(m.group(0)[1:-1])
self.line_num += m.group(0).count('\n')
index = m.end(0)
elif c == '\n':
self.line_num += 1
elif last == '\n' or c == '/':
m = RE_COMMENT.match(text, index)
if m:
g = m.groupdict()
if g['start'] is None:
self.evaluate_inline_tail(g)
else:
self.evaluate_inline(g)
index = m.end(0)
elif c == '/':
m = RE_BLOCK_COMMENT.match(text, index)
if m:
self.evaluate_block(m.group(0))
self.line_num += m.group(0).count('\n')
index = m.end(0)
if index == start_index:
index += 1
last = text[index - 1] | python | def find_content(self, text, index=0, backtick=False):
"""Find content."""
curly_count = 0
last = '\n'
self.lines_num = 1
length = len(text)
while index < length:
start_index = index
c = text[index]
if c == '{' and backtick:
curly_count += 1
elif c == '}' and backtick:
if curly_count:
curly_count -= 1
else:
index += 1
return index
elif c == '`':
done = False
first = True
backtick_content = []
start_index = index
while not done:
m = (RE_TEMPLATE_START if first else RE_TEMPLATE_MIDDLE_END).match(text, index)
first = False
if m:
self.line_num += m.group(0).count('\n')
content = self.evaluate_strings(m.group(1), True)
if content:
backtick_content.append(content)
index = m.end(0)
if m.group(2) == '${':
index = self.find_content(text, index, True)
else:
done = True
else:
done = True
if backtick_content:
self.quoted_strings.append([' '.join(backtick_content), self.line_num, 'utf-8'])
elif c in ('\'', '"'):
m = RE_STRING.match(text, index)
if m:
self.evaluate_strings(m.group(0)[1:-1])
self.line_num += m.group(0).count('\n')
index = m.end(0)
elif c == '\n':
self.line_num += 1
elif last == '\n' or c == '/':
m = RE_COMMENT.match(text, index)
if m:
g = m.groupdict()
if g['start'] is None:
self.evaluate_inline_tail(g)
else:
self.evaluate_inline(g)
index = m.end(0)
elif c == '/':
m = RE_BLOCK_COMMENT.match(text, index)
if m:
self.evaluate_block(m.group(0))
self.line_num += m.group(0).count('\n')
index = m.end(0)
if index == start_index:
index += 1
last = text[index - 1] | [
"def",
"find_content",
"(",
"self",
",",
"text",
",",
"index",
"=",
"0",
",",
"backtick",
"=",
"False",
")",
":",
"curly_count",
"=",
"0",
"last",
"=",
"'\\n'",
"self",
".",
"lines_num",
"=",
"1",
"length",
"=",
"len",
"(",
"text",
")",
"while",
"index",
"<",
"length",
":",
"start_index",
"=",
"index",
"c",
"=",
"text",
"[",
"index",
"]",
"if",
"c",
"==",
"'{'",
"and",
"backtick",
":",
"curly_count",
"+=",
"1",
"elif",
"c",
"==",
"'}'",
"and",
"backtick",
":",
"if",
"curly_count",
":",
"curly_count",
"-=",
"1",
"else",
":",
"index",
"+=",
"1",
"return",
"index",
"elif",
"c",
"==",
"'`'",
":",
"done",
"=",
"False",
"first",
"=",
"True",
"backtick_content",
"=",
"[",
"]",
"start_index",
"=",
"index",
"while",
"not",
"done",
":",
"m",
"=",
"(",
"RE_TEMPLATE_START",
"if",
"first",
"else",
"RE_TEMPLATE_MIDDLE_END",
")",
".",
"match",
"(",
"text",
",",
"index",
")",
"first",
"=",
"False",
"if",
"m",
":",
"self",
".",
"line_num",
"+=",
"m",
".",
"group",
"(",
"0",
")",
".",
"count",
"(",
"'\\n'",
")",
"content",
"=",
"self",
".",
"evaluate_strings",
"(",
"m",
".",
"group",
"(",
"1",
")",
",",
"True",
")",
"if",
"content",
":",
"backtick_content",
".",
"append",
"(",
"content",
")",
"index",
"=",
"m",
".",
"end",
"(",
"0",
")",
"if",
"m",
".",
"group",
"(",
"2",
")",
"==",
"'${'",
":",
"index",
"=",
"self",
".",
"find_content",
"(",
"text",
",",
"index",
",",
"True",
")",
"else",
":",
"done",
"=",
"True",
"else",
":",
"done",
"=",
"True",
"if",
"backtick_content",
":",
"self",
".",
"quoted_strings",
".",
"append",
"(",
"[",
"' '",
".",
"join",
"(",
"backtick_content",
")",
",",
"self",
".",
"line_num",
",",
"'utf-8'",
"]",
")",
"elif",
"c",
"in",
"(",
"'\\''",
",",
"'\"'",
")",
":",
"m",
"=",
"RE_STRING",
".",
"match",
"(",
"text",
",",
"index",
")",
"if",
"m",
":",
"self",
".",
"evaluate_strings",
"(",
"m",
".",
"group",
"(",
"0",
")",
"[",
"1",
":",
"-",
"1",
"]",
")",
"self",
".",
"line_num",
"+=",
"m",
".",
"group",
"(",
"0",
")",
".",
"count",
"(",
"'\\n'",
")",
"index",
"=",
"m",
".",
"end",
"(",
"0",
")",
"elif",
"c",
"==",
"'\\n'",
":",
"self",
".",
"line_num",
"+=",
"1",
"elif",
"last",
"==",
"'\\n'",
"or",
"c",
"==",
"'/'",
":",
"m",
"=",
"RE_COMMENT",
".",
"match",
"(",
"text",
",",
"index",
")",
"if",
"m",
":",
"g",
"=",
"m",
".",
"groupdict",
"(",
")",
"if",
"g",
"[",
"'start'",
"]",
"is",
"None",
":",
"self",
".",
"evaluate_inline_tail",
"(",
"g",
")",
"else",
":",
"self",
".",
"evaluate_inline",
"(",
"g",
")",
"index",
"=",
"m",
".",
"end",
"(",
"0",
")",
"elif",
"c",
"==",
"'/'",
":",
"m",
"=",
"RE_BLOCK_COMMENT",
".",
"match",
"(",
"text",
",",
"index",
")",
"if",
"m",
":",
"self",
".",
"evaluate_block",
"(",
"m",
".",
"group",
"(",
"0",
")",
")",
"self",
".",
"line_num",
"+=",
"m",
".",
"group",
"(",
"0",
")",
".",
"count",
"(",
"'\\n'",
")",
"index",
"=",
"m",
".",
"end",
"(",
"0",
")",
"if",
"index",
"==",
"start_index",
":",
"index",
"+=",
"1",
"last",
"=",
"text",
"[",
"index",
"-",
"1",
"]"
] | Find content. | [
"Find",
"content",
"."
] | train | https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/javascript.py#L196-L262 |
facelessuser/pyspelling | pyspelling/plugin.py | Plugin.override_config | def override_config(self, options):
"""Override the default configuration."""
for k, v in options.items():
# Reject names not in the default configuration
if k not in self.config:
raise KeyError("'{}' is not a valid option for '{}'".format(k, self.__class__.__name__))
self.validate_options(k, v)
self.config[k] = v | python | def override_config(self, options):
"""Override the default configuration."""
for k, v in options.items():
# Reject names not in the default configuration
if k not in self.config:
raise KeyError("'{}' is not a valid option for '{}'".format(k, self.__class__.__name__))
self.validate_options(k, v)
self.config[k] = v | [
"def",
"override_config",
"(",
"self",
",",
"options",
")",
":",
"for",
"k",
",",
"v",
"in",
"options",
".",
"items",
"(",
")",
":",
"# Reject names not in the default configuration",
"if",
"k",
"not",
"in",
"self",
".",
"config",
":",
"raise",
"KeyError",
"(",
"\"'{}' is not a valid option for '{}'\"",
".",
"format",
"(",
"k",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"self",
".",
"validate_options",
"(",
"k",
",",
"v",
")",
"self",
".",
"config",
"[",
"k",
"]",
"=",
"v"
] | Override the default configuration. | [
"Override",
"the",
"default",
"configuration",
"."
] | train | https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/plugin.py#L33-L41 |
facelessuser/pyspelling | pyspelling/plugin.py | Plugin.validate_options | def validate_options(self, k, v):
"""Validate options."""
args = [self.__class__.__name__, k]
# Booleans
if isinstance(self.config[k], bool) and not isinstance(v, bool):
raise ValueError("{}: option '{}' must be a bool type.".format(*args))
# Strings
elif isinstance(self.config[k], str) and not isinstance(v, str):
raise ValueError("{}: option '{}' must be a str type.".format(*args))
# Integers (whole floats allowed)
elif (
isinstance(self.config[k], int) and
(not isinstance(v, int) and not (isinstance(v, float) and v.is_integer()))
):
raise ValueError("{}: option '{}' must be a int type.".format(*args))
# Floats (integers allowed)
elif isinstance(self.config[k], float) and not isinstance(v, (int, float)):
raise ValueError("{}: option '{}' must be a float type.".format(*args))
# Basic iterables (list, tuple, sets)
elif isinstance(self.config[k], (list, tuple, set)) and not isinstance(v, list):
            raise ValueError("{}: option '{}' must be a list type.".format(*args))
# Dictionaries
elif isinstance(self.config[k], (dict, OrderedDict)) and not isinstance(v, (dict, OrderedDict)):
raise ValueError("{}: option '{}' must be a dict type.".format(*args)) | python | def validate_options(self, k, v):
"""Validate options."""
args = [self.__class__.__name__, k]
# Booleans
if isinstance(self.config[k], bool) and not isinstance(v, bool):
raise ValueError("{}: option '{}' must be a bool type.".format(*args))
# Strings
elif isinstance(self.config[k], str) and not isinstance(v, str):
raise ValueError("{}: option '{}' must be a str type.".format(*args))
# Integers (whole floats allowed)
elif (
isinstance(self.config[k], int) and
(not isinstance(v, int) and not (isinstance(v, float) and v.is_integer()))
):
raise ValueError("{}: option '{}' must be a int type.".format(*args))
# Floats (integers allowed)
elif isinstance(self.config[k], float) and not isinstance(v, (int, float)):
raise ValueError("{}: option '{}' must be a float type.".format(*args))
# Basic iterables (list, tuple, sets)
elif isinstance(self.config[k], (list, tuple, set)) and not isinstance(v, list):
            raise ValueError("{}: option '{}' must be a list type.".format(*args))
# Dictionaries
elif isinstance(self.config[k], (dict, OrderedDict)) and not isinstance(v, (dict, OrderedDict)):
raise ValueError("{}: option '{}' must be a dict type.".format(*args)) | [
"def",
"validate_options",
"(",
"self",
",",
"k",
",",
"v",
")",
":",
"args",
"=",
"[",
"self",
".",
"__class__",
".",
"__name__",
",",
"k",
"]",
"# Booleans",
"if",
"isinstance",
"(",
"self",
".",
"config",
"[",
"k",
"]",
",",
"bool",
")",
"and",
"not",
"isinstance",
"(",
"v",
",",
"bool",
")",
":",
"raise",
"ValueError",
"(",
"\"{}: option '{}' must be a bool type.\"",
".",
"format",
"(",
"*",
"args",
")",
")",
"# Strings",
"elif",
"isinstance",
"(",
"self",
".",
"config",
"[",
"k",
"]",
",",
"str",
")",
"and",
"not",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"raise",
"ValueError",
"(",
"\"{}: option '{}' must be a str type.\"",
".",
"format",
"(",
"*",
"args",
")",
")",
"# Integers (whole floats allowed)",
"elif",
"(",
"isinstance",
"(",
"self",
".",
"config",
"[",
"k",
"]",
",",
"int",
")",
"and",
"(",
"not",
"isinstance",
"(",
"v",
",",
"int",
")",
"and",
"not",
"(",
"isinstance",
"(",
"v",
",",
"float",
")",
"and",
"v",
".",
"is_integer",
"(",
")",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"{}: option '{}' must be a int type.\"",
".",
"format",
"(",
"*",
"args",
")",
")",
"# Floats (integers allowed)",
"elif",
"isinstance",
"(",
"self",
".",
"config",
"[",
"k",
"]",
",",
"float",
")",
"and",
"not",
"isinstance",
"(",
"v",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"{}: option '{}' must be a float type.\"",
".",
"format",
"(",
"*",
"args",
")",
")",
"# Basic iterables (list, tuple, sets)",
"elif",
"isinstance",
"(",
"self",
".",
"config",
"[",
"k",
"]",
",",
"(",
"list",
",",
"tuple",
",",
"set",
")",
")",
"and",
"not",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"{}: option '{}' must be a float type.\"",
".",
"format",
"(",
"*",
"args",
")",
")",
"# Dictionaries",
"elif",
"isinstance",
"(",
"self",
".",
"config",
"[",
"k",
"]",
",",
"(",
"dict",
",",
"OrderedDict",
")",
")",
"and",
"not",
"isinstance",
"(",
"v",
",",
"(",
"dict",
",",
"OrderedDict",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"{}: option '{}' must be a dict type.\"",
".",
"format",
"(",
"*",
"args",
")",
")"
] | Validate options. | [
"Validate",
"options",
"."
] | train | https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/plugin.py#L43-L67 |
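A toy illustration of the integer branch above, independent of pyspelling: an int-typed default accepts whole floats such as 3.0 but rejects 3.5 and strings.

for v in (3, 3.0, 3.5, 'three'):
    ok = isinstance(v, int) or (isinstance(v, float) and v.is_integer())
    print(v, 'accepted' if ok else 'rejected')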
mrcagney/make_gtfs | make_gtfs/main.py | get_duration | def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600 | python | def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600 | [
"def",
"get_duration",
"(",
"timestr1",
",",
"timestr2",
",",
"units",
"=",
"'s'",
")",
":",
"valid_units",
"=",
"[",
"'s'",
",",
"'min'",
",",
"'h'",
"]",
"assert",
"units",
"in",
"valid_units",
",",
"\"Units must be one of {!s}\"",
".",
"format",
"(",
"valid_units",
")",
"duration",
"=",
"(",
"gt",
".",
"timestr_to_seconds",
"(",
"timestr2",
")",
"-",
"gt",
".",
"timestr_to_seconds",
"(",
"timestr1",
")",
")",
"if",
"units",
"==",
"'s'",
":",
"return",
"duration",
"elif",
"units",
"==",
"'min'",
":",
"return",
"duration",
"/",
"60",
"else",
":",
"return",
"duration",
"/",
"3600"
] | Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``. | [
"Return",
"the",
"duration",
"of",
"the",
"time",
"period",
"between",
"the",
"first",
"and",
"second",
"time",
"string",
"in",
"the",
"given",
"units",
".",
"Allowable",
"units",
"are",
"s",
"(",
"seconds",
")",
"min",
"(",
"minutes",
")",
"h",
"(",
"hours",
")",
".",
"Assume",
"timestr1",
"<",
"timestr2",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L10-L30 |
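A standalone sketch of the arithmetic above; the local helper mimics what gt.timestr_to_seconds is assumed to do with 'HH:MM:SS' strings.

def timestr_to_seconds(t):
    h, m, s = (int(x) for x in t.split(':'))
    return 3600 * h + 60 * m + s

seconds = timestr_to_seconds('10:30:00') - timestr_to_seconds('07:00:00')
print(seconds, seconds / 60, seconds / 3600)  # 12600 210.0 3.5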
mrcagney/make_gtfs | make_gtfs/main.py | build_stop_ids | def build_stop_ids(shape_id):
"""
Create a pair of stop IDs based on the given shape ID.
"""
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)] | python | def build_stop_ids(shape_id):
"""
Create a pair of stop IDs based on the given shape ID.
"""
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)] | [
"def",
"build_stop_ids",
"(",
"shape_id",
")",
":",
"return",
"[",
"cs",
".",
"SEP",
".",
"join",
"(",
"[",
"'stp'",
",",
"shape_id",
",",
"str",
"(",
"i",
")",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
"]"
] | Create a pair of stop IDs based on the given shape ID. | [
"Create",
"a",
"pair",
"of",
"stop",
"IDs",
"based",
"on",
"the",
"given",
"shape",
"ID",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L32-L36 |
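A minimal sketch of the stop-ID scheme above; cs.SEP is assumed to be '-', which the hard-coded '-0'/'-1' suffixes in build_shapes suggest.

SEP = '-'  # assumption for illustration
shape_id = 'shp0001-0'
print([SEP.join(['stp', shape_id, str(i)]) for i in range(2)])
# ['stp-shp0001-0-0', 'stp-shp0001-0-1']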
mrcagney/make_gtfs | make_gtfs/main.py | build_agency | def build_agency(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
"""
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0]) | python | def build_agency(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
"""
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0]) | [
"def",
"build_agency",
"(",
"pfeed",
")",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"{",
"'agency_name'",
":",
"pfeed",
".",
"meta",
"[",
"'agency_name'",
"]",
".",
"iat",
"[",
"0",
"]",
",",
"'agency_url'",
":",
"pfeed",
".",
"meta",
"[",
"'agency_url'",
"]",
".",
"iat",
"[",
"0",
"]",
",",
"'agency_timezone'",
":",
"pfeed",
".",
"meta",
"[",
"'agency_timezone'",
"]",
".",
"iat",
"[",
"0",
"]",
",",
"}",
",",
"index",
"=",
"[",
"0",
"]",
")"
] | Given a ProtoFeed, return a DataFrame representing ``agency.txt`` | [
"Given",
"a",
"ProtoFeed",
"return",
"a",
"DataFrame",
"representing",
"agency",
".",
"txt"
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L45-L53 |
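A minimal sketch of the one-row agency table produced above; assumes pandas is installed and uses placeholder metadata values.

import pandas as pd

agency = pd.DataFrame({
    'agency_name': 'Example Transit',       # placeholder values
    'agency_url': 'http://example.com',
    'agency_timezone': 'Pacific/Auckland',
}, index=[0])
print(agency)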
mrcagney/make_gtfs | make_gtfs/main.py | build_calendar_etc | def build_calendar_etc(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively.
"""
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window | python | def build_calendar_etc(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively.
"""
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window | [
"def",
"build_calendar_etc",
"(",
"pfeed",
")",
":",
"windows",
"=",
"pfeed",
".",
"service_windows",
".",
"copy",
"(",
")",
"# Create a service ID for each distinct days_active field and map the",
"# service windows to those service IDs",
"def",
"get_sid",
"(",
"bitlist",
")",
":",
"return",
"'srv'",
"+",
"''",
".",
"join",
"(",
"[",
"str",
"(",
"b",
")",
"for",
"b",
"in",
"bitlist",
"]",
")",
"weekdays",
"=",
"[",
"'monday'",
",",
"'tuesday'",
",",
"'wednesday'",
",",
"'thursday'",
",",
"'friday'",
",",
"'saturday'",
",",
"'sunday'",
"]",
"bitlists",
"=",
"set",
"(",
")",
"# Create a dictionary <service window ID> -> <service ID>",
"d",
"=",
"dict",
"(",
")",
"for",
"index",
",",
"window",
"in",
"windows",
".",
"iterrows",
"(",
")",
":",
"bitlist",
"=",
"window",
"[",
"weekdays",
"]",
".",
"tolist",
"(",
")",
"d",
"[",
"window",
"[",
"'service_window_id'",
"]",
"]",
"=",
"get_sid",
"(",
"bitlist",
")",
"bitlists",
".",
"add",
"(",
"tuple",
"(",
"bitlist",
")",
")",
"service_by_window",
"=",
"d",
"# Create calendar",
"start_date",
"=",
"pfeed",
".",
"meta",
"[",
"'start_date'",
"]",
".",
"iat",
"[",
"0",
"]",
"end_date",
"=",
"pfeed",
".",
"meta",
"[",
"'end_date'",
"]",
".",
"iat",
"[",
"0",
"]",
"F",
"=",
"[",
"]",
"for",
"bitlist",
"in",
"bitlists",
":",
"F",
".",
"append",
"(",
"[",
"get_sid",
"(",
"bitlist",
")",
"]",
"+",
"list",
"(",
"bitlist",
")",
"+",
"[",
"start_date",
",",
"end_date",
"]",
")",
"calendar",
"=",
"pd",
".",
"DataFrame",
"(",
"F",
",",
"columns",
"=",
"(",
"[",
"'service_id'",
"]",
"+",
"weekdays",
"+",
"[",
"'start_date'",
",",
"'end_date'",
"]",
")",
")",
"return",
"calendar",
",",
"service_by_window"
] | Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively. | [
"Given",
"a",
"ProtoFeed",
"return",
"a",
"DataFrame",
"representing",
"calendar",
".",
"txt",
"and",
"a",
"dictionary",
"of",
"the",
"form",
"<service",
"window",
"ID",
">",
"-",
">",
"<service",
"ID",
">",
"respectively",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L55-L90 |
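A standalone sketch of the service-ID encoding above: one bit per weekday, Monday through Sunday, prefixed with 'srv' (values are illustrative).

weekday_bits = [1, 1, 1, 1, 1, 0, 0]  # a Monday-Friday service window
service_id = 'srv' + ''.join(str(b) for b in weekday_bits)
print(service_id)  # srv1111100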
mrcagney/make_gtfs | make_gtfs/main.py | build_routes | def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f | python | def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f | [
"def",
"build_routes",
"(",
"pfeed",
")",
":",
"f",
"=",
"pfeed",
".",
"frequencies",
"[",
"[",
"'route_short_name'",
",",
"'route_long_name'",
",",
"'route_type'",
",",
"'shape_id'",
"]",
"]",
".",
"drop_duplicates",
"(",
")",
".",
"copy",
"(",
")",
"# Create route IDs",
"f",
"[",
"'route_id'",
"]",
"=",
"'r'",
"+",
"f",
"[",
"'route_short_name'",
"]",
".",
"map",
"(",
"str",
")",
"del",
"f",
"[",
"'shape_id'",
"]",
"return",
"f"
] | Given a ProtoFeed, return a DataFrame representing ``routes.txt``. | [
"Given",
"a",
"ProtoFeed",
"return",
"a",
"DataFrame",
"representing",
"routes",
".",
"txt",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L92-L104 |
mrcagney/make_gtfs | make_gtfs/main.py | build_shapes | def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat']) | python | def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat']) | [
"def",
"build_shapes",
"(",
"pfeed",
")",
":",
"rows",
"=",
"[",
"]",
"for",
"shape",
",",
"geom",
"in",
"pfeed",
".",
"shapes",
"[",
"[",
"'shape_id'",
",",
"'geometry'",
"]",
"]",
".",
"itertuples",
"(",
"index",
"=",
"False",
")",
":",
"if",
"shape",
"not",
"in",
"pfeed",
".",
"shapes_extra",
":",
"continue",
"if",
"pfeed",
".",
"shapes_extra",
"[",
"shape",
"]",
"==",
"2",
":",
"# Add shape and its reverse",
"shid",
"=",
"shape",
"+",
"'-1'",
"new_rows",
"=",
"[",
"[",
"shid",
",",
"i",
",",
"lon",
",",
"lat",
"]",
"for",
"i",
",",
"(",
"lon",
",",
"lat",
")",
"in",
"enumerate",
"(",
"geom",
".",
"coords",
")",
"]",
"rows",
".",
"extend",
"(",
"new_rows",
")",
"shid",
"=",
"shape",
"+",
"'-0'",
"new_rows",
"=",
"[",
"[",
"shid",
",",
"i",
",",
"lon",
",",
"lat",
"]",
"for",
"i",
",",
"(",
"lon",
",",
"lat",
")",
"in",
"enumerate",
"(",
"reversed",
"(",
"geom",
".",
"coords",
")",
")",
"]",
"rows",
".",
"extend",
"(",
"new_rows",
")",
"else",
":",
"# Add shape",
"shid",
"=",
"'{}{}{}'",
".",
"format",
"(",
"shape",
",",
"cs",
".",
"SEP",
",",
"pfeed",
".",
"shapes_extra",
"[",
"shape",
"]",
")",
"new_rows",
"=",
"[",
"[",
"shid",
",",
"i",
",",
"lon",
",",
"lat",
"]",
"for",
"i",
",",
"(",
"lon",
",",
"lat",
")",
"in",
"enumerate",
"(",
"geom",
".",
"coords",
")",
"]",
"rows",
".",
"extend",
"(",
"new_rows",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"rows",
",",
"columns",
"=",
"[",
"'shape_id'",
",",
"'shape_pt_sequence'",
",",
"'shape_pt_lon'",
",",
"'shape_pt_lat'",
"]",
")"
] | Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions. | [
"Given",
"a",
"ProtoFeed",
"return",
"DataFrame",
"representing",
"shapes",
".",
"txt",
".",
"Only",
"use",
"shape",
"IDs",
"that",
"occur",
"in",
"both",
"pfeed",
".",
"shapes",
"and",
"pfeed",
".",
"frequencies",
".",
"Create",
"reversed",
"shapes",
"where",
"routes",
"traverse",
"shapes",
"in",
"both",
"directions",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L106-L137 |
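A minimal sketch of the forward and reversed rows built above for a two-direction shape; the coordinates are illustrative and the '-1'/'-0' suffixes follow the code.

coords = [(174.00, -36.00), (174.05, -36.02), (174.10, -36.05)]
forward = [['shpA-1', i, lon, lat] for i, (lon, lat) in enumerate(coords)]
reverse = [['shpA-0', i, lon, lat] for i, (lon, lat) in enumerate(reversed(coords))]
print(forward[0], reverse[0])  # ['shpA-1', 0, 174.0, -36.0] ['shpA-0', 0, 174.1, -36.05]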
mrcagney/make_gtfs | make_gtfs/main.py | build_stops | def build_stops(pfeed, shapes=None):
"""
Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops.
"""
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops | python | def build_stops(pfeed, shapes=None):
"""
Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops.
"""
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops | [
"def",
"build_stops",
"(",
"pfeed",
",",
"shapes",
"=",
"None",
")",
":",
"if",
"pfeed",
".",
"stops",
"is",
"not",
"None",
":",
"stops",
"=",
"pfeed",
".",
"stops",
".",
"copy",
"(",
")",
"else",
":",
"if",
"shapes",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Must input shapes built by build_shapes()'",
")",
"geo_shapes",
"=",
"gt",
".",
"geometrize_shapes",
"(",
"shapes",
")",
"rows",
"=",
"[",
"]",
"for",
"shape",
",",
"geom",
"in",
"geo_shapes",
"[",
"[",
"'shape_id'",
",",
"'geometry'",
"]",
"]",
".",
"itertuples",
"(",
"index",
"=",
"False",
")",
":",
"stop_ids",
"=",
"build_stop_ids",
"(",
"shape",
")",
"stop_names",
"=",
"build_stop_names",
"(",
"shape",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"stop_id",
"=",
"stop_ids",
"[",
"i",
"]",
"stop_name",
"=",
"stop_names",
"[",
"i",
"]",
"stop_lon",
",",
"stop_lat",
"=",
"geom",
".",
"interpolate",
"(",
"i",
",",
"normalized",
"=",
"True",
")",
".",
"coords",
"[",
"0",
"]",
"rows",
".",
"append",
"(",
"[",
"stop_id",
",",
"stop_name",
",",
"stop_lon",
",",
"stop_lat",
"]",
")",
"stops",
"=",
"(",
"pd",
".",
"DataFrame",
"(",
"rows",
",",
"columns",
"=",
"[",
"'stop_id'",
",",
"'stop_name'",
",",
"'stop_lon'",
",",
"'stop_lat'",
"]",
")",
".",
"drop_duplicates",
"(",
"subset",
"=",
"[",
"'stop_lon'",
",",
"'stop_lat'",
"]",
")",
")",
"return",
"stops"
] | Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops. | [
"Given",
"a",
"ProtoFeed",
"return",
"a",
"DataFrame",
"representing",
"stops",
".",
"txt",
".",
"If",
"pfeed",
".",
"stops",
"is",
"not",
"None",
"then",
"return",
"that",
".",
"Otherwise",
"require",
"built",
"shapes",
"output",
"by",
":",
"func",
":",
"build_shapes",
"create",
"one",
"stop",
"at",
"the",
"beginning",
"(",
"the",
"first",
"point",
")",
"of",
"each",
"shape",
"and",
"one",
"at",
"the",
"end",
"(",
"the",
"last",
"point",
")",
"of",
"each",
"shape",
"and",
"drop",
"stops",
"with",
"duplicate",
"coordinates",
".",
"Note",
"that",
"this",
"will",
"yield",
"one",
"stop",
"for",
"shapes",
"that",
"are",
"loops",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L139-L174 |
mrcagney/make_gtfs | make_gtfs/main.py | build_trips | def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id']) | python | def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id']) | [
"def",
"build_trips",
"(",
"pfeed",
",",
"routes",
",",
"service_by_window",
")",
":",
"# Put together the route and service data",
"routes",
"=",
"pd",
".",
"merge",
"(",
"routes",
"[",
"[",
"'route_id'",
",",
"'route_short_name'",
"]",
"]",
",",
"pfeed",
".",
"frequencies",
")",
"routes",
"=",
"pd",
".",
"merge",
"(",
"routes",
",",
"pfeed",
".",
"service_windows",
")",
"# For each row in routes, add trips at the specified frequency in",
"# the specified direction",
"rows",
"=",
"[",
"]",
"for",
"index",
",",
"row",
"in",
"routes",
".",
"iterrows",
"(",
")",
":",
"shape",
"=",
"row",
"[",
"'shape_id'",
"]",
"route",
"=",
"row",
"[",
"'route_id'",
"]",
"window",
"=",
"row",
"[",
"'service_window_id'",
"]",
"start",
",",
"end",
"=",
"row",
"[",
"[",
"'start_time'",
",",
"'end_time'",
"]",
"]",
".",
"values",
"duration",
"=",
"get_duration",
"(",
"start",
",",
"end",
",",
"'h'",
")",
"frequency",
"=",
"row",
"[",
"'frequency'",
"]",
"if",
"not",
"frequency",
":",
"# No trips during this service window",
"continue",
"# Rounding down occurs here if the duration isn't integral",
"# (bad input)",
"num_trips_per_direction",
"=",
"int",
"(",
"frequency",
"*",
"duration",
")",
"service",
"=",
"service_by_window",
"[",
"window",
"]",
"direction",
"=",
"row",
"[",
"'direction'",
"]",
"if",
"direction",
"==",
"2",
":",
"directions",
"=",
"[",
"0",
",",
"1",
"]",
"else",
":",
"directions",
"=",
"[",
"direction",
"]",
"for",
"direction",
"in",
"directions",
":",
"# Warning: this shape-ID-making logic needs to match that",
"# in ``build_shapes``",
"shid",
"=",
"'{}{}{}'",
".",
"format",
"(",
"shape",
",",
"cs",
".",
"SEP",
",",
"direction",
")",
"rows",
".",
"extend",
"(",
"[",
"[",
"route",
",",
"cs",
".",
"SEP",
".",
"join",
"(",
"[",
"'t'",
",",
"route",
",",
"window",
",",
"start",
",",
"str",
"(",
"direction",
")",
",",
"str",
"(",
"i",
")",
"]",
")",
",",
"direction",
",",
"shid",
",",
"service",
"]",
"for",
"i",
"in",
"range",
"(",
"num_trips_per_direction",
")",
"]",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"rows",
",",
"columns",
"=",
"[",
"'route_id'",
",",
"'trip_id'",
",",
"'direction_id'",
",",
"'shape_id'",
",",
"'service_id'",
"]",
")"
] | Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later. | [
"Given",
"a",
"ProtoFeed",
"and",
"its",
"corresponding",
"routes",
"(",
"DataFrame",
")",
"service",
"-",
"by",
"-",
"window",
"(",
"dictionary",
")",
"return",
"a",
"DataFrame",
"representing",
"trips",
".",
"txt",
".",
"Trip",
"IDs",
"encode",
"route",
"direction",
"and",
"service",
"window",
"information",
"to",
"make",
"it",
"easy",
"to",
"compute",
"stop",
"times",
"later",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L176-L225 |
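An arithmetic sketch of the trips-per-direction count above; the numbers are illustrative rather than taken from a real frequencies table.

frequency = 4   # trips per hour in the service window
duration = 3.0  # window length in hours, as from get_duration(..., 'h')
num_trips_per_direction = int(frequency * duration)
print(num_trips_per_direction)  # 12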
mrcagney/make_gtfs | make_gtfs/main.py | buffer_side | def buffer_side(linestring, side, buffer):
"""
Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon.
"""
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b | python | def buffer_side(linestring, side, buffer):
"""
Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon.
"""
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b | [
"def",
"buffer_side",
"(",
"linestring",
",",
"side",
",",
"buffer",
")",
":",
"b",
"=",
"linestring",
".",
"buffer",
"(",
"buffer",
",",
"cap_style",
"=",
"2",
")",
"if",
"side",
"in",
"[",
"'left'",
",",
"'right'",
"]",
"and",
"buffer",
">",
"0",
":",
"# Make a tiny buffer to split the normal-size buffer",
"# in half across the linestring",
"eps",
"=",
"min",
"(",
"buffer",
"/",
"2",
",",
"0.001",
")",
"b0",
"=",
"linestring",
".",
"buffer",
"(",
"eps",
",",
"cap_style",
"=",
"3",
")",
"diff",
"=",
"b",
".",
"difference",
"(",
"b0",
")",
"polys",
"=",
"so",
".",
"polygonize",
"(",
"diff",
")",
"# Buffer sides slightly to include original linestring",
"if",
"side",
"==",
"'left'",
":",
"b",
"=",
"list",
"(",
"polys",
")",
"[",
"0",
"]",
".",
"buffer",
"(",
"1.1",
"*",
"eps",
")",
"else",
":",
"b",
"=",
"list",
"(",
"polys",
")",
"[",
"-",
"1",
"]",
".",
"buffer",
"(",
"1.1",
"*",
"eps",
")",
"return",
"b"
] | Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon. | [
"Given",
"a",
"Shapely",
"LineString",
"a",
"side",
"of",
"the",
"LineString",
"(",
"string",
";",
"left",
"=",
"left",
"hand",
"side",
"of",
"LineString",
"right",
"=",
"right",
"hand",
"side",
"of",
"LineString",
"or",
"both",
"=",
"both",
"sides",
")",
"and",
"a",
"buffer",
"size",
"in",
"the",
"distance",
"units",
"of",
"the",
"LineString",
"buffer",
"the",
"LineString",
"on",
"the",
"given",
"side",
"by",
"the",
"buffer",
"size",
"and",
"return",
"the",
"resulting",
"Shapely",
"polygon",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L227-L250 |
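A hedged usage sketch; it assumes shapely is installed and that buffer_side above is in scope, and it only compares areas rather than asserting which polygon is the left one.

from shapely.geometry import LineString

line = LineString([(0, 0), (100, 0)])
half = buffer_side(line, 'left', 10)   # buffer_side defined above
full = buffer_side(line, 'both', 10)
print(half.area < full.area)  # True: one side covers roughly half the buffer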
mrcagney/make_gtfs | make_gtfs/main.py | get_nearby_stops | def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
"""
Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString.
"""
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy() | python | def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
"""
Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString.
"""
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy() | [
"def",
"get_nearby_stops",
"(",
"geo_stops",
",",
"linestring",
",",
"side",
",",
"buffer",
"=",
"cs",
".",
"BUFFER",
")",
":",
"b",
"=",
"buffer_side",
"(",
"linestring",
",",
"side",
",",
"buffer",
")",
"# Collect stops",
"return",
"geo_stops",
".",
"loc",
"[",
"geo_stops",
".",
"intersects",
"(",
"b",
")",
"]",
".",
"copy",
"(",
")"
] | Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString. | [
"Given",
"a",
"GeoDataFrame",
"of",
"stops",
"a",
"Shapely",
"LineString",
"in",
"the",
"same",
"coordinate",
"system",
"a",
"side",
"of",
"the",
"LineString",
"(",
"string",
";",
"left",
"=",
"left",
"hand",
"side",
"of",
"LineString",
"right",
"=",
"right",
"hand",
"side",
"of",
"LineString",
"or",
"both",
"=",
"both",
"sides",
")",
"and",
"a",
"buffer",
"in",
"the",
"distance",
"units",
"of",
"that",
"coordinate",
"system",
"do",
"the",
"following",
".",
"Return",
"a",
"GeoDataFrame",
"of",
"all",
"the",
"stops",
"that",
"lie",
"within",
"buffer",
"distance",
"units",
"to",
"the",
"side",
"of",
"the",
"LineString",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L252-L266 |
mrcagney/make_gtfs | make_gtfs/main.py | build_stop_times | def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g | python | def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g | [
"def",
"build_stop_times",
"(",
"pfeed",
",",
"routes",
",",
"shapes",
",",
"stops",
",",
"trips",
",",
"buffer",
"=",
"cs",
".",
"BUFFER",
")",
":",
"# Get the table of trips and add frequency and service window details",
"routes",
"=",
"(",
"routes",
".",
"filter",
"(",
"[",
"'route_id'",
",",
"'route_short_name'",
"]",
")",
".",
"merge",
"(",
"pfeed",
".",
"frequencies",
".",
"drop",
"(",
"[",
"'shape_id'",
"]",
",",
"axis",
"=",
"1",
")",
")",
")",
"trips",
"=",
"(",
"trips",
".",
"assign",
"(",
"service_window_id",
"=",
"lambda",
"x",
":",
"x",
".",
"trip_id",
".",
"map",
"(",
"lambda",
"y",
":",
"y",
".",
"split",
"(",
"cs",
".",
"SEP",
")",
"[",
"2",
"]",
")",
")",
".",
"merge",
"(",
"routes",
")",
")",
"# Get the geometries of ``shapes`` and not ``pfeed.shapes``",
"geometry_by_shape",
"=",
"dict",
"(",
"gt",
".",
"geometrize_shapes",
"(",
"shapes",
",",
"use_utm",
"=",
"True",
")",
".",
"filter",
"(",
"[",
"'shape_id'",
",",
"'geometry'",
"]",
")",
".",
"values",
")",
"# Save on distance computations by memoizing",
"dist_by_stop_by_shape",
"=",
"{",
"shape",
":",
"{",
"}",
"for",
"shape",
"in",
"geometry_by_shape",
"}",
"def",
"compute_stops_dists_times",
"(",
"geo_stops",
",",
"linestring",
",",
"shape",
",",
"start_time",
",",
"end_time",
")",
":",
"\"\"\"\n Given a GeoDataFrame of stops on one side of a given Shapely\n LineString with given shape ID, compute distances and departure\n times of a trip traversing the LineString from start to end\n at the given start and end times (in seconds past midnight)\n and stopping at the stops encountered along the way.\n Do not assume that the stops are ordered by trip encounter.\n Return three lists of the same length: the stop IDs in order\n that the trip encounters them, the shape distances traveled\n along distances at the stops, and the times the stops are\n encountered, respectively.\n \"\"\"",
"g",
"=",
"geo_stops",
".",
"copy",
"(",
")",
"dists_and_stops",
"=",
"[",
"]",
"for",
"i",
",",
"stop",
"in",
"enumerate",
"(",
"g",
"[",
"'stop_id'",
"]",
".",
"values",
")",
":",
"if",
"stop",
"in",
"dist_by_stop_by_shape",
"[",
"shape",
"]",
":",
"d",
"=",
"dist_by_stop_by_shape",
"[",
"shape",
"]",
"[",
"stop",
"]",
"else",
":",
"d",
"=",
"gt",
".",
"get_segment_length",
"(",
"linestring",
",",
"g",
".",
"geometry",
".",
"iat",
"[",
"i",
"]",
")",
"/",
"1000",
"# km",
"dist_by_stop_by_shape",
"[",
"shape",
"]",
"[",
"stop",
"]",
"=",
"d",
"dists_and_stops",
".",
"append",
"(",
"(",
"d",
",",
"stop",
")",
")",
"dists",
",",
"stops",
"=",
"zip",
"(",
"*",
"sorted",
"(",
"dists_and_stops",
")",
")",
"D",
"=",
"linestring",
".",
"length",
"/",
"1000",
"dists_are_reasonable",
"=",
"all",
"(",
"[",
"d",
"<",
"D",
"+",
"100",
"for",
"d",
"in",
"dists",
"]",
")",
"if",
"not",
"dists_are_reasonable",
":",
"# Assume equal distances between stops :-(",
"n",
"=",
"len",
"(",
"stops",
")",
"delta",
"=",
"D",
"/",
"(",
"n",
"-",
"1",
")",
"dists",
"=",
"[",
"i",
"*",
"delta",
"for",
"i",
"in",
"range",
"(",
"n",
")",
"]",
"# Compute times using distances, start and end stop times,",
"# and linear interpolation",
"t0",
",",
"t1",
"=",
"start_time",
",",
"end_time",
"d0",
",",
"d1",
"=",
"dists",
"[",
"0",
"]",
",",
"dists",
"[",
"-",
"1",
"]",
"# Interpolate",
"times",
"=",
"np",
".",
"interp",
"(",
"dists",
",",
"[",
"d0",
",",
"d1",
"]",
",",
"[",
"t0",
",",
"t1",
"]",
")",
"return",
"stops",
",",
"dists",
",",
"times",
"# Iterate through trips and set stop times based on stop ID",
"# and service window frequency.",
"# Remember that every trip has a valid shape ID.",
"# Gather stops geographically from ``stops``.",
"rows",
"=",
"[",
"]",
"geo_stops",
"=",
"gt",
".",
"geometrize_stops",
"(",
"stops",
",",
"use_utm",
"=",
"True",
")",
"# Look on the side of the traffic side of street for this timezone",
"side",
"=",
"cs",
".",
"traffic_by_timezone",
"[",
"pfeed",
".",
"meta",
".",
"agency_timezone",
".",
"iat",
"[",
"0",
"]",
"]",
"for",
"index",
",",
"row",
"in",
"trips",
".",
"iterrows",
"(",
")",
":",
"shape",
"=",
"row",
"[",
"'shape_id'",
"]",
"geom",
"=",
"geometry_by_shape",
"[",
"shape",
"]",
"stops",
"=",
"get_nearby_stops",
"(",
"geo_stops",
",",
"geom",
",",
"side",
",",
"buffer",
"=",
"buffer",
")",
"# Don't make stop times for trips without nearby stops",
"if",
"stops",
".",
"empty",
":",
"continue",
"length",
"=",
"geom",
".",
"length",
"/",
"1000",
"# km",
"speed",
"=",
"row",
"[",
"'speed'",
"]",
"# km/h",
"duration",
"=",
"int",
"(",
"(",
"length",
"/",
"speed",
")",
"*",
"3600",
")",
"# seconds",
"frequency",
"=",
"row",
"[",
"'frequency'",
"]",
"if",
"not",
"frequency",
":",
"# No stop times for this trip/frequency combo",
"continue",
"headway",
"=",
"3600",
"/",
"frequency",
"# seconds",
"trip",
"=",
"row",
"[",
"'trip_id'",
"]",
"__",
",",
"route",
",",
"window",
",",
"base_timestr",
",",
"direction",
",",
"i",
"=",
"(",
"trip",
".",
"split",
"(",
"cs",
".",
"SEP",
")",
")",
"direction",
"=",
"int",
"(",
"direction",
")",
"base_time",
"=",
"gt",
".",
"timestr_to_seconds",
"(",
"base_timestr",
")",
"start_time",
"=",
"base_time",
"+",
"headway",
"*",
"int",
"(",
"i",
")",
"end_time",
"=",
"start_time",
"+",
"duration",
"stops",
",",
"dists",
",",
"times",
"=",
"compute_stops_dists_times",
"(",
"stops",
",",
"geom",
",",
"shape",
",",
"start_time",
",",
"end_time",
")",
"new_rows",
"=",
"[",
"[",
"trip",
",",
"stop",
",",
"j",
",",
"time",
",",
"time",
",",
"dist",
"]",
"for",
"j",
",",
"(",
"stop",
",",
"time",
",",
"dist",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"stops",
",",
"times",
",",
"dists",
")",
")",
"]",
"rows",
".",
"extend",
"(",
"new_rows",
")",
"g",
"=",
"pd",
".",
"DataFrame",
"(",
"rows",
",",
"columns",
"=",
"[",
"'trip_id'",
",",
"'stop_id'",
",",
"'stop_sequence'",
",",
"'arrival_time'",
",",
"'departure_time'",
",",
"'shape_dist_traveled'",
"]",
")",
"# Convert seconds back to time strings",
"g",
"[",
"[",
"'arrival_time'",
",",
"'departure_time'",
"]",
"]",
"=",
"g",
"[",
"[",
"'arrival_time'",
",",
"'departure_time'",
"]",
"]",
".",
"applymap",
"(",
"lambda",
"x",
":",
"gt",
".",
"timestr_to_seconds",
"(",
"x",
",",
"inverse",
"=",
"True",
")",
")",
"return",
"g"
] | Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops. | [
"Given",
"a",
"ProtoFeed",
"and",
"its",
"corresponding",
"routes",
"(",
"DataFrame",
")",
"shapes",
"(",
"DataFrame",
")",
"stops",
"(",
"DataFrame",
")",
"trips",
"(",
"DataFrame",
")",
"return",
"DataFrame",
"representing",
"stop_times",
".",
"txt",
".",
"Includes",
"the",
"optional",
"shape_dist_traveled",
"column",
".",
"Don",
"t",
"make",
"stop",
"times",
"for",
"trips",
"with",
"no",
"nearby",
"stops",
"."
] | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L268-L384 |
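The departure times produced inside ``compute_stops_dists_times`` above come from a single ``np.interp`` call between the trip's start and end times. A standalone sketch of just that step, with made-up distances and times::

    import numpy as np

    # Distances (km) of the encountered stops along a 10 km shape
    dists = [0.0, 2.5, 6.0, 10.0]

    # Trip runs 08:00:00 to 08:30:00, expressed in seconds past midnight
    start_time, end_time = 8 * 3600, 8 * 3600 + 30 * 60

    # Same interpolation as in the function body
    times = np.interp(dists, [dists[0], dists[-1]], [start_time, end_time])
    print(times)  # [28800. 29250. 29880. 30600.]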
j0ack/flask-codemirror | flask_codemirror/widgets.py | CodeMirrorWidget._generate_content | def _generate_content(self):
"""Dumps content using JSON to send to CodeMirror"""
# concat into a dict
dic = self.config
dic['mode'] = self.language
if self.theme:
dic['theme'] = self.theme
# dumps with json
return json.dumps(dic, indent=8, separators=(',', ': ')) | python | def _generate_content(self):
"""Dumps content using JSON to send to CodeMirror"""
# concat into a dict
dic = self.config
dic['mode'] = self.language
if self.theme:
dic['theme'] = self.theme
# dumps with json
return json.dumps(dic, indent=8, separators=(',', ': ')) | [
"def",
"_generate_content",
"(",
"self",
")",
":",
"# concat into a dict",
"dic",
"=",
"self",
".",
"config",
"dic",
"[",
"'mode'",
"]",
"=",
"self",
".",
"language",
"if",
"self",
".",
"theme",
":",
"dic",
"[",
"'theme'",
"]",
"=",
"self",
".",
"theme",
"# dumps with json",
"return",
"json",
".",
"dumps",
"(",
"dic",
",",
"indent",
"=",
"8",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")"
] | Dumps content using JSON to send to CodeMirror | [
"Dumps",
"content",
"using",
"JSON",
"to",
"send",
"to",
"CodeMirror"
] | train | https://github.com/j0ack/flask-codemirror/blob/81ad831ff849b60bb34de5db727ad626ff3c9bdc/flask_codemirror/widgets.py#L70-L78 |
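The widget's ``_generate_content`` helper above only merges the configuration and dumps it as JSON. A standalone sketch of the same call, where the ``lineNumbers`` option and the ``python``/``monokai`` values are arbitrary example settings::

    import json

    config = {"lineNumbers": True}   # stands in for self.config
    config["mode"] = "python"        # stands in for self.language
    config["theme"] = "monokai"      # only added when a theme is set

    print(json.dumps(config, indent=8, separators=(",", ": ")))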
KeithSSmith/switcheo-python | switcheo/public_client.py | PublicClient.get_token_details | def get_token_details(self, show_listing_details=False, show_inactive=False):
"""
Function to fetch the available tokens available to trade on the Switcheo exchange.
Execution of this function is as follows::
get_token_details()
get_token_details(show_listing_details=True)
get_token_details(show_inactive=True)
get_token_details(show_listing_details=True, show_inactive=True)
The expected return result for this function is as follows::
{
'NEO': {
'hash': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'decimals': 8
},
'GAS': {
'hash': '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7',
'decimals': 8
},
'SWTH': {
'hash': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'decimals': 8
},
...
}
:param show_listing_details: Parameter flag to indicate whether or not to show the token listing details.
:type show_listing_details: bool
:param show_inactive: Flag to return the tokens that are no longer traded on the Switcheo Exchange.
:type show_inactive: bool
:return: Dictionary in the form of a JSON message with the available tokens for trade on the Switcheo exchange.
"""
api_params = {
"show_listing_details": show_listing_details,
"show_inactive": show_inactive
}
return self.request.get(path='/exchange/tokens', params=api_params) | python | def get_token_details(self, show_listing_details=False, show_inactive=False):
"""
Function to fetch the available tokens available to trade on the Switcheo exchange.
Execution of this function is as follows::
get_token_details()
get_token_details(show_listing_details=True)
get_token_details(show_inactive=True)
get_token_details(show_listing_details=True, show_inactive=True)
The expected return result for this function is as follows::
{
'NEO': {
'hash': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'decimals': 8
},
'GAS': {
'hash': '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7',
'decimals': 8
},
'SWTH': {
'hash': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'decimals': 8
},
...
}
:param show_listing_details: Parameter flag to indicate whether or not to show the token listing details.
:type show_listing_details: bool
:param show_inactive: Flag to return the tokens that are no longer traded on the Switcheo Exchange.
:type show_inactive: bool
:return: Dictionary in the form of a JSON message with the available tokens for trade on the Switcheo exchange.
"""
api_params = {
"show_listing_details": show_listing_details,
"show_inactive": show_inactive
}
return self.request.get(path='/exchange/tokens', params=api_params) | [
"def",
"get_token_details",
"(",
"self",
",",
"show_listing_details",
"=",
"False",
",",
"show_inactive",
"=",
"False",
")",
":",
"api_params",
"=",
"{",
"\"show_listing_details\"",
":",
"show_listing_details",
",",
"\"show_inactive\"",
":",
"show_inactive",
"}",
"return",
"self",
".",
"request",
".",
"get",
"(",
"path",
"=",
"'/exchange/tokens'",
",",
"params",
"=",
"api_params",
")"
] | Function to fetch the available tokens available to trade on the Switcheo exchange.
Execution of this function is as follows::
get_token_details()
get_token_details(show_listing_details=True)
get_token_details(show_inactive=True)
get_token_details(show_listing_details=True, show_inactive=True)
The expected return result for this function is as follows::
{
'NEO': {
'hash': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'decimals': 8
},
'GAS': {
'hash': '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7',
'decimals': 8
},
'SWTH': {
'hash': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'decimals': 8
},
...
}
:param show_listing_details: Parameter flag to indicate whether or not to show the token listing details.
:type show_listing_details: bool
:param show_inactive: Flag to return the tokens that are no longer traded on the Switcheo Exchange.
:type show_inactive: bool
:return: Dictionary in the form of a JSON message with the available tokens for trade on the Switcheo exchange. | [
"Function",
"to",
"fetch",
"the",
"available",
"tokens",
"available",
"to",
"trade",
"on",
"the",
"Switcheo",
"exchange",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/public_client.py#L94-L132 |
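A hedged usage sketch for the ``PublicClient`` records in this file. The import path and the default construction of the client are assumptions based on the file path ``switcheo/public_client.py``; the method name and the ``SWTH`` hash come from the record above::

    from switcheo.public_client import PublicClient  # assumed import path

    client = PublicClient()  # assumes the client can be built with defaults

    tokens = client.get_token_details(show_listing_details=False)
    print(tokens["SWTH"]["hash"])  # 'ab38352559b8b203bde5fddfa0b07d8b2525e132'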
KeithSSmith/switcheo-python | switcheo/public_client.py | PublicClient.get_candlesticks | def get_candlesticks(self, pair, start_time, end_time, interval):
"""
Function to fetch trading metrics from the past 24 hours for all trading pairs offered on the exchange.
Execution of this function is as follows::
get_candlesticks(pair="SWTH_NEO",
start_time=round(time.time()) - 350000,
end_time=round(time.time()),
interval=360))
The expected return result for this function is as follows::
[{
'time': '1533168000',
'open': '0.00046835',
'close': '0.00046835',
'high': '0.00046835',
'low': '0.00046835',
'volume': '240315335.0',
'quote_volume': '513110569018.0'
},{
'time': '1533081600',
'open': '0.00046835',
'close': '0.00046835',
'high': '0.00046835',
'low': '0.00046835',
'volume': '1170875.0',
'quote_volume': '2500000000.0'
},
...
]
:param pair: The trading pair used to request candle statistics.
:type pair: str
:param start_time: The start time (in epoch seconds) range for collecting candle statistics.
:type start_time: int
:param end_time: The end time (in epoch seconds) range for collecting candle statistics.
:type end_time: int
:param interval: The time interval (in minutes) for candle statistics. Allowed values: 1, 5, 30, 60, 360, 1440
:type interval: int
:return: List of dictionaries containing the candles statistics based on the parameter filters.
"""
api_params = {
"pair": pair,
"interval": interval,
"start_time": start_time,
"end_time": end_time,
"contract_hash": self.contract_hash
}
return self.request.get(path='/tickers/candlesticks', params=api_params) | python | def get_candlesticks(self, pair, start_time, end_time, interval):
"""
Function to fetch trading metrics from the past 24 hours for all trading pairs offered on the exchange.
Execution of this function is as follows::
get_candlesticks(pair="SWTH_NEO",
start_time=round(time.time()) - 350000,
end_time=round(time.time()),
interval=360))
The expected return result for this function is as follows::
[{
'time': '1533168000',
'open': '0.00046835',
'close': '0.00046835',
'high': '0.00046835',
'low': '0.00046835',
'volume': '240315335.0',
'quote_volume': '513110569018.0'
},{
'time': '1533081600',
'open': '0.00046835',
'close': '0.00046835',
'high': '0.00046835',
'low': '0.00046835',
'volume': '1170875.0',
'quote_volume': '2500000000.0'
},
...
]
:param pair: The trading pair used to request candle statistics.
:type pair: str
:param start_time: The start time (in epoch seconds) range for collecting candle statistics.
:type start_time: int
:param end_time: The end time (in epoch seconds) range for collecting candle statistics.
:type end_time: int
:param interval: The time interval (in minutes) for candle statistics. Allowed values: 1, 5, 30, 60, 360, 1440
:type interval: int
:return: List of dictionaries containing the candles statistics based on the parameter filters.
"""
api_params = {
"pair": pair,
"interval": interval,
"start_time": start_time,
"end_time": end_time,
"contract_hash": self.contract_hash
}
return self.request.get(path='/tickers/candlesticks', params=api_params) | [
"def",
"get_candlesticks",
"(",
"self",
",",
"pair",
",",
"start_time",
",",
"end_time",
",",
"interval",
")",
":",
"api_params",
"=",
"{",
"\"pair\"",
":",
"pair",
",",
"\"interval\"",
":",
"interval",
",",
"\"start_time\"",
":",
"start_time",
",",
"\"end_time\"",
":",
"end_time",
",",
"\"contract_hash\"",
":",
"self",
".",
"contract_hash",
"}",
"return",
"self",
".",
"request",
".",
"get",
"(",
"path",
"=",
"'/tickers/candlesticks'",
",",
"params",
"=",
"api_params",
")"
] | Function to fetch trading metrics from the past 24 hours for all trading pairs offered on the exchange.
Execution of this function is as follows::
get_candlesticks(pair="SWTH_NEO",
start_time=round(time.time()) - 350000,
end_time=round(time.time()),
interval=360))
The expected return result for this function is as follows::
[{
'time': '1533168000',
'open': '0.00046835',
'close': '0.00046835',
'high': '0.00046835',
'low': '0.00046835',
'volume': '240315335.0',
'quote_volume': '513110569018.0'
},{
'time': '1533081600',
'open': '0.00046835',
'close': '0.00046835',
'high': '0.00046835',
'low': '0.00046835',
'volume': '1170875.0',
'quote_volume': '2500000000.0'
},
...
]
:param pair: The trading pair used to request candle statistics.
:type pair: str
:param start_time: The start time (in epoch seconds) range for collecting candle statistics.
:type start_time: int
:param end_time: The end time (in epoch seconds) range for collecting candle statistics.
:type end_time: int
:param interval: The time interval (in minutes) for candle statistics. Allowed values: 1, 5, 30, 60, 360, 1440
:type interval: int
:return: List of dictionaries containing the candles statistics based on the parameter filters. | [
"Function",
"to",
"fetch",
"trading",
"metrics",
"from",
"the",
"past",
"24",
"hours",
"for",
"all",
"trading",
"pairs",
"offered",
"on",
"the",
"exchange",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/public_client.py#L134-L183 |
KeithSSmith/switcheo-python | switcheo/public_client.py | PublicClient.get_last_price | def get_last_price(self, symbols=None, bases=None):
"""
Function to fetch the most recently executed trade on the order book for each trading pair.
Execution of this function is as follows::
get_last_price()
get_last_price(symbols=['SWTH','GAS'])
get_last_price(bases=['NEO'])
get_last_price(symbols=['SWTH','GAS'], bases=['NEO'])
The expected return result for this function is as follows::
{
'SWTH': {
'GAS': '0.0015085',
'NEO': '0.00040911'
},
'GAS': {
'NEO': '0.30391642'
},
....
}
:param symbols: The trading symbols to retrieve the last price on the Switcheo Exchange.
:type symbols: list
:param bases: The base pair to retrieve the last price of symbols on the Switcheo Exchange.
:type bases: list
:return: Dictionary of trade symbols with the most recently executed trade price.
"""
api_params = {}
if symbols is not None:
api_params['symbols'] = symbols
if bases is not None:
api_params['bases'] = bases
return self.request.get(path='/tickers/last_price', params=api_params) | python | def get_last_price(self, symbols=None, bases=None):
"""
Function to fetch the most recently executed trade on the order book for each trading pair.
Execution of this function is as follows::
get_last_price()
get_last_price(symbols=['SWTH','GAS'])
get_last_price(bases=['NEO'])
get_last_price(symbols=['SWTH','GAS'], bases=['NEO'])
The expected return result for this function is as follows::
{
'SWTH': {
'GAS': '0.0015085',
'NEO': '0.00040911'
},
'GAS': {
'NEO': '0.30391642'
},
....
}
:param symbols: The trading symbols to retrieve the last price on the Switcheo Exchange.
:type symbols: list
:param bases: The base pair to retrieve the last price of symbols on the Switcheo Exchange.
:type bases: list
:return: Dictionary of trade symbols with the most recently executed trade price.
"""
api_params = {}
if symbols is not None:
api_params['symbols'] = symbols
if bases is not None:
api_params['bases'] = bases
return self.request.get(path='/tickers/last_price', params=api_params) | [
"def",
"get_last_price",
"(",
"self",
",",
"symbols",
"=",
"None",
",",
"bases",
"=",
"None",
")",
":",
"api_params",
"=",
"{",
"}",
"if",
"symbols",
"is",
"not",
"None",
":",
"api_params",
"[",
"'symbols'",
"]",
"=",
"symbols",
"if",
"bases",
"is",
"not",
"None",
":",
"api_params",
"[",
"'bases'",
"]",
"=",
"bases",
"return",
"self",
".",
"request",
".",
"get",
"(",
"path",
"=",
"'/tickers/last_price'",
",",
"params",
"=",
"api_params",
")"
] | Function to fetch the most recently executed trade on the order book for each trading pair.
Execution of this function is as follows::
get_last_price()
get_last_price(symbols=['SWTH','GAS'])
get_last_price(bases=['NEO'])
get_last_price(symbols=['SWTH','GAS'], bases=['NEO'])
The expected return result for this function is as follows::
{
'SWTH': {
'GAS': '0.0015085',
'NEO': '0.00040911'
},
'GAS': {
'NEO': '0.30391642'
},
....
}
:param symbols: The trading symbols to retrieve the last price on the Switcheo Exchange.
:type symbols: list
:param bases: The base pair to retrieve the last price of symbols on the Switcheo Exchange.
:type bases: list
:return: Dictionary of trade symbols with the most recently executed trade price. | [
"Function",
"to",
"fetch",
"the",
"most",
"recently",
"executed",
"trade",
"on",
"the",
"order",
"book",
"for",
"each",
"trading",
"pair",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/public_client.py#L218-L252 |
KeithSSmith/switcheo-python | switcheo/public_client.py | PublicClient.get_offers | def get_offers(self, pair="SWTH_NEO"):
"""
Function to fetch the open orders on the order book for the trade pair requested.
Execution of this function is as follows::
get_offers(pair="SWTH_NEO")
The expected return result for this function is as follows::
[{
'id': '2716c0ca-59bb-4c86-8ee4-6b9528d0e5d2',
'offer_asset': 'GAS',
'want_asset': 'NEO',
'available_amount': 9509259,
'offer_amount': 30000000,
'want_amount': 300000000,
'address': '7f345d1a031c4099540dbbbc220d4e5640ab2b6f'
}, {
....
}]
:param pair: The trading pair that will be used to request open offers on the order book.
:type pair: str
:return: List of dictionaries consisting of the open offers for the requested trading pair.
"""
api_params = {
"pair": pair,
"contract_hash": self.contract_hash
}
return self.request.get(path='/offers', params=api_params) | python | def get_offers(self, pair="SWTH_NEO"):
"""
Function to fetch the open orders on the order book for the trade pair requested.
Execution of this function is as follows::
get_offers(pair="SWTH_NEO")
The expected return result for this function is as follows::
[{
'id': '2716c0ca-59bb-4c86-8ee4-6b9528d0e5d2',
'offer_asset': 'GAS',
'want_asset': 'NEO',
'available_amount': 9509259,
'offer_amount': 30000000,
'want_amount': 300000000,
'address': '7f345d1a031c4099540dbbbc220d4e5640ab2b6f'
}, {
....
}]
:param pair: The trading pair that will be used to request open offers on the order book.
:type pair: str
:return: List of dictionaries consisting of the open offers for the requested trading pair.
"""
api_params = {
"pair": pair,
"contract_hash": self.contract_hash
}
return self.request.get(path='/offers', params=api_params) | [
"def",
"get_offers",
"(",
"self",
",",
"pair",
"=",
"\"SWTH_NEO\"",
")",
":",
"api_params",
"=",
"{",
"\"pair\"",
":",
"pair",
",",
"\"contract_hash\"",
":",
"self",
".",
"contract_hash",
"}",
"return",
"self",
".",
"request",
".",
"get",
"(",
"path",
"=",
"'/offers'",
",",
"params",
"=",
"api_params",
")"
] | Function to fetch the open orders on the order book for the trade pair requested.
Execution of this function is as follows::
get_offers(pair="SWTH_NEO")
The expected return result for this function is as follows::
[{
'id': '2716c0ca-59bb-4c86-8ee4-6b9528d0e5d2',
'offer_asset': 'GAS',
'want_asset': 'NEO',
'available_amount': 9509259,
'offer_amount': 30000000,
'want_amount': 300000000,
'address': '7f345d1a031c4099540dbbbc220d4e5640ab2b6f'
}, {
....
}]
:param pair: The trading pair that will be used to request open offers on the order book.
:type pair: str
:return: List of dictionaries consisting of the open offers for the requested trading pair. | [
"Function",
"to",
"fetch",
"the",
"open",
"orders",
"on",
"the",
"order",
"book",
"for",
"the",
"trade",
"pair",
"requested",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/public_client.py#L254-L283 |
KeithSSmith/switcheo-python | switcheo/public_client.py | PublicClient.get_trades | def get_trades(self, pair="SWTH_NEO", start_time=None, end_time=None, limit=5000):
"""
Function to fetch a list of filled trades for the parameters requested.
Execution of this function is as follows::
get_trades(pair="SWTH_NEO", limit=3)
The expected return result for this function is as follows::
[{
'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0',
'fill_amount': 100000000,
'take_amount': 100000000,
'event_time': '2018-08-04T15:00:12.634Z',
'is_buy': True
}, {
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'fill_amount': 47833882,
'take_amount': 97950000000,
'event_time': '2018-08-03T02:44:47.706Z',
'is_buy': True
}, {
'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f',
'fill_amount': 1001117,
'take_amount': 2050000000,
'event_time': '2018-08-03T02:32:50.703Z',
'is_buy': True
}]
:param pair: The trading pair that will be used to request filled trades.
:type pair: str
:param start_time: Only return trades after this time (in epoch seconds).
:type start_time: int
:param end_time: Only return trades before this time (in epoch seconds).
:type end_time: int
:param limit: The number of filled trades to return. Min: 1, Max: 10000, Default: 5000
:type limit: int
:return: List of dictionaries consisting of filled orders that meet requirements of the parameters passed to it.
"""
if limit > 10000 or limit < 1:
raise ValueError("Attempting to request more trades than allowed by the API.")
api_params = {
"blockchain": self.blockchain,
"pair": pair,
"contract_hash": self.contract_hash
}
if start_time is not None:
api_params['from'] = start_time
if end_time is not None:
api_params['to'] = end_time
if limit != 5000:
api_params['limit'] = limit
return self.request.get(path='/trades', params=api_params) | python | def get_trades(self, pair="SWTH_NEO", start_time=None, end_time=None, limit=5000):
"""
Function to fetch a list of filled trades for the parameters requested.
Execution of this function is as follows::
get_trades(pair="SWTH_NEO", limit=3)
The expected return result for this function is as follows::
[{
'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0',
'fill_amount': 100000000,
'take_amount': 100000000,
'event_time': '2018-08-04T15:00:12.634Z',
'is_buy': True
}, {
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'fill_amount': 47833882,
'take_amount': 97950000000,
'event_time': '2018-08-03T02:44:47.706Z',
'is_buy': True
}, {
'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f',
'fill_amount': 1001117,
'take_amount': 2050000000,
'event_time': '2018-08-03T02:32:50.703Z',
'is_buy': True
}]
:param pair: The trading pair that will be used to request filled trades.
:type pair: str
:param start_time: Only return trades after this time (in epoch seconds).
:type start_time: int
:param end_time: Only return trades before this time (in epoch seconds).
:type end_time: int
:param limit: The number of filled trades to return. Min: 1, Max: 10000, Default: 5000
:type limit: int
:return: List of dictionaries consisting of filled orders that meet requirements of the parameters passed to it.
"""
if limit > 10000 or limit < 1:
raise ValueError("Attempting to request more trades than allowed by the API.")
api_params = {
"blockchain": self.blockchain,
"pair": pair,
"contract_hash": self.contract_hash
}
if start_time is not None:
api_params['from'] = start_time
if end_time is not None:
api_params['to'] = end_time
if limit != 5000:
api_params['limit'] = limit
return self.request.get(path='/trades', params=api_params) | [
"def",
"get_trades",
"(",
"self",
",",
"pair",
"=",
"\"SWTH_NEO\"",
",",
"start_time",
"=",
"None",
",",
"end_time",
"=",
"None",
",",
"limit",
"=",
"5000",
")",
":",
"if",
"limit",
">",
"10000",
"or",
"limit",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Attempting to request more trades than allowed by the API.\"",
")",
"api_params",
"=",
"{",
"\"blockchain\"",
":",
"self",
".",
"blockchain",
",",
"\"pair\"",
":",
"pair",
",",
"\"contract_hash\"",
":",
"self",
".",
"contract_hash",
"}",
"if",
"start_time",
"is",
"not",
"None",
":",
"api_params",
"[",
"'from'",
"]",
"=",
"start_time",
"if",
"end_time",
"is",
"not",
"None",
":",
"api_params",
"[",
"'to'",
"]",
"=",
"end_time",
"if",
"limit",
"!=",
"5000",
":",
"api_params",
"[",
"'limit'",
"]",
"=",
"limit",
"return",
"self",
".",
"request",
".",
"get",
"(",
"path",
"=",
"'/trades'",
",",
"params",
"=",
"api_params",
")"
] | Function to fetch a list of filled trades for the parameters requested.
Execution of this function is as follows::
get_trades(pair="SWTH_NEO", limit=3)
The expected return result for this function is as follows::
[{
'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0',
'fill_amount': 100000000,
'take_amount': 100000000,
'event_time': '2018-08-04T15:00:12.634Z',
'is_buy': True
}, {
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'fill_amount': 47833882,
'take_amount': 97950000000,
'event_time': '2018-08-03T02:44:47.706Z',
'is_buy': True
}, {
'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f',
'fill_amount': 1001117,
'take_amount': 2050000000,
'event_time': '2018-08-03T02:32:50.703Z',
'is_buy': True
}]
:param pair: The trading pair that will be used to request filled trades.
:type pair: str
:param start_time: Only return trades after this time (in epoch seconds).
:type start_time: int
:param end_time: Only return trades before this time (in epoch seconds).
:type end_time: int
:param limit: The number of filled trades to return. Min: 1, Max: 10000, Default: 5000
:type limit: int
:return: List of dictionaries consisting of filled orders that meet requirements of the parameters passed to it. | [
"Function",
"to",
"fetch",
"a",
"list",
"of",
"filled",
"trades",
"for",
"the",
"parameters",
"requested",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/public_client.py#L325-L377 |
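``get_trades`` validates ``limit`` before any request is made, so out-of-range values fail locally. A sketch of both the normal call and the rejected one, with the same assumed import path and default client construction as in the earlier sketch::

    import time

    from switcheo.public_client import PublicClient  # assumed import path

    client = PublicClient()  # assumed default construction

    # Trades from the last 24 hours, capped at 200 rows
    day_ago = round(time.time()) - 24 * 60 * 60
    trades = client.get_trades(pair="SWTH_NEO", start_time=day_ago, limit=200)

    # Anything outside 1..10000 raises before the API is contacted
    try:
        client.get_trades(pair="SWTH_NEO", limit=0)
    except ValueError as exc:
        print(exc)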
KeithSSmith/switcheo-python | switcheo/public_client.py | PublicClient.get_recent_trades | def get_recent_trades(self, pair="SWTH_NEO"):
"""
Function to fetch a list of the 20 most recently filled trades for the parameters requested.
Execution of this function is as follows::
get_recent_trades(pair="SWTH_NEO")
The expected return result for this function is as follows::
[{
'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0',
'fill_amount': 100000000,
'take_amount': 100000000,
'event_time': '2018-08-04T15:00:12.634Z',
'is_buy': True
}, {
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'fill_amount': 47833882,
'take_amount': 97950000000,
'event_time': '2018-08-03T02:44:47.706Z',
'is_buy': True
}, ...., {
'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f',
'fill_amount': 1001117,
'take_amount': 2050000000,
'event_time': '2018-08-03T02:32:50.703Z',
'is_buy': True
}]
:param pair: The trading pair that will be used to request filled trades.
:type pair: str
:return: List of 20 dictionaries consisting of filled orders for the trade pair.
"""
api_params = {
"pair": pair
}
return self.request.get(path='/trades/recent', params=api_params) | python | def get_recent_trades(self, pair="SWTH_NEO"):
"""
Function to fetch a list of the 20 most recently filled trades for the parameters requested.
Execution of this function is as follows::
get_recent_trades(pair="SWTH_NEO")
The expected return result for this function is as follows::
[{
'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0',
'fill_amount': 100000000,
'take_amount': 100000000,
'event_time': '2018-08-04T15:00:12.634Z',
'is_buy': True
}, {
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'fill_amount': 47833882,
'take_amount': 97950000000,
'event_time': '2018-08-03T02:44:47.706Z',
'is_buy': True
}, ...., {
'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f',
'fill_amount': 1001117,
'take_amount': 2050000000,
'event_time': '2018-08-03T02:32:50.703Z',
'is_buy': True
}]
:param pair: The trading pair that will be used to request filled trades.
:type pair: str
:return: List of 20 dictionaries consisting of filled orders for the trade pair.
"""
api_params = {
"pair": pair
}
return self.request.get(path='/trades/recent', params=api_params) | [
"def",
"get_recent_trades",
"(",
"self",
",",
"pair",
"=",
"\"SWTH_NEO\"",
")",
":",
"api_params",
"=",
"{",
"\"pair\"",
":",
"pair",
"}",
"return",
"self",
".",
"request",
".",
"get",
"(",
"path",
"=",
"'/trades/recent'",
",",
"params",
"=",
"api_params",
")"
] | Function to fetch a list of the 20 most recently filled trades for the parameters requested.
Execution of this function is as follows::
get_recent_trades(pair="SWTH_NEO")
The expected return result for this function is as follows::
[{
'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0',
'fill_amount': 100000000,
'take_amount': 100000000,
'event_time': '2018-08-04T15:00:12.634Z',
'is_buy': True
}, {
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'fill_amount': 47833882,
'take_amount': 97950000000,
'event_time': '2018-08-03T02:44:47.706Z',
'is_buy': True
}, ...., {
'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f',
'fill_amount': 1001117,
'take_amount': 2050000000,
'event_time': '2018-08-03T02:32:50.703Z',
'is_buy': True
}]
:param pair: The trading pair that will be used to request filled trades.
:type pair: str
:return: List of 20 dictionaries consisting of filled orders for the trade pair. | [
"Function",
"to",
"fetch",
"a",
"list",
"of",
"the",
"20",
"most",
"recently",
"filled",
"trades",
"for",
"the",
"parameters",
"requested",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/public_client.py#L379-L415 |
KeithSSmith/switcheo-python | switcheo/public_client.py | PublicClient.get_pairs | def get_pairs(self, base=None, show_details=False):
"""
Function to fetch a list of trading pairs offered on the Switcheo decentralized exchange.
Execution of this function is as follows::
get_pairs() # Fetch all pairs
get_pairs(base="SWTH") # Fetch only SWTH base pairs
get_pairs(show_details=True) # Fetch all pairs with extended information !Attention return value changes!
The expected return result for this function is as follows::
[
'GAS_NEO',
'SWTH_NEO',
'MCT_NEO',
'NKN_NEO',
....
'SWTH_GAS',
'MCT_GAS',
'NKN_GAS',
....
'MCT_SWTH',
'NKN_SWTH'
]
If you use the show_details parameter the server return a list with dictionaries as follows::
[
{'name': 'GAS_NEO', 'precision': 3},
{'name': 'SWTH_NEO', 'precision': 6},
{'name': 'ACAT_NEO', 'precision': 8},
{'name': 'APH_NEO', 'precision': 5},
{'name': 'ASA_NEO', 'precision': 8},
....
]
:param base: The base trade pair to optionally filter available trade pairs.
:type base: str
:param show_details: Extended information for the pairs.
:type show_details: bool
:return: List of trade pairs available for trade on Switcheo.
"""
api_params = {}
if show_details:
api_params["show_details"] = show_details
if base is not None and base in ["NEO", "GAS", "SWTH", "USD", "ETH"]:
api_params["bases"] = [base]
return self.request.get(path='/exchange/pairs', params=api_params) | python | def get_pairs(self, base=None, show_details=False):
"""
Function to fetch a list of trading pairs offered on the Switcheo decentralized exchange.
Execution of this function is as follows::
get_pairs() # Fetch all pairs
get_pairs(base="SWTH") # Fetch only SWTH base pairs
get_pairs(show_details=True) # Fetch all pairs with extended information !Attention return value changes!
The expected return result for this function is as follows::
[
'GAS_NEO',
'SWTH_NEO',
'MCT_NEO',
'NKN_NEO',
....
'SWTH_GAS',
'MCT_GAS',
'NKN_GAS',
....
'MCT_SWTH',
'NKN_SWTH'
]
If you use the show_details parameter the server return a list with dictionaries as follows::
[
{'name': 'GAS_NEO', 'precision': 3},
{'name': 'SWTH_NEO', 'precision': 6},
{'name': 'ACAT_NEO', 'precision': 8},
{'name': 'APH_NEO', 'precision': 5},
{'name': 'ASA_NEO', 'precision': 8},
....
]
:param base: The base trade pair to optionally filter available trade pairs.
:type base: str
:param show_details: Extended information for the pairs.
:type show_details: bool
:return: List of trade pairs available for trade on Switcheo.
"""
api_params = {}
if show_details:
api_params["show_details"] = show_details
if base is not None and base in ["NEO", "GAS", "SWTH", "USD", "ETH"]:
api_params["bases"] = [base]
return self.request.get(path='/exchange/pairs', params=api_params) | [
"def",
"get_pairs",
"(",
"self",
",",
"base",
"=",
"None",
",",
"show_details",
"=",
"False",
")",
":",
"api_params",
"=",
"{",
"}",
"if",
"show_details",
":",
"api_params",
"[",
"\"show_details\"",
"]",
"=",
"show_details",
"if",
"base",
"is",
"not",
"None",
"and",
"base",
"in",
"[",
"\"NEO\"",
",",
"\"GAS\"",
",",
"\"SWTH\"",
",",
"\"USD\"",
",",
"\"ETH\"",
"]",
":",
"api_params",
"[",
"\"bases\"",
"]",
"=",
"[",
"base",
"]",
"return",
"self",
".",
"request",
".",
"get",
"(",
"path",
"=",
"'/exchange/pairs'",
",",
"params",
"=",
"api_params",
")"
] | Function to fetch a list of trading pairs offered on the Switcheo decentralized exchange.
Execution of this function is as follows::
get_pairs() # Fetch all pairs
get_pairs(base="SWTH") # Fetch only SWTH base pairs
get_pairs(show_details=True) # Fetch all pairs with extended information !Attention return value changes!
The expected return result for this function is as follows::
[
'GAS_NEO',
'SWTH_NEO',
'MCT_NEO',
'NKN_NEO',
....
'SWTH_GAS',
'MCT_GAS',
'NKN_GAS',
....
'MCT_SWTH',
'NKN_SWTH'
]
If you use the show_details parameter the server return a list with dictionaries as follows::
[
{'name': 'GAS_NEO', 'precision': 3},
{'name': 'SWTH_NEO', 'precision': 6},
{'name': 'ACAT_NEO', 'precision': 8},
{'name': 'APH_NEO', 'precision': 5},
{'name': 'ASA_NEO', 'precision': 8},
....
]
:param base: The base trade pair to optionally filter available trade pairs.
:type base: str
:param show_details: Extended information for the pairs.
:type show_details: bool
:return: List of trade pairs available for trade on Switcheo. | [
"Function",
"to",
"fetch",
"a",
"list",
"of",
"trading",
"pairs",
"offered",
"on",
"the",
"Switcheo",
"decentralized",
"exchange",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/public_client.py#L417-L464 |
KeithSSmith/switcheo-python | switcheo/public_client.py | PublicClient.get_orders | def get_orders(self, address, chain_name='NEO', contract_version='V2', pair=None, from_epoch_time=None,
order_status=None, before_id=None, limit=50):
"""
Function to fetch the order history of the given address.
Execution of this function is as follows::
get_orders(address=neo_get_scripthash_from_address(address=address))
The expected return result for this function is as follows::
[{
'id': '7cbdf481-6acf-4bf3-a1ed-4773f31e6931',
'blockchain': 'neo',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'side': 'buy',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'offer_amount': '53718500',
'want_amount': '110000000000',
'transfer_amount': '0',
'priority_gas_amount': '0',
'use_native_token': True,
'native_fee_transfer_amount': 0,
'deposit_txn': None,
'created_at': '2018-08-03T02:44:47.692Z',
'status': 'processed',
'fills': [{
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'offer_hash': '95b3b03be0bff8f58aa86a8dd599700bbaeaffc05078329d5b726b6b995f4cda',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'fill_amount': '47833882',
'want_amount': '97950000000',
'filled_amount': '',
'fee_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'fee_amount': '73462500',
'price': '0.00048835',
'txn': None,
'status': 'success',
'created_at': '2018-08-03T02:44:47.706Z',
'transaction_hash': '694745a09e33845ec008cfb79c73986a556e619799ec73274f82b30d85bda13a'
}],
'makes': [{
'id': '357088a0-cc80-49ab-acdd-980589c2d7d8',
'offer_hash': '420cc85abf02feaceb1bcd91489a0c1949c972d2a9a05ae922fa15d79de80c00',
'available_amount': '0',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'offer_amount': '5884618',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'want_amount': '12050000000',
'filled_amount': '0.0',
'txn': None,
'cancel_txn': None,
'price': '0.000488350041493775933609958506224066390041494',
'status': 'cancelled',
'created_at': '2018-08-03T02:44:47.708Z',
'transaction_hash': '1afa946546550151bbbd19f197a87cec92e9be58c44ec431cae42076298548b7',
'trades': []
}]
}, {
....
}]
:param address: The ScriptHash of the address to filter orders for.
:type address: str
:param pair: The trading pair to filter order requests on.
:type pair: str
:param chain_name: The name of the chain to find orders against.
:type chain_name: str
:param contract_version: The version of the contract to find orders against.
:type contract_version: str
:param from_epoch_time: Only return orders that are last updated at or after this time.
:type from_epoch_time: int
:param order_status: Only return orders have this status. Possible values are open, cancelled, completed.
:type order_status: str
:param before_id: Only return orders that are created before the order with this id.
:type before_id: str
:param limit: Only return up to this number of orders (min: 1, max: 200, default: 50).
:type limit: int
:return: List of dictionaries containing the orders for the given NEO address and (optional) trading pair.
"""
api_params = {
"address": address,
"contract_hash": self.get_contracts()[chain_name.upper()][contract_version.upper()],
"limit": limit
}
if pair is not None:
api_params['pair'] = pair
if from_epoch_time is not None:
api_params['from_epoch_time'] = from_epoch_time
if order_status is not None:
api_params['order_status'] = order_status
if before_id is not None:
api_params['before_id'] = before_id
return self.request.get(path='/orders', params=api_params) | python | def get_orders(self, address, chain_name='NEO', contract_version='V2', pair=None, from_epoch_time=None,
order_status=None, before_id=None, limit=50):
"""
Function to fetch the order history of the given address.
Execution of this function is as follows::
get_orders(address=neo_get_scripthash_from_address(address=address))
The expected return result for this function is as follows::
[{
'id': '7cbdf481-6acf-4bf3-a1ed-4773f31e6931',
'blockchain': 'neo',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'side': 'buy',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'offer_amount': '53718500',
'want_amount': '110000000000',
'transfer_amount': '0',
'priority_gas_amount': '0',
'use_native_token': True,
'native_fee_transfer_amount': 0,
'deposit_txn': None,
'created_at': '2018-08-03T02:44:47.692Z',
'status': 'processed',
'fills': [{
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'offer_hash': '95b3b03be0bff8f58aa86a8dd599700bbaeaffc05078329d5b726b6b995f4cda',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'fill_amount': '47833882',
'want_amount': '97950000000',
'filled_amount': '',
'fee_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'fee_amount': '73462500',
'price': '0.00048835',
'txn': None,
'status': 'success',
'created_at': '2018-08-03T02:44:47.706Z',
'transaction_hash': '694745a09e33845ec008cfb79c73986a556e619799ec73274f82b30d85bda13a'
}],
'makes': [{
'id': '357088a0-cc80-49ab-acdd-980589c2d7d8',
'offer_hash': '420cc85abf02feaceb1bcd91489a0c1949c972d2a9a05ae922fa15d79de80c00',
'available_amount': '0',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'offer_amount': '5884618',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'want_amount': '12050000000',
'filled_amount': '0.0',
'txn': None,
'cancel_txn': None,
'price': '0.000488350041493775933609958506224066390041494',
'status': 'cancelled',
'created_at': '2018-08-03T02:44:47.708Z',
'transaction_hash': '1afa946546550151bbbd19f197a87cec92e9be58c44ec431cae42076298548b7',
'trades': []
}]
}, {
....
}]
:param address: The ScriptHash of the address to filter orders for.
:type address: str
:param pair: The trading pair to filter order requests on.
:type pair: str
:param chain_name: The name of the chain to find orders against.
:type chain_name: str
:param contract_version: The version of the contract to find orders against.
:type contract_version: str
:param from_epoch_time: Only return orders that are last updated at or after this time.
:type from_epoch_time: int
:param order_status: Only return orders have this status. Possible values are open, cancelled, completed.
:type order_status: str
:param before_id: Only return orders that are created before the order with this id.
:type before_id: str
:param limit: Only return up to this number of orders (min: 1, max: 200, default: 50).
:type limit: int
:return: List of dictionaries containing the orders for the given NEO address and (optional) trading pair.
"""
api_params = {
"address": address,
"contract_hash": self.get_contracts()[chain_name.upper()][contract_version.upper()],
"limit": limit
}
if pair is not None:
api_params['pair'] = pair
if from_epoch_time is not None:
api_params['from_epoch_time'] = from_epoch_time
if order_status is not None:
api_params['order_status'] = order_status
if before_id is not None:
api_params['before_id'] = before_id
return self.request.get(path='/orders', params=api_params) | [
"def",
"get_orders",
"(",
"self",
",",
"address",
",",
"chain_name",
"=",
"'NEO'",
",",
"contract_version",
"=",
"'V2'",
",",
"pair",
"=",
"None",
",",
"from_epoch_time",
"=",
"None",
",",
"order_status",
"=",
"None",
",",
"before_id",
"=",
"None",
",",
"limit",
"=",
"50",
")",
":",
"api_params",
"=",
"{",
"\"address\"",
":",
"address",
",",
"\"contract_hash\"",
":",
"self",
".",
"get_contracts",
"(",
")",
"[",
"chain_name",
".",
"upper",
"(",
")",
"]",
"[",
"contract_version",
".",
"upper",
"(",
")",
"]",
",",
"\"limit\"",
":",
"limit",
"}",
"if",
"pair",
"is",
"not",
"None",
":",
"api_params",
"[",
"'pair'",
"]",
"=",
"pair",
"if",
"from_epoch_time",
"is",
"not",
"None",
":",
"api_params",
"[",
"'from_epoch_time'",
"]",
"=",
"from_epoch_time",
"if",
"order_status",
"is",
"not",
"None",
":",
"api_params",
"[",
"'order_status'",
"]",
"=",
"order_status",
"if",
"before_id",
"is",
"not",
"None",
":",
"api_params",
"[",
"'before_id'",
"]",
"=",
"before_id",
"return",
"self",
".",
"request",
".",
"get",
"(",
"path",
"=",
"'/orders'",
",",
"params",
"=",
"api_params",
")"
] | Function to fetch the order history of the given address.
Execution of this function is as follows::
get_orders(address=neo_get_scripthash_from_address(address=address))
The expected return result for this function is as follows::
[{
'id': '7cbdf481-6acf-4bf3-a1ed-4773f31e6931',
'blockchain': 'neo',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'side': 'buy',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'offer_amount': '53718500',
'want_amount': '110000000000',
'transfer_amount': '0',
'priority_gas_amount': '0',
'use_native_token': True,
'native_fee_transfer_amount': 0,
'deposit_txn': None,
'created_at': '2018-08-03T02:44:47.692Z',
'status': 'processed',
'fills': [{
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'offer_hash': '95b3b03be0bff8f58aa86a8dd599700bbaeaffc05078329d5b726b6b995f4cda',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'fill_amount': '47833882',
'want_amount': '97950000000',
'filled_amount': '',
'fee_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'fee_amount': '73462500',
'price': '0.00048835',
'txn': None,
'status': 'success',
'created_at': '2018-08-03T02:44:47.706Z',
'transaction_hash': '694745a09e33845ec008cfb79c73986a556e619799ec73274f82b30d85bda13a'
}],
'makes': [{
'id': '357088a0-cc80-49ab-acdd-980589c2d7d8',
'offer_hash': '420cc85abf02feaceb1bcd91489a0c1949c972d2a9a05ae922fa15d79de80c00',
'available_amount': '0',
'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'offer_amount': '5884618',
'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'want_amount': '12050000000',
'filled_amount': '0.0',
'txn': None,
'cancel_txn': None,
'price': '0.000488350041493775933609958506224066390041494',
'status': 'cancelled',
'created_at': '2018-08-03T02:44:47.708Z',
'transaction_hash': '1afa946546550151bbbd19f197a87cec92e9be58c44ec431cae42076298548b7',
'trades': []
}]
}, {
....
}]
:param address: The ScriptHash of the address to filter orders for.
:type address: str
:param pair: The trading pair to filter order requests on.
:type pair: str
:param chain_name: The name of the chain to find orders against.
:type chain_name: str
:param contract_version: The version of the contract to find orders against.
:type contract_version: str
:param from_epoch_time: Only return orders that are last updated at or after this time.
:type from_epoch_time: int
:param order_status: Only return orders have this status. Possible values are open, cancelled, completed.
:type order_status: str
:param before_id: Only return orders that are created before the order with this id.
:type before_id: str
:param limit: Only return up to this number of orders (min: 1, max: 200, default: 50).
:type limit: int
:return: List of dictionaries containing the orders for the given NEO address and (optional) trading pair. | [
"Function",
"to",
"fetch",
"the",
"order",
"history",
"of",
"the",
"given",
"address",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
] | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/public_client.py#L490-L585 |
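``get_orders`` accepts several optional filters that the docstring example above does not combine. A sketch using them together; the scripthash is the example address from the record, and the client construction is assumed as before::

    from switcheo.public_client import PublicClient  # assumed import path

    client = PublicClient()  # assumed default construction

    address = "fea2b883725ef2d194c9060f606cd0a0468a2c59"  # example scripthash from the docstring
    open_orders = client.get_orders(
        address=address,
        pair="SWTH_NEO",
        order_status="open",  # one of: open, cancelled, completed
        limit=20,
    )
    for order in open_orders:
        print(order["id"], order["side"], order["status"])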
KeithSSmith/switcheo-python | switcheo/public_client.py | PublicClient.get_balance | def get_balance(self, addresses, contracts):
"""
Function to fetch the current account balance for the given address in the Switcheo smart contract.
Execution of this function is as follows::
get_balance(address=neo_get_scripthash_from_address(address=address))
The expected return result for this function is as follows::
{
'confirming': {},
'confirmed': {
'GAS': '100000000.0',
'SWTH': '97976537500.0',
'NEO': '52166118.0'
},
'locked': {}
}
:param addresses: The ScriptHash of the address(es) to retrieve its Smart Contract balance.
:type addresses: list
:param contracts: The contract hash(es) to retrieve all addresses Smart Contract balance.
:type contracts: list
:return: Dictionary containing the sum of all addresses smart contract balances by processing state.
"""
api_params = {
"addresses[]": addresses,
"contract_hashes[]": contracts
}
return self.request.get(path='/balances', params=api_params) | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/public_client.py#L587-L616 |
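A minimal usage sketch for the `get_balance` row above. It assumes a Switcheo client object named `client` exposing the method as documented; the wallet ScriptHash variable and the contract hash are illustrative placeholders, the latter copied from the docstring example.

```python
# Sketch only: `client` and `script_hash` are assumed to exist already;
# the contract hash is the example value from the docstring, not a recommendation.
balances = client.get_balance(
    addresses=[script_hash],
    contracts=["a195c1549e7da61b8da315765a790ac7e7633b82"],
)
print(balances["confirmed"])   # e.g. {'GAS': ..., 'SWTH': ..., 'NEO': ...}
```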
tjwalch/django-livereload-server | livereload/server.py | Server.ignore_file_extension | def ignore_file_extension(self, extension):
"""
Configure a file extension to be ignored.
:param extension: file extension to be ignored
(ex. .less, .scss, etc)
"""
logger.info('Ignoring file extension: {}'.format(extension))
self.watcher.ignore_file_extension(extension) | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/tjwalch/django-livereload-server/blob/ea3edaa1a5b2f8cb49761dd32f2fcc4554c4aa0c/livereload/server.py#L97-L105 |
tjwalch/django-livereload-server | livereload/server.py | Server.watch | def watch(self, filepath, func=None, delay=None):
"""Add the given filepath for watcher list.
Once you have intialized a server, watch file changes before
serve the server::
server.watch('static/*.stylus', 'make static')
def alert():
print('foo')
server.watch('foo.txt', alert)
server.serve()
:param filepath: files to be watched, it can be a filepath,
a directory, or a glob pattern
:param func: the function to be called, it can be a string of
shell command, or any callable object without
parameters
:param delay: Delay sending the reload message. Use 'forever' to
not send it. This is useful to compile sass files to
css, but reload on changed css files then only.
"""
if isinstance(func, string_types):
func = shell(func)
self.watcher.watch(filepath, func, delay) | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/tjwalch/django-livereload-server/blob/ea3edaa1a5b2f8cb49761dd32f2fcc4554c4aa0c/livereload/server.py#L107-L131 |
tjwalch/django-livereload-server | livereload/server.py | Server.serve | def serve(self, liveport=None, host=None, restart_delay=2):
"""Start serve the server with the given port.
:param liveport: live reload on this port
:param host: serve on this hostname, default is 127.0.0.1
:param open_url_delay: open webbrowser after the delay seconds
"""
host = host or '127.0.0.1'
logger.info('Serving on http://%s:%s' % (host, liveport))
self.application(host, liveport=liveport)
try:
self.watcher._changes.append(('__livereload__', restart_delay))
LiveReloadHandler.start_tasks()
IOLoop.instance().start()
except KeyboardInterrupt:
logger.info('Shutting down...') | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/tjwalch/django-livereload-server/blob/ea3edaa1a5b2f8cb49761dd32f2fcc4554c4aa0c/livereload/server.py#L143-L160 |
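A small sketch tying together the three `Server` methods shown above (`ignore_file_extension`, `watch`, `serve`). The constructor call, glob patterns, shell command and port are assumptions for illustration, not taken from the source.

```python
# Sketch: wire up a livereload Server, register watches, then block on serve().
server = Server()                                            # constructor arguments assumed
server.ignore_file_extension('.map')                         # skip generated source maps
server.watch('static/*.scss', 'make css', delay='forever')   # compile only; reload on the built CSS
server.watch('static/*.css')
server.serve(liveport=35729, host='127.0.0.1')               # 35729 is the conventional livereload port
```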
tjwalch/django-livereload-server | livereload/watcher.py | Watcher.should_ignore | def should_ignore(self, filename):
"""Should ignore a given filename?"""
_, ext = os.path.splitext(filename)
return ext in self.ignored_file_extensions | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/tjwalch/django-livereload-server/blob/ea3edaa1a5b2f8cb49761dd32f2fcc4554c4aa0c/livereload/watcher.py#L37-L40 |
tjwalch/django-livereload-server | livereload/watcher.py | Watcher.examine | def examine(self):
"""Check if there are changes, if true, run the given task."""
if self._changes:
return self._changes.pop()
# clean filepath
self.filepath = None
delays = set([0])
for path in self._tasks:
item = self._tasks[path]
if self.is_changed(path, item['ignore']):
func = item['func']
func and func()
delay = item['delay']
if delay:
delays.add(delay)
if 'forever' in delays:
delay = 'forever'
else:
delay = max(delays)
return self.filepath, delay | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/tjwalch/django-livereload-server/blob/ea3edaa1a5b2f8cb49761dd32f2fcc4554c4aa0c/livereload/watcher.py#L67-L88 |
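A hedged sketch of how `examine` is meant to be polled: it pops any queued change, otherwise runs the registered tasks and reports the changed path together with the largest requested delay. The `Watcher()` construction and the watched path below are assumptions.

```python
# Sketch: register a watch, then poll for changes.
watcher = Watcher()                                   # constructor details assumed
watcher.watch('app/static/site.css', None, 2)         # path, callback, delay
filepath, delay = watcher.examine()                   # (changed path or None, aggregated delay)
if filepath:
    print('reload triggered by', filepath, 'after', delay, 'seconds')
```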
tjwalch/django-livereload-server | livereload/management/commands/runserver.py | Command.livereload_request | def livereload_request(self, **options):
"""
Performs the LiveReload request.
"""
style = color_style()
verbosity = int(options['verbosity'])
host = '%s:%d' % (
options['livereload_host'],
options['livereload_port'],
)
try:
urlopen('http://%s/forcereload' % host)
self.message('LiveReload request emitted.\n',
verbosity, style.HTTP_INFO)
except IOError:
pass | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/tjwalch/django-livereload-server/blob/ea3edaa1a5b2f8cb49761dd32f2fcc4554c4aa0c/livereload/management/commands/runserver.py#L53-L68 |
tjwalch/django-livereload-server | livereload/management/commands/runserver.py | Command.get_handler | def get_handler(self, *args, **options):
"""
Entry point to plug the LiveReload feature.
"""
handler = super(Command, self).get_handler(*args, **options)
if options['use_livereload']:
threading.Timer(1, self.livereload_request, kwargs=options).start()
return handler | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/tjwalch/django-livereload-server/blob/ea3edaa1a5b2f8cb49761dd32f2fcc4554c4aa0c/livereload/management/commands/runserver.py#L70-L77 |
brutasse/django-password-reset | password_reset/views.py | loads_with_timestamp | def loads_with_timestamp(value, salt):
"""Returns the unsigned value along with its timestamp, the time when it
got dumped."""
try:
signing.loads(value, salt=salt, max_age=-999999)
except signing.SignatureExpired as e:
age = float(str(e).split('Signature age ')[1].split(' >')[0])
timestamp = timezone.now() - datetime.timedelta(seconds=age)
return timestamp, signing.loads(value, salt=salt) | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/brutasse/django-password-reset/blob/3f3a531d8bbd7e456af214757afccdf06b0d12d1/password_reset/views.py#L27-L35 |
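A short round-trip sketch for the helper above, using Django's signing module; the payload and salt values are placeholders.

```python
# Sketch: sign a value, then recover both the payload and the time it was signed.
from django.core import signing

token = signing.dumps({"user_id": 42}, salt="password-recovery")       # salt is a placeholder
timestamp, payload = loads_with_timestamp(token, salt="password-recovery")
print(payload, "was signed at", timestamp)
```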
socialwifi/RouterOS-api | routeros_api/api_socket.py | set_keepalive | def set_keepalive(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
"""Set TCP keepalive on an open socket.
It activates after 1 second (after_idle_sec) of idleness,
then sends a keepalive ping once every 3 seconds (interval_sec),
and closes the connection after 5 failed ping (max_fails), or 15 seconds
"""
if hasattr(socket, "SO_KEEPALIVE"):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, "TCP_KEEPIDLE"):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)
if hasattr(socket, "TCP_KEEPINTVL"):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)
if hasattr(socket, "TCP_KEEPCNT"):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails) | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/socialwifi/RouterOS-api/blob/d4eb2422f2437b3c99193a79fa857fc1e2c672af/routeros_api/api_socket.py#L37-L51 |
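A short sketch of applying the helper above to a freshly opened client socket; the host and port are placeholders.

```python
# Sketch: open a TCP connection and turn on keepalive probes for it.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("192.0.2.10", 8728))                               # placeholder host and port
set_keepalive(sock, after_idle_sec=1, interval_sec=3, max_fails=5)
```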
thespacedoctor/sherlock | sherlock/imports/veron.py | veron.ingest | def ingest(self):
"""ingest the veron catalogue into the catalogues database
See class docstring for usage.
"""
self.log.debug('starting the ``get`` method')
dictList = self._create_dictionary_of_veron()
tableName = self.dbTableName
createStatement = """
CREATE TABLE `%(tableName)s` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`B_V` float DEFAULT NULL,
`U_B` float DEFAULT NULL,
`abs_magnitude` float DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`decDeg` double DEFAULT NULL,
`magnitude` float DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`class` varchar(10) COLLATE utf8_unicode_ci DEFAULT NULL,
`name` varchar(100) COLLATE utf8_unicode_ci DEFAULT NULL,
`redshift` float DEFAULT NULL,
`not_radio` varchar(10) COLLATE utf8_unicode_ci DEFAULT NULL,
`magnitude_filter` varchar(10) COLLATE utf8_unicode_ci DEFAULT 'V',
`htm16ID` bigint(20) DEFAULT NULL,
`redshift_flag` varchar(100) COLLATE utf8_unicode_ci DEFAULT NULL,
`spectral_classification` varchar(100) COLLATE utf8_unicode_ci DEFAULT NULL,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
`htm10ID` bigint(20) DEFAULT NULL,
`htm13ID` bigint(20) DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `radeg_decdeg` (`raDeg`,`decDeg`),
KEY `idx_htm16ID` (`htm16ID`),
KEY `idx_htm10ID` (`htm10ID`),
KEY `idx_htm13ID` (`htm13ID`)
) ENGINE=MyISAM AUTO_INCREMENT=168945 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
""" % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self.log.debug('completed the ``get`` method')
return None | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/veron.py#L75-L121 |
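A hedged sketch of driving the importer above, mirroring the call pattern shown in the `cl_utils.py` row further down; the logger, settings object, file path and version string are assumptions.

```python
# Sketch: construct the importer the same way cl_utils.py does and run the ingest.
catalogue = veronImporter(
    log=log,                                   # logger and settings assumed from the usual sherlock setup
    settings=settings,
    pathToDataFile="/path/to/veron_v13.txt",   # placeholder path
    version="13",                              # placeholder version
    catalogueName="veron",
)
catalogue.ingest()
```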
thespacedoctor/sherlock | sherlock/imports/veron.py | veron._create_dictionary_of_veron | def _create_dictionary_of_veron(
self):
"""create a list of dictionaries containing all the rows in the veron catalogue
**Return:**
- ``dictList`` - a list of dictionaries containing all the rows in the veron catalogue
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``_create_dictionary_of_veron`` method')
dictList = []
lines = string.split(self.catData, '\n')
totalCount = len(lines)
count = 0
switch = 0
for line in lines:
if (len(line) == 0 or line[0] in ["#", " "]) and switch == 0:
continue
else:
switch = 1
count += 1
if count > 1:
# Cursor up one line and clear line
sys.stdout.write("\x1b[1A\x1b[2K")
print "%(count)s / %(totalCount)s veron data added to memory" % locals()
if count == 1:
theseKeys = []
someKeys = string.split(line, '|')
for key in someKeys:
if key == "_RAJ2000":
key = "raDeg"
if key == "_DEJ2000":
key = "decDeg"
if key == "Cl":
key = "class"
if key == "nR":
key = "not_radio"
if key == "Name":
key = "name"
if key == "l_z":
key = "redshift_flag"
if key == "z":
key = "redshift"
if key == "Sp":
key = "spectral_classification"
if key == "n_Vmag":
key = "magnitude_filter"
if key == "Vmag":
key = "magnitude"
if key == "B-V":
key = "B_V"
if key == "U-B":
key = "U_B"
if key == "Mabs":
key = "abs_magnitude"
theseKeys.append(key)
continue
if count in [2, 3]:
continue
thisDict = {}
theseValues = string.split(line, '|')
for k, v in zip(theseKeys, theseValues):
v = v.strip()
if len(v) == 0 or v == "-":
v = None
thisDict[k] = v
dictList.append(thisDict)
self.log.debug(
'completed the ``_create_dictionary_of_veron`` method')
return dictList | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/veron.py#L123-L208 |
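The method above reduces to a simple pattern: skip the preamble, read a '|'-separated header, remap a few column names, skip the two separator rows after the header, then zip each data row into a dict. A compact Python 3 sketch of that pattern (not the library's own code, and with an abridged key mapping) is:

```python
# Sketch of the same parsing pattern, abridged and written for Python 3.
KEY_MAP = {"_RAJ2000": "raDeg", "_DEJ2000": "decDeg", "Cl": "class", "z": "redshift"}  # abridged mapping

def parse_pipe_table(text):
    lines = [l for l in text.splitlines() if l and l[0] not in "# "]
    keys = [KEY_MAP.get(k.strip(), k.strip()) for k in lines[0].split("|")]
    rows = []
    for line in lines[3:]:                       # the two rows after the header are separators
        values = [v.strip() or None for v in line.split("|")]
        rows.append(dict(zip(keys, values)))
    return rows
```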
thespacedoctor/sherlock | sherlock/cl_utils.py | main | def main(arguments=None):
"""
The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
# setup the command-line util settings
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="WARNING",
options_first=False,
projectName="sherlock"
)
arguments, settings, log, dbConn = su.setup()
# unpack remaining cl arguments using `exec` to setup the variable names
# automatically
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if varname == "import":
varname = "iimport"
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug('%s = %s' % (varname, val,))
## START LOGGING ##
startTime = times.get_now_sql_datetime()
log.debug(
'--- STARTING TO RUN THE cl_utils.py AT %s' %
(startTime,))
# call the worker function
# x-if-settings-or-database-credientials
if init:
from os.path import expanduser
home = expanduser("~")
filepath = home + "/.config/sherlock/sherlock.yaml"
cmd = """open %(filepath)s""" % locals()
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
try:
cmd = """open %(filepath)s""" % locals()
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
except:
pass
try:
cmd = """start %(filepath)s""" % locals()
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
except:
pass
if match or dbmatch:
if verboseFlag:
verbose = 2
else:
verbose = 1
if skipNedUpdateFlag:
updateNed = False
else:
updateNed = True
if skipMagUpdateFlag:
updatePeakMags = False
else:
updatePeakMags = True
classifier = transient_classifier.transient_classifier(
log=log,
settings=settings,
ra=ra,
dec=dec,
name=False,
verbose=verbose,
update=updateFlag,
updateNed=updateNed,
updatePeakMags=updatePeakMags
)
classifier.classify()
if clean:
cleaner = database_cleaner(
log=log,
settings=settings
)
cleaner.clean()
if wiki:
updateWiki = update_wiki_pages(
log=log,
settings=settings
)
updateWiki.update()
if iimport and ned:
ned = nedStreamImporter(
log=log,
settings=settings,
coordinateList=["%(ra)s %(dec)s" % locals()],
radiusArcsec=radiusArcsec
)
ned.ingest()
if iimport and cat:
if cat_name == "veron":
catalogue = veronImporter(
log=log,
settings=settings,
pathToDataFile=pathToDataFile,
version=cat_version,
catalogueName=cat_name
)
catalogue.ingest()
if "ned_d" in cat_name:
catalogue = nedImporter(
log=log,
settings=settings,
pathToDataFile=pathToDataFile,
version=cat_version,
catalogueName=cat_name
)
catalogue.ingest()
if iimport and stream:
if "marshall" in stream_name:
stream = marshallImporter(
log=log,
settings=settings,
)
stream.ingest()
if "ifs" in stream_name:
stream = ifsImporter(
log=log,
settings=settings
)
stream.ingest()
if not init and not match and not clean and not wiki and not iimport and ra:
classifier = transient_classifier.transient_classifier(
log=log,
settings=settings,
ra=ra,
dec=dec,
name=False,
verbose=verboseFlag
)
classifier.classify()
if info:
print "sherlock-catalogues"
wiki = update_wiki_pages(
log=log,
settings=settings
)
table = list(wiki._get_table_infos(trimmed=True))
dataSet = list_of_dictionaries(
log=log,
listOfDictionaries=table
)
tableData = dataSet.reST(filepath=None)
print tableData
print
print "Crossmatch Streams"
table = list(wiki._get_stream_view_infos(trimmed=True))
dataSet = list_of_dictionaries(
log=log,
listOfDictionaries=table
)
tableData = dataSet.reST(filepath=None)
print tableData
print
print "Views on Catalogues and Streams"
table = list(wiki._get_view_infos(trimmed=True))
dataSet = list_of_dictionaries(
log=log,
listOfDictionaries=table
)
tableData = dataSet.reST(filepath=None)
print tableData
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
## FINISH LOGGING ##
endTime = times.get_now_sql_datetime()
runningTime = times.calculate_time_difference(startTime, endTime)
log.debug('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
(endTime, runningTime, ))
return | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/cl_utils.py#L75-L283 |
andrewda/frc-livescore | livescore/simpleocr_utils/segmentation_filters.py | Filter.display | def display(self, display_before=False):
"""shows the effect of this filter"""
try:
copy = self.image.copy()
except AttributeError:
raise Exception("You need to set the Filter.image attribute for displaying")
copy = BrightnessProcessor(brightness=0.6).process(copy)
s, g = self._input, self.good_segments_indexes
draw_segments(copy, s[g], (0, 255, 0))
draw_segments(copy, s[True ^ g], (0, 0, 255))
show_image_and_wait_for_key(copy, "segments filtered by " + self.__class__.__name__) | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/segmentation_filters.py#L18-L28 |
Clinical-Genomics/trailblazer | trailblazer/mip/trending.py | parse_mip_analysis | def parse_mip_analysis(mip_config_raw: dict, qcmetrics_raw: dict, sampleinfo_raw: dict) -> dict:
"""Parse the output analysis files from MIP for adding info
to trend database
Args:
mip_config_raw (dict): raw YAML input from MIP analysis config file
qcmetrics_raw (dict): raw YAML input from MIP analysis qc metric file
sampleinfo_raw (dict): raw YAML input from MIP analysis qc sample info file
Returns:
dict: parsed data
"""
outdata = _define_output_dict()
_config(mip_config_raw, outdata)
_qc_metrics(outdata, qcmetrics_raw)
_qc_sample_info(outdata, sampleinfo_raw)
return outdata | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/trending.py#L7-L24 |
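A hedged sketch of feeding the parser above; the three YAML file names are placeholders for the MIP config, qc metrics and qc sample info outputs it expects.

```python
# Sketch: load the three MIP YAML outputs and hand them to the parser.
import yaml

with open("case_config.yaml") as cfg, open("case_qc_metrics.yaml") as qc, open("case_qc_sample_info.yaml") as si:
    outdata = parse_mip_analysis(
        mip_config_raw=yaml.safe_load(cfg),
        qcmetrics_raw=yaml.safe_load(qc),
        sampleinfo_raw=yaml.safe_load(si),
    )
```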
thespacedoctor/sherlock | sherlock/database_cleaner.py | database_cleaner.clean | def clean(self):
"""*clean up and run some maintance tasks on the crossmatch catalogue helper tables*
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug('starting the ``get`` method')
self._update_tcs_helper_catalogue_tables_info_with_new_tables()
self._updated_row_counts_in_tcs_helper_catalogue_tables_info()
self._clean_up_columns()
self._update_tcs_helper_catalogue_views_info_with_new_views()
self._clean_up_columns()
self._updated_row_counts_in_tcs_helper_catalogue_tables_info()
print "`tcs_helper_catalogue_tables_info` & `tcs_helper_catalogue_views_info` database tables updated"
self.log.debug('completed the ``get`` method')
return None | python | (func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens are omitted here: they repeat the function source and docstring shown above verbatim) | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/database_cleaner.py#L86-L111 |
thespacedoctor/sherlock | sherlock/database_cleaner.py | database_cleaner._updated_row_counts_in_tcs_helper_catalogue_tables_info | def _updated_row_counts_in_tcs_helper_catalogue_tables_info(
self):
""" updated row counts in tcs catalogue tables
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``_updated_row_counts_in_tcs_helper_catalogue_tables_info`` method')
sqlQuery = u"""
select * from tcs_helper_catalogue_tables_info where table_name like "%%stream" or (number_of_rows is null and legacy_table = 0)
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
tbName = row["table_name"]
sqlQuery = u"""
update tcs_helper_catalogue_tables_info set number_of_rows = (select count(*) as count from %(tbName)s) where table_name = "%(tbName)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
sqlQuery = u"""
select * from tcs_helper_catalogue_views_info where (number_of_rows is null and legacy_view = 0)
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
tbName = row["view_name"]
print tbName
sqlQuery = u"""
update tcs_helper_catalogue_views_info set number_of_rows = (select count(*) as count from %(tbName)s) where view_name = "%(tbName)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug(
'completed the ``_updated_row_counts_in_tcs_helper_catalogue_tables_info`` method')
return None | python | def _updated_row_counts_in_tcs_helper_catalogue_tables_info(
self):
""" updated row counts in tcs catalogue tables
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``_updated_row_counts_in_tcs_helper_catalogue_tables_info`` method')
sqlQuery = u"""
select * from tcs_helper_catalogue_tables_info where table_name like "%%stream" or (number_of_rows is null and legacy_table = 0)
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
tbName = row["table_name"]
sqlQuery = u"""
update tcs_helper_catalogue_tables_info set number_of_rows = (select count(*) as count from %(tbName)s) where table_name = "%(tbName)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
sqlQuery = u"""
select * from tcs_helper_catalogue_views_info where (number_of_rows is null and legacy_view = 0)
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
tbName = row["view_name"]
print tbName
sqlQuery = u"""
update tcs_helper_catalogue_views_info set number_of_rows = (select count(*) as count from %(tbName)s) where view_name = "%(tbName)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug(
'completed the ``_updated_row_counts_in_tcs_helper_catalogue_tables_info`` method')
return None | [
"def",
"_updated_row_counts_in_tcs_helper_catalogue_tables_info",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_updated_row_counts_in_tcs_helper_catalogue_tables_info`` method'",
")",
"sqlQuery",
"=",
"u\"\"\"\n select * from tcs_helper_catalogue_tables_info where table_name like \"%%stream\" or (number_of_rows is null and legacy_table = 0)\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"for",
"row",
"in",
"rows",
":",
"tbName",
"=",
"row",
"[",
"\"table_name\"",
"]",
"sqlQuery",
"=",
"u\"\"\"\n update tcs_helper_catalogue_tables_info set number_of_rows = (select count(*) as count from %(tbName)s) where table_name = \"%(tbName)s\"\n \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"sqlQuery",
"=",
"u\"\"\"\n select * from tcs_helper_catalogue_views_info where (number_of_rows is null and legacy_view = 0)\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"for",
"row",
"in",
"rows",
":",
"tbName",
"=",
"row",
"[",
"\"view_name\"",
"]",
"print",
"tbName",
"sqlQuery",
"=",
"u\"\"\"\n update tcs_helper_catalogue_views_info set number_of_rows = (select count(*) as count from %(tbName)s) where view_name = \"%(tbName)s\"\n \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_updated_row_counts_in_tcs_helper_catalogue_tables_info`` method'",
")",
"return",
"None"
] | updated row counts in tcs catalogue tables
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring | [
"updated",
"row",
"counts",
"in",
"tcs",
"catalogue",
"tables"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/database_cleaner.py#L113-L177 |
thespacedoctor/sherlock | sherlock/database_cleaner.py | database_cleaner._update_tcs_helper_catalogue_tables_info_with_new_tables | def _update_tcs_helper_catalogue_tables_info_with_new_tables(
self):
"""update tcs helper catalogue tables info with new tables
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``_update_tcs_helper_catalogue_tables_info_with_new_tables`` method')
sqlQuery = u"""
SELECT max(id) as thisId FROM tcs_helper_catalogue_tables_info;
""" % locals()
thisId = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
try:
highestId = thisId[0]["thisId"] + 1
except:
highestId = 1
sqlQuery = u"""
SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_SCHEMA like '%%catalogues%%' and TABLE_NAME like "tcs_cat%%";
""" % locals()
tablesInDatabase = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
sqlQuery = u"""
SELECT table_name, old_table_name FROM tcs_helper_catalogue_tables_info;
""" % locals()
tableList = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
tbList = []
oldList = []
for tb in tableList:
oldList.append(tb["old_table_name"])
for tb in tableList:
if tb["old_table_name"] not in tbList:
tbList.append(tb["old_table_name"])
if tb["table_name"] not in tbList:
tbList.append(tb["table_name"])
for tb in tablesInDatabase:
if tb["TABLE_NAME"] not in tbList:
thisTableName = tb["TABLE_NAME"]
print "`%(thisTableName)s` added to `tcs_helper_catalogue_tables_info` database table" % locals()
sqlQuery = u"""
INSERT INTO tcs_helper_catalogue_tables_info (
id,
table_name
)
VALUES (
%(highestId)s,
"%(thisTableName)s"
)""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
highestId += 1
self.log.debug(
'completed the ``_update_tcs_helper_catalogue_tables_info_with_new_tables`` method')
return None | python | def _update_tcs_helper_catalogue_tables_info_with_new_tables(
self):
"""update tcs helper catalogue tables info with new tables
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``_update_tcs_helper_catalogue_tables_info_with_new_tables`` method')
sqlQuery = u"""
SELECT max(id) as thisId FROM tcs_helper_catalogue_tables_info;
""" % locals()
thisId = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
try:
highestId = thisId[0]["thisId"] + 1
except:
highestId = 1
sqlQuery = u"""
SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_SCHEMA like '%%catalogues%%' and TABLE_NAME like "tcs_cat%%";
""" % locals()
tablesInDatabase = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
sqlQuery = u"""
SELECT table_name, old_table_name FROM tcs_helper_catalogue_tables_info;
""" % locals()
tableList = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
tbList = []
oldList = []
for tb in tableList:
oldList.append(tb["old_table_name"])
for tb in tableList:
if tb["old_table_name"] not in tbList:
tbList.append(tb["old_table_name"])
if tb["table_name"] not in tbList:
tbList.append(tb["table_name"])
for tb in tablesInDatabase:
if tb["TABLE_NAME"] not in tbList:
thisTableName = tb["TABLE_NAME"]
print "`%(thisTableName)s` added to `tcs_helper_catalogue_tables_info` database table" % locals()
sqlQuery = u"""
INSERT INTO tcs_helper_catalogue_tables_info (
id,
table_name
)
VALUES (
%(highestId)s,
"%(thisTableName)s"
)""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
highestId += 1
self.log.debug(
'completed the ``_update_tcs_helper_catalogue_tables_info_with_new_tables`` method')
return None | [
"def",
"_update_tcs_helper_catalogue_tables_info_with_new_tables",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_update_tcs_helper_catalogue_tables_info_with_new_tables`` method'",
")",
"sqlQuery",
"=",
"u\"\"\"\n SELECT max(id) as thisId FROM tcs_helper_catalogue_tables_info;\n \"\"\"",
"%",
"locals",
"(",
")",
"thisId",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"try",
":",
"highestId",
"=",
"thisId",
"[",
"0",
"]",
"[",
"\"thisId\"",
"]",
"+",
"1",
"except",
":",
"highestId",
"=",
"1",
"sqlQuery",
"=",
"u\"\"\"\n SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_SCHEMA like '%%catalogues%%' and TABLE_NAME like \"tcs_cat%%\";\n \"\"\"",
"%",
"locals",
"(",
")",
"tablesInDatabase",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"sqlQuery",
"=",
"u\"\"\"\n SELECT table_name, old_table_name FROM tcs_helper_catalogue_tables_info;\n \"\"\"",
"%",
"locals",
"(",
")",
"tableList",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"tbList",
"=",
"[",
"]",
"oldList",
"=",
"[",
"]",
"for",
"tb",
"in",
"tableList",
":",
"oldList",
".",
"append",
"(",
"tb",
"[",
"\"old_table_name\"",
"]",
")",
"for",
"tb",
"in",
"tableList",
":",
"if",
"tb",
"[",
"\"old_table_name\"",
"]",
"not",
"in",
"tbList",
":",
"tbList",
".",
"append",
"(",
"tb",
"[",
"\"old_table_name\"",
"]",
")",
"if",
"tb",
"[",
"\"table_name\"",
"]",
"not",
"in",
"tbList",
":",
"tbList",
".",
"append",
"(",
"tb",
"[",
"\"table_name\"",
"]",
")",
"for",
"tb",
"in",
"tablesInDatabase",
":",
"if",
"tb",
"[",
"\"TABLE_NAME\"",
"]",
"not",
"in",
"tbList",
":",
"thisTableName",
"=",
"tb",
"[",
"\"TABLE_NAME\"",
"]",
"print",
"\"`%(thisTableName)s` added to `tcs_helper_catalogue_tables_info` database table\"",
"%",
"locals",
"(",
")",
"sqlQuery",
"=",
"u\"\"\"\n INSERT INTO tcs_helper_catalogue_tables_info (\n id,\n table_name\n )\n VALUES (\n %(highestId)s,\n \"%(thisTableName)s\"\n )\"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"highestId",
"+=",
"1",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_update_tcs_helper_catalogue_tables_info_with_new_tables`` method'",
")",
"return",
"None"
] | update tcs helper catalogue tables info with new tables
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring | [
"update",
"tcs",
"helper",
"catalogue",
"tables",
"info",
"with",
"new",
"tables"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/database_cleaner.py#L179-L262 |
thespacedoctor/sherlock | sherlock/database_cleaner.py | database_cleaner._clean_up_columns | def _clean_up_columns(
self):
"""clean up columns
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug('starting the ``_clean_up_columns`` method')
sqlQueries = [
"update tcs_helper_catalogue_tables_info set old_table_name = table_name where old_table_name is null;",
"update tcs_helper_catalogue_tables_info set version_number = 'stream' where table_name like '%%stream' and version_number is null;",
"""update tcs_helper_catalogue_tables_info set in_ned = 0 where table_name like '%%stream' and in_ned is null;""",
"""update tcs_helper_catalogue_tables_info set vizier_link = 0 where table_name like '%%stream' and vizier_link is null;""",
"update tcs_helper_catalogue_views_info set old_view_name = view_name where old_view_name is null;",
]
for sqlQuery in sqlQueries:
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
# VIEW OBJECT TYPES
sqlQuery = u"""
SELECT view_name FROM tcs_helper_catalogue_views_info where legacy_view = 0 and object_type is null;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
view_name = row["view_name"]
object_type = view_name.replace("tcs_view_", "").split("_")[0]
sqlQuery = u"""
update tcs_helper_catalogue_views_info set object_type = "%(object_type)s" where view_name = "%(view_name)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
# MASTER TABLE ID FOR VIEWS
sqlQuery = u"""
SELECT view_name FROM tcs_helper_catalogue_views_info where legacy_view = 0 and table_id is null;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
view_name = row["view_name"]
table_name = view_name.replace("tcs_view_", "").split("_")[1:]
table_name = ("_").join(table_name)
table_name = "tcs_cat_%(table_name)s" % locals()
sqlQuery = u"""
update tcs_helper_catalogue_views_info set table_id = (select id from tcs_helper_catalogue_tables_info where table_name = "%(table_name)s") where view_name = "%(view_name)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_clean_up_columns`` method')
return None | python | def _clean_up_columns(
self):
"""clean up columns
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug('starting the ``_clean_up_columns`` method')
sqlQueries = [
"update tcs_helper_catalogue_tables_info set old_table_name = table_name where old_table_name is null;",
"update tcs_helper_catalogue_tables_info set version_number = 'stream' where table_name like '%%stream' and version_number is null;",
"""update tcs_helper_catalogue_tables_info set in_ned = 0 where table_name like '%%stream' and in_ned is null;""",
"""update tcs_helper_catalogue_tables_info set vizier_link = 0 where table_name like '%%stream' and vizier_link is null;""",
"update tcs_helper_catalogue_views_info set old_view_name = view_name where old_view_name is null;",
]
for sqlQuery in sqlQueries:
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
# VIEW OBJECT TYPES
sqlQuery = u"""
SELECT view_name FROM tcs_helper_catalogue_views_info where legacy_view = 0 and object_type is null;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
view_name = row["view_name"]
object_type = view_name.replace("tcs_view_", "").split("_")[0]
sqlQuery = u"""
update tcs_helper_catalogue_views_info set object_type = "%(object_type)s" where view_name = "%(view_name)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
# MASTER TABLE ID FOR VIEWS
sqlQuery = u"""
SELECT view_name FROM tcs_helper_catalogue_views_info where legacy_view = 0 and table_id is null;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
view_name = row["view_name"]
table_name = view_name.replace("tcs_view_", "").split("_")[1:]
table_name = ("_").join(table_name)
table_name = "tcs_cat_%(table_name)s" % locals()
sqlQuery = u"""
update tcs_helper_catalogue_views_info set table_id = (select id from tcs_helper_catalogue_tables_info where table_name = "%(table_name)s") where view_name = "%(view_name)s"
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_clean_up_columns`` method')
return None | [
"def",
"_clean_up_columns",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_clean_up_columns`` method'",
")",
"sqlQueries",
"=",
"[",
"\"update tcs_helper_catalogue_tables_info set old_table_name = table_name where old_table_name is null;\"",
",",
"\"update tcs_helper_catalogue_tables_info set version_number = 'stream' where table_name like '%%stream' and version_number is null;\"",
",",
"\"\"\"update tcs_helper_catalogue_tables_info set in_ned = 0 where table_name like '%%stream' and in_ned is null;\"\"\"",
",",
"\"\"\"update tcs_helper_catalogue_tables_info set vizier_link = 0 where table_name like '%%stream' and vizier_link is null;\"\"\"",
",",
"\"update tcs_helper_catalogue_views_info set old_view_name = view_name where old_view_name is null;\"",
",",
"]",
"for",
"sqlQuery",
"in",
"sqlQueries",
":",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"# VIEW OBJECT TYPES",
"sqlQuery",
"=",
"u\"\"\"\n SELECT view_name FROM tcs_helper_catalogue_views_info where legacy_view = 0 and object_type is null;\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"for",
"row",
"in",
"rows",
":",
"view_name",
"=",
"row",
"[",
"\"view_name\"",
"]",
"object_type",
"=",
"view_name",
".",
"replace",
"(",
"\"tcs_view_\"",
",",
"\"\"",
")",
".",
"split",
"(",
"\"_\"",
")",
"[",
"0",
"]",
"sqlQuery",
"=",
"u\"\"\"\n update tcs_helper_catalogue_views_info set object_type = \"%(object_type)s\" where view_name = \"%(view_name)s\"\n \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"# MASTER TABLE ID FOR VIEWS",
"sqlQuery",
"=",
"u\"\"\"\n SELECT view_name FROM tcs_helper_catalogue_views_info where legacy_view = 0 and table_id is null;\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"for",
"row",
"in",
"rows",
":",
"view_name",
"=",
"row",
"[",
"\"view_name\"",
"]",
"table_name",
"=",
"view_name",
".",
"replace",
"(",
"\"tcs_view_\"",
",",
"\"\"",
")",
".",
"split",
"(",
"\"_\"",
")",
"[",
"1",
":",
"]",
"table_name",
"=",
"(",
"\"_\"",
")",
".",
"join",
"(",
"table_name",
")",
"table_name",
"=",
"\"tcs_cat_%(table_name)s\"",
"%",
"locals",
"(",
")",
"sqlQuery",
"=",
"u\"\"\"\n update tcs_helper_catalogue_views_info set table_id = (select id from tcs_helper_catalogue_tables_info where table_name = \"%(table_name)s\") where view_name = \"%(view_name)s\"\n \"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_clean_up_columns`` method'",
")",
"return",
"None"
] | clean up columns
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring | [
"clean",
"up",
"columns"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/database_cleaner.py#L264-L346 |
thespacedoctor/sherlock | sherlock/database_cleaner.py | database_cleaner._update_tcs_helper_catalogue_views_info_with_new_views | def _update_tcs_helper_catalogue_views_info_with_new_views(
self):
""" update tcs helper catalogue tables info with new tables
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``_update_tcs_helper_catalogue_views_info_with_new_views`` method')
sqlQuery = u"""
SELECT max(id) as thisId FROM tcs_helper_catalogue_views_info;
""" % locals()
thisId = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
try:
highestId = thisId[0]["thisId"] + 1
except:
highestId = 1
sqlQuery = u"""
SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW' AND TABLE_SCHEMA like '%%catalogues%%' and TABLE_NAME like "tcs_view%%" and TABLE_NAME not like "%%helper%%";
""" % locals()
tablesInDatabase = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
sqlQuery = u"""
SELECT view_name FROM tcs_helper_catalogue_views_info;
""" % locals()
tableList = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
tbList = []
for tb in tableList:
tbList.append(tb["view_name"])
for tb in tablesInDatabase:
if tb["TABLE_NAME"] not in tbList:
thisViewName = tb["TABLE_NAME"]
print "`%(thisViewName)s` added to `tcs_helper_catalogue_views_info` database table" % locals()
sqlQuery = u"""
INSERT INTO tcs_helper_catalogue_views_info (
id,
view_name
)
VALUES (
%(highestId)s,
"%(thisViewName)s"
)""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
highestId += 1
self.log.debug(
'completed the ``_update_tcs_helper_catalogue_views_info_with_new_views`` method')
return None | python | def _update_tcs_helper_catalogue_views_info_with_new_views(
self):
""" update tcs helper catalogue tables info with new tables
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``_update_tcs_helper_catalogue_views_info_with_new_views`` method')
sqlQuery = u"""
SELECT max(id) as thisId FROM tcs_helper_catalogue_views_info;
""" % locals()
thisId = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
try:
highestId = thisId[0]["thisId"] + 1
except:
highestId = 1
sqlQuery = u"""
SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW' AND TABLE_SCHEMA like '%%catalogues%%' and TABLE_NAME like "tcs_view%%" and TABLE_NAME not like "%%helper%%";
""" % locals()
tablesInDatabase = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
sqlQuery = u"""
SELECT view_name FROM tcs_helper_catalogue_views_info;
""" % locals()
tableList = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
tbList = []
for tb in tableList:
tbList.append(tb["view_name"])
for tb in tablesInDatabase:
if tb["TABLE_NAME"] not in tbList:
thisViewName = tb["TABLE_NAME"]
print "`%(thisViewName)s` added to `tcs_helper_catalogue_views_info` database table" % locals()
sqlQuery = u"""
INSERT INTO tcs_helper_catalogue_views_info (
id,
view_name
)
VALUES (
%(highestId)s,
"%(thisViewName)s"
)""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
highestId += 1
self.log.debug(
'completed the ``_update_tcs_helper_catalogue_views_info_with_new_views`` method')
return None | [
"def",
"_update_tcs_helper_catalogue_views_info_with_new_views",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_update_tcs_helper_catalogue_views_info_with_new_views`` method'",
")",
"sqlQuery",
"=",
"u\"\"\"\n SELECT max(id) as thisId FROM tcs_helper_catalogue_views_info;\n \"\"\"",
"%",
"locals",
"(",
")",
"thisId",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"try",
":",
"highestId",
"=",
"thisId",
"[",
"0",
"]",
"[",
"\"thisId\"",
"]",
"+",
"1",
"except",
":",
"highestId",
"=",
"1",
"sqlQuery",
"=",
"u\"\"\"\n SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW' AND TABLE_SCHEMA like '%%catalogues%%' and TABLE_NAME like \"tcs_view%%\" and TABLE_NAME not like \"%%helper%%\";\n \"\"\"",
"%",
"locals",
"(",
")",
"tablesInDatabase",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"sqlQuery",
"=",
"u\"\"\"\n SELECT view_name FROM tcs_helper_catalogue_views_info;\n \"\"\"",
"%",
"locals",
"(",
")",
"tableList",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"tbList",
"=",
"[",
"]",
"for",
"tb",
"in",
"tableList",
":",
"tbList",
".",
"append",
"(",
"tb",
"[",
"\"view_name\"",
"]",
")",
"for",
"tb",
"in",
"tablesInDatabase",
":",
"if",
"tb",
"[",
"\"TABLE_NAME\"",
"]",
"not",
"in",
"tbList",
":",
"thisViewName",
"=",
"tb",
"[",
"\"TABLE_NAME\"",
"]",
"print",
"\"`%(thisViewName)s` added to `tcs_helper_catalogue_views_info` database table\"",
"%",
"locals",
"(",
")",
"sqlQuery",
"=",
"u\"\"\"\n INSERT INTO tcs_helper_catalogue_views_info (\n id,\n view_name\n )\n VALUES (\n %(highestId)s,\n \"%(thisViewName)s\"\n )\"\"\"",
"%",
"locals",
"(",
")",
"writequery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
")",
"highestId",
"+=",
"1",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_update_tcs_helper_catalogue_views_info_with_new_views`` method'",
")",
"return",
"None"
] | update tcs helper catalogue tables info with new tables
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring | [
"update",
"tcs",
"helper",
"catalogue",
"tables",
"info",
"with",
"new",
"tables"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/database_cleaner.py#L348-L424 |
pytroll/posttroll | posttroll/listener.py | ListenerContainer.restart_listener | def restart_listener(self, topics):
'''Restart listener after configuration update.
'''
if self.listener is not None:
if self.listener.running:
self.stop()
self.__init__(topics=topics) | python | def restart_listener(self, topics):
'''Restart listener after configuration update.
'''
if self.listener is not None:
if self.listener.running:
self.stop()
self.__init__(topics=topics) | [
"def",
"restart_listener",
"(",
"self",
",",
"topics",
")",
":",
"if",
"self",
".",
"listener",
"is",
"not",
"None",
":",
"if",
"self",
".",
"listener",
".",
"running",
":",
"self",
".",
"stop",
"(",
")",
"self",
".",
"__init__",
"(",
"topics",
"=",
"topics",
")"
] | Restart listener after configuration update. | [
"Restart",
"listener",
"after",
"configuration",
"update",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/listener.py#L65-L71 |
pytroll/posttroll | posttroll/listener.py | ListenerContainer.stop | def stop(self):
'''Stop listener.'''
self.logger.debug("Stopping listener.")
self.listener.stop()
if self.thread is not None:
self.thread.join()
self.thread = None
self.logger.debug("Listener stopped.") | python | def stop(self):
'''Stop listener.'''
self.logger.debug("Stopping listener.")
self.listener.stop()
if self.thread is not None:
self.thread.join()
self.thread = None
self.logger.debug("Listener stopped.") | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Stopping listener.\"",
")",
"self",
".",
"listener",
".",
"stop",
"(",
")",
"if",
"self",
".",
"thread",
"is",
"not",
"None",
":",
"self",
".",
"thread",
".",
"join",
"(",
")",
"self",
".",
"thread",
"=",
"None",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Listener stopped.\"",
")"
] | Stop listener. | [
"Stop",
"listener",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/listener.py#L73-L80 |
pytroll/posttroll | posttroll/listener.py | Listener.create_subscriber | def create_subscriber(self):
'''Create a subscriber instance using specified addresses and
message types.
'''
if self.subscriber is None:
if self.topics:
self.subscriber = NSSubscriber(self.services, self.topics,
addr_listener=True,
addresses=self.addresses,
nameserver=self.nameserver)
self.recv = self.subscriber.start().recv | python | def create_subscriber(self):
'''Create a subscriber instance using specified addresses and
message types.
'''
if self.subscriber is None:
if self.topics:
self.subscriber = NSSubscriber(self.services, self.topics,
addr_listener=True,
addresses=self.addresses,
nameserver=self.nameserver)
self.recv = self.subscriber.start().recv | [
"def",
"create_subscriber",
"(",
"self",
")",
":",
"if",
"self",
".",
"subscriber",
"is",
"None",
":",
"if",
"self",
".",
"topics",
":",
"self",
".",
"subscriber",
"=",
"NSSubscriber",
"(",
"self",
".",
"services",
",",
"self",
".",
"topics",
",",
"addr_listener",
"=",
"True",
",",
"addresses",
"=",
"self",
".",
"addresses",
",",
"nameserver",
"=",
"self",
".",
"nameserver",
")",
"self",
".",
"recv",
"=",
"self",
".",
"subscriber",
".",
"start",
"(",
")",
".",
"recv"
] | Create a subscriber instance using specified addresses and
message types. | [
"Create",
"a",
"subscriber",
"instance",
"using",
"specified",
"addresses",
"and",
"message",
"types",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/listener.py#L105-L115 |
pytroll/posttroll | posttroll/listener.py | Listener.run | def run(self):
'''Run listener
'''
self.running = True
for msg in self.recv(1):
if msg is None:
if self.running:
continue
else:
break
self.logger.debug("New message received: %s", str(msg))
self.add_to_queue(msg) | python | def run(self):
'''Run listener
'''
self.running = True
for msg in self.recv(1):
if msg is None:
if self.running:
continue
else:
break
self.logger.debug("New message received: %s", str(msg))
self.add_to_queue(msg) | [
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"running",
"=",
"True",
"for",
"msg",
"in",
"self",
".",
"recv",
"(",
"1",
")",
":",
"if",
"msg",
"is",
"None",
":",
"if",
"self",
".",
"running",
":",
"continue",
"else",
":",
"break",
"self",
".",
"logger",
".",
"debug",
"(",
"\"New message received: %s\"",
",",
"str",
"(",
"msg",
")",
")",
"self",
".",
"add_to_queue",
"(",
"msg",
")"
] | Run listener | [
"Run",
"listener"
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/listener.py#L122-L136 |
pytroll/posttroll | posttroll/listener.py | Listener.stop | def stop(self):
'''Stop subscriber and delete the instance
'''
self.running = False
time.sleep(1)
if self.subscriber is not None:
self.subscriber.stop()
self.subscriber = None | python | def stop(self):
'''Stop subscriber and delete the instance
'''
self.running = False
time.sleep(1)
if self.subscriber is not None:
self.subscriber.stop()
self.subscriber = None | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"running",
"=",
"False",
"time",
".",
"sleep",
"(",
"1",
")",
"if",
"self",
".",
"subscriber",
"is",
"not",
"None",
":",
"self",
".",
"subscriber",
".",
"stop",
"(",
")",
"self",
".",
"subscriber",
"=",
"None"
] | Stop subscriber and delete the instance | [
"Stop",
"subscriber",
"and",
"delete",
"the",
"instance"
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/listener.py#L138-L145 |
ARMmbed/autoversion | src/auto_version/semver.py | get_current_semver | def get_current_semver(data):
"""Given a dictionary of all version data available, determine the current version"""
# get the not-none values from data
known = {
key: data.get(alias)
for key, alias in config._forward_aliases.items()
if data.get(alias) is not None
}
# prefer the strict field, if available
potentials = [
known.pop(Constants.VERSION_STRICT_FIELD, None),
known.pop(Constants.VERSION_FIELD, None),
]
from_components = [known.get(k) for k in SemVerSigFig._fields if k in known]
if len(from_components) == 3:
potentials.append(".".join(from_components))
versions = set()
for potential in potentials:
if not potential:
continue
match = re_semver.match(potential)
if match:
parts = match.groupdict()
parts.pop("tail")
versions.add(SemVer(**parts))
if len(versions) > 1:
raise ValueError("conflicting versions within project: %s" % versions)
if not versions:
_LOG.debug("key pairs found: \n%r", known)
raise ValueError("could not find existing semver")
return versions.pop() | python | def get_current_semver(data):
"""Given a dictionary of all version data available, determine the current version"""
# get the not-none values from data
known = {
key: data.get(alias)
for key, alias in config._forward_aliases.items()
if data.get(alias) is not None
}
# prefer the strict field, if available
potentials = [
known.pop(Constants.VERSION_STRICT_FIELD, None),
known.pop(Constants.VERSION_FIELD, None),
]
from_components = [known.get(k) for k in SemVerSigFig._fields if k in known]
if len(from_components) == 3:
potentials.append(".".join(from_components))
versions = set()
for potential in potentials:
if not potential:
continue
match = re_semver.match(potential)
if match:
parts = match.groupdict()
parts.pop("tail")
versions.add(SemVer(**parts))
if len(versions) > 1:
raise ValueError("conflicting versions within project: %s" % versions)
if not versions:
_LOG.debug("key pairs found: \n%r", known)
raise ValueError("could not find existing semver")
return versions.pop() | [
"def",
"get_current_semver",
"(",
"data",
")",
":",
"# get the not-none values from data",
"known",
"=",
"{",
"key",
":",
"data",
".",
"get",
"(",
"alias",
")",
"for",
"key",
",",
"alias",
"in",
"config",
".",
"_forward_aliases",
".",
"items",
"(",
")",
"if",
"data",
".",
"get",
"(",
"alias",
")",
"is",
"not",
"None",
"}",
"# prefer the strict field, if available",
"potentials",
"=",
"[",
"known",
".",
"pop",
"(",
"Constants",
".",
"VERSION_STRICT_FIELD",
",",
"None",
")",
",",
"known",
".",
"pop",
"(",
"Constants",
".",
"VERSION_FIELD",
",",
"None",
")",
",",
"]",
"from_components",
"=",
"[",
"known",
".",
"get",
"(",
"k",
")",
"for",
"k",
"in",
"SemVerSigFig",
".",
"_fields",
"if",
"k",
"in",
"known",
"]",
"if",
"len",
"(",
"from_components",
")",
"==",
"3",
":",
"potentials",
".",
"append",
"(",
"\".\"",
".",
"join",
"(",
"from_components",
")",
")",
"versions",
"=",
"set",
"(",
")",
"for",
"potential",
"in",
"potentials",
":",
"if",
"not",
"potential",
":",
"continue",
"match",
"=",
"re_semver",
".",
"match",
"(",
"potential",
")",
"if",
"match",
":",
"parts",
"=",
"match",
".",
"groupdict",
"(",
")",
"parts",
".",
"pop",
"(",
"\"tail\"",
")",
"versions",
".",
"add",
"(",
"SemVer",
"(",
"*",
"*",
"parts",
")",
")",
"if",
"len",
"(",
"versions",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"conflicting versions within project: %s\"",
"%",
"versions",
")",
"if",
"not",
"versions",
":",
"_LOG",
".",
"debug",
"(",
"\"key pairs found: \\n%r\"",
",",
"known",
")",
"raise",
"ValueError",
"(",
"\"could not find existing semver\"",
")",
"return",
"versions",
".",
"pop",
"(",
")"
] | Given a dictionary of all version data available, determine the current version | [
"Given",
"a",
"dictionary",
"of",
"all",
"version",
"data",
"available",
"determine",
"the",
"current",
"version"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/semver.py#L15-L50 |
ARMmbed/autoversion | src/auto_version/semver.py | make_new_semver | def make_new_semver(current_semver, all_triggers, **overrides):
"""Defines how to increment semver based on which significant figure is triggered"""
new_semver = {}
bumped = False
for sig_fig in SemVerSigFig: # iterate sig figs in order of significance
value = getattr(current_semver, sig_fig)
override = overrides.get(sig_fig)
if override is not None:
new_semver[sig_fig] = override
if int(override) > int(value):
bumped = True
elif bumped:
new_semver[sig_fig] = "0"
elif sig_fig in all_triggers:
new_semver[sig_fig] = str(int(value) + 1)
bumped = True
else:
new_semver[sig_fig] = value
return SemVer(**new_semver) | python | def make_new_semver(current_semver, all_triggers, **overrides):
"""Defines how to increment semver based on which significant figure is triggered"""
new_semver = {}
bumped = False
for sig_fig in SemVerSigFig: # iterate sig figs in order of significance
value = getattr(current_semver, sig_fig)
override = overrides.get(sig_fig)
if override is not None:
new_semver[sig_fig] = override
if int(override) > int(value):
bumped = True
elif bumped:
new_semver[sig_fig] = "0"
elif sig_fig in all_triggers:
new_semver[sig_fig] = str(int(value) + 1)
bumped = True
else:
new_semver[sig_fig] = value
return SemVer(**new_semver) | [
"def",
"make_new_semver",
"(",
"current_semver",
",",
"all_triggers",
",",
"*",
"*",
"overrides",
")",
":",
"new_semver",
"=",
"{",
"}",
"bumped",
"=",
"False",
"for",
"sig_fig",
"in",
"SemVerSigFig",
":",
"# iterate sig figs in order of significance",
"value",
"=",
"getattr",
"(",
"current_semver",
",",
"sig_fig",
")",
"override",
"=",
"overrides",
".",
"get",
"(",
"sig_fig",
")",
"if",
"override",
"is",
"not",
"None",
":",
"new_semver",
"[",
"sig_fig",
"]",
"=",
"override",
"if",
"int",
"(",
"override",
")",
">",
"int",
"(",
"value",
")",
":",
"bumped",
"=",
"True",
"elif",
"bumped",
":",
"new_semver",
"[",
"sig_fig",
"]",
"=",
"\"0\"",
"elif",
"sig_fig",
"in",
"all_triggers",
":",
"new_semver",
"[",
"sig_fig",
"]",
"=",
"str",
"(",
"int",
"(",
"value",
")",
"+",
"1",
")",
"bumped",
"=",
"True",
"else",
":",
"new_semver",
"[",
"sig_fig",
"]",
"=",
"value",
"return",
"SemVer",
"(",
"*",
"*",
"new_semver",
")"
] | Defines how to increment semver based on which significant figure is triggered | [
"Defines",
"how",
"to",
"increment",
"semver",
"based",
"on",
"which",
"significant",
"figure",
"is",
"triggered"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/semver.py#L53-L71 |
lbenitez000/trparse | trparse.py | loads | def loads(data):
"""Parser entry point. Parses the output of a traceroute execution"""
data += "\n_EOS_" # Append EOS token. Helps to match last RE_HOP
# Get headers
match_dest = RE_HEADER.search(data)
dest_name = match_dest.group(1)
dest_ip = match_dest.group(2)
# The Traceroute is the root of the tree.
traceroute = Traceroute(dest_name, dest_ip)
# Get hops
matches_hop = RE_HOP.findall(data)
for match_hop in matches_hop:
# Initialize a hop
idx = int(match_hop[0])
if match_hop[1]:
asn = int(match_hop[1])
else:
asn = None
hop = Hop(idx, asn)
# Parse probes data: <name> | <(IP)> | <rtt> | 'ms' | '*'
probes_data = match_hop[2].split()
# Get rid of 'ms': <name> | <(IP)> | <rtt> | '*'
probes_data = filter(lambda s: s.lower() != 'ms', probes_data)
i = 0
while i < len(probes_data):
# For each hop parse probes
name = None
ip = None
rtt = None
anno = ''
# RTT check comes first because RE_PROBE_NAME can confuse rtt with an IP as name
# The regex RE_PROBE_NAME can be improved
if RE_PROBE_RTT.match(probes_data[i]):
# Matched rtt, so name and IP have been parsed before
rtt = float(probes_data[i])
i += 1
elif RE_PROBE_NAME.match(probes_data[i]):
# Matched a name, so next elements are IP and rtt
name = probes_data[i]
ip = probes_data[i+1].strip('()')
rtt = float(probes_data[i+2])
i += 3
elif RE_PROBE_TIMEOUT.match(probes_data[i]):
# Its a timeout, so maybe name and IP have been parsed before
# or maybe not. But it's Hop job to deal with it
rtt = None
i += 1
else:
ext = "i: %d\nprobes_data: %s\nname: %s\nip: %s\nrtt: %s\nanno: %s" % (i, probes_data, name, ip, rtt, anno)
raise ParseError("Parse error \n%s" % ext)
# Check for annotation
try:
if RE_PROBE_ANNOTATION.match(probes_data[i]):
anno = probes_data[i]
i += 1
except IndexError:
pass
probe = Probe(name, ip, rtt, anno)
hop.add_probe(probe)
traceroute.add_hop(hop)
return traceroute | python | def loads(data):
"""Parser entry point. Parses the output of a traceroute execution"""
data += "\n_EOS_" # Append EOS token. Helps to match last RE_HOP
# Get headers
match_dest = RE_HEADER.search(data)
dest_name = match_dest.group(1)
dest_ip = match_dest.group(2)
# The Traceroute is the root of the tree.
traceroute = Traceroute(dest_name, dest_ip)
# Get hops
matches_hop = RE_HOP.findall(data)
for match_hop in matches_hop:
# Initialize a hop
idx = int(match_hop[0])
if match_hop[1]:
asn = int(match_hop[1])
else:
asn = None
hop = Hop(idx, asn)
# Parse probes data: <name> | <(IP)> | <rtt> | 'ms' | '*'
probes_data = match_hop[2].split()
# Get rid of 'ms': <name> | <(IP)> | <rtt> | '*'
probes_data = filter(lambda s: s.lower() != 'ms', probes_data)
i = 0
while i < len(probes_data):
# For each hop parse probes
name = None
ip = None
rtt = None
anno = ''
# RTT check comes first because RE_PROBE_NAME can confuse rtt with an IP as name
# The regex RE_PROBE_NAME can be improved
if RE_PROBE_RTT.match(probes_data[i]):
# Matched rtt, so name and IP have been parsed before
rtt = float(probes_data[i])
i += 1
elif RE_PROBE_NAME.match(probes_data[i]):
# Matched a name, so next elements are IP and rtt
name = probes_data[i]
ip = probes_data[i+1].strip('()')
rtt = float(probes_data[i+2])
i += 3
elif RE_PROBE_TIMEOUT.match(probes_data[i]):
# Its a timeout, so maybe name and IP have been parsed before
# or maybe not. But it's Hop job to deal with it
rtt = None
i += 1
else:
ext = "i: %d\nprobes_data: %s\nname: %s\nip: %s\nrtt: %s\nanno: %s" % (i, probes_data, name, ip, rtt, anno)
raise ParseError("Parse error \n%s" % ext)
# Check for annotation
try:
if RE_PROBE_ANNOTATION.match(probes_data[i]):
anno = probes_data[i]
i += 1
except IndexError:
pass
probe = Probe(name, ip, rtt, anno)
hop.add_probe(probe)
traceroute.add_hop(hop)
return traceroute | [
"def",
"loads",
"(",
"data",
")",
":",
"data",
"+=",
"\"\\n_EOS_\"",
"# Append EOS token. Helps to match last RE_HOP",
"# Get headers",
"match_dest",
"=",
"RE_HEADER",
".",
"search",
"(",
"data",
")",
"dest_name",
"=",
"match_dest",
".",
"group",
"(",
"1",
")",
"dest_ip",
"=",
"match_dest",
".",
"group",
"(",
"2",
")",
"# The Traceroute is the root of the tree.",
"traceroute",
"=",
"Traceroute",
"(",
"dest_name",
",",
"dest_ip",
")",
"# Get hops",
"matches_hop",
"=",
"RE_HOP",
".",
"findall",
"(",
"data",
")",
"for",
"match_hop",
"in",
"matches_hop",
":",
"# Initialize a hop",
"idx",
"=",
"int",
"(",
"match_hop",
"[",
"0",
"]",
")",
"if",
"match_hop",
"[",
"1",
"]",
":",
"asn",
"=",
"int",
"(",
"match_hop",
"[",
"1",
"]",
")",
"else",
":",
"asn",
"=",
"None",
"hop",
"=",
"Hop",
"(",
"idx",
",",
"asn",
")",
"# Parse probes data: <name> | <(IP)> | <rtt> | 'ms' | '*'",
"probes_data",
"=",
"match_hop",
"[",
"2",
"]",
".",
"split",
"(",
")",
"# Get rid of 'ms': <name> | <(IP)> | <rtt> | '*'",
"probes_data",
"=",
"filter",
"(",
"lambda",
"s",
":",
"s",
".",
"lower",
"(",
")",
"!=",
"'ms'",
",",
"probes_data",
")",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"probes_data",
")",
":",
"# For each hop parse probes",
"name",
"=",
"None",
"ip",
"=",
"None",
"rtt",
"=",
"None",
"anno",
"=",
"''",
"# RTT check comes first because RE_PROBE_NAME can confuse rtt with an IP as name",
"# The regex RE_PROBE_NAME can be improved",
"if",
"RE_PROBE_RTT",
".",
"match",
"(",
"probes_data",
"[",
"i",
"]",
")",
":",
"# Matched rtt, so name and IP have been parsed before",
"rtt",
"=",
"float",
"(",
"probes_data",
"[",
"i",
"]",
")",
"i",
"+=",
"1",
"elif",
"RE_PROBE_NAME",
".",
"match",
"(",
"probes_data",
"[",
"i",
"]",
")",
":",
"# Matched a name, so next elements are IP and rtt",
"name",
"=",
"probes_data",
"[",
"i",
"]",
"ip",
"=",
"probes_data",
"[",
"i",
"+",
"1",
"]",
".",
"strip",
"(",
"'()'",
")",
"rtt",
"=",
"float",
"(",
"probes_data",
"[",
"i",
"+",
"2",
"]",
")",
"i",
"+=",
"3",
"elif",
"RE_PROBE_TIMEOUT",
".",
"match",
"(",
"probes_data",
"[",
"i",
"]",
")",
":",
"# Its a timeout, so maybe name and IP have been parsed before",
"# or maybe not. But it's Hop job to deal with it",
"rtt",
"=",
"None",
"i",
"+=",
"1",
"else",
":",
"ext",
"=",
"\"i: %d\\nprobes_data: %s\\nname: %s\\nip: %s\\nrtt: %s\\nanno: %s\"",
"%",
"(",
"i",
",",
"probes_data",
",",
"name",
",",
"ip",
",",
"rtt",
",",
"anno",
")",
"raise",
"ParseError",
"(",
"\"Parse error \\n%s\"",
"%",
"ext",
")",
"# Check for annotation",
"try",
":",
"if",
"RE_PROBE_ANNOTATION",
".",
"match",
"(",
"probes_data",
"[",
"i",
"]",
")",
":",
"anno",
"=",
"probes_data",
"[",
"i",
"]",
"i",
"+=",
"1",
"except",
"IndexError",
":",
"pass",
"probe",
"=",
"Probe",
"(",
"name",
",",
"ip",
",",
"rtt",
",",
"anno",
")",
"hop",
".",
"add_probe",
"(",
"probe",
")",
"traceroute",
".",
"add_hop",
"(",
"hop",
")",
"return",
"traceroute"
] | Parser entry point. Parses the output of a traceroute execution | [
"Parser",
"entry",
"point",
".",
"Parses",
"the",
"output",
"of",
"a",
"traceroute",
"execution"
] | train | https://github.com/lbenitez000/trparse/blob/1f932d882a98b062b540b6f4bc2ecb0f35b92e97/trparse.py#L88-L158 |
lbenitez000/trparse | trparse.py | Hop.add_probe | def add_probe(self, probe):
"""Adds a Probe instance to this hop's results."""
if self.probes:
probe_last = self.probes[-1]
if not probe.ip:
probe.ip = probe_last.ip
probe.name = probe_last.name
self.probes.append(probe) | python | def add_probe(self, probe):
"""Adds a Probe instance to this hop's results."""
if self.probes:
probe_last = self.probes[-1]
if not probe.ip:
probe.ip = probe_last.ip
probe.name = probe_last.name
self.probes.append(probe) | [
"def",
"add_probe",
"(",
"self",
",",
"probe",
")",
":",
"if",
"self",
".",
"probes",
":",
"probe_last",
"=",
"self",
".",
"probes",
"[",
"-",
"1",
"]",
"if",
"not",
"probe",
".",
"ip",
":",
"probe",
".",
"ip",
"=",
"probe_last",
".",
"ip",
"probe",
".",
"name",
"=",
"probe_last",
".",
"name",
"self",
".",
"probes",
".",
"append",
"(",
"probe",
")"
] | Adds a Probe instance to this hop's results. | [
"Adds",
"a",
"Probe",
"instance",
"to",
"this",
"hop",
"s",
"results",
"."
] | train | https://github.com/lbenitez000/trparse/blob/1f932d882a98b062b540b6f4bc2ecb0f35b92e97/trparse.py#L48-L55 |
emichael/PyREM | pyrem/utils.py | synchronized | def synchronized(func, *args, **kwargs):
"""Function decorator to make function synchronized on ``self._lock``.
If the first argument to the function (hopefully self) does not have a _lock
attribute, then this decorator does nothing.
"""
if not (args and hasattr(args[0], '_lock')):
return func(*args, **kwargs)
with args[0]._lock: # pylint: disable=W0212
return func(*args, **kwargs) | python | def synchronized(func, *args, **kwargs):
"""Function decorator to make function synchronized on ``self._lock``.
If the first argument to the function (hopefully self) does not have a _lock
attribute, then this decorator does nothing.
"""
if not (args and hasattr(args[0], '_lock')):
return func(*args, **kwargs)
with args[0]._lock: # pylint: disable=W0212
return func(*args, **kwargs) | [
"def",
"synchronized",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"(",
"args",
"and",
"hasattr",
"(",
"args",
"[",
"0",
"]",
",",
"'_lock'",
")",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"with",
"args",
"[",
"0",
"]",
".",
"_lock",
":",
"# pylint: disable=W0212",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Function decorator to make function synchronized on ``self._lock``.
If the first argument to the function (hopefully self) does not have a _lock
attribute, then this decorator does nothing. | [
"Function",
"decorator",
"to",
"make",
"function",
"synchronized",
"on",
"self",
".",
"_lock",
"."
] | train | https://github.com/emichael/PyREM/blob/2609249ead197cd9496d164f4998ca9985503579/pyrem/utils.py#L10-L19 |
zeroSteiner/smoke-zephyr | smoke_zephyr/job.py | normalize_job_id | def normalize_job_id(job_id):
"""
Convert a value to a job id.
:param job_id: Value to convert.
:type job_id: int, str
:return: The job id.
:rtype: :py:class:`uuid.UUID`
"""
if not isinstance(job_id, uuid.UUID):
job_id = uuid.UUID(job_id)
return job_id | python | def normalize_job_id(job_id):
"""
Convert a value to a job id.
:param job_id: Value to convert.
:type job_id: int, str
:return: The job id.
:rtype: :py:class:`uuid.UUID`
"""
if not isinstance(job_id, uuid.UUID):
job_id = uuid.UUID(job_id)
return job_id | [
"def",
"normalize_job_id",
"(",
"job_id",
")",
":",
"if",
"not",
"isinstance",
"(",
"job_id",
",",
"uuid",
".",
"UUID",
")",
":",
"job_id",
"=",
"uuid",
".",
"UUID",
"(",
"job_id",
")",
"return",
"job_id"
] | Convert a value to a job id.
:param job_id: Value to convert.
:type job_id: int, str
:return: The job id.
:rtype: :py:class:`uuid.UUID` | [
"Convert",
"a",
"value",
"to",
"a",
"job",
"id",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L41-L52 |
zeroSteiner/smoke-zephyr | smoke_zephyr/job.py | JobManager.now | def now(self):
"""
Return a :py:class:`datetime.datetime` instance representing the current time.
:rtype: :py:class:`datetime.datetime`
"""
if self.use_utc:
return datetime.datetime.utcnow()
else:
return datetime.datetime.now() | python | def now(self):
"""
Return a :py:class:`datetime.datetime` instance representing the current time.
:rtype: :py:class:`datetime.datetime`
"""
if self.use_utc:
return datetime.datetime.utcnow()
else:
return datetime.datetime.now() | [
"def",
"now",
"(",
"self",
")",
":",
"if",
"self",
".",
"use_utc",
":",
"return",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"else",
":",
"return",
"datetime",
".",
"datetime",
".",
"now",
"(",
")"
] | Return a :py:class:`datetime.datetime` instance representing the current time.
:rtype: :py:class:`datetime.datetime` | [
"Return",
"a",
":",
"py",
":",
"class",
":",
"datetime",
".",
"datetime",
"instance",
"representing",
"the",
"current",
"time",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L176-L185 |
zeroSteiner/smoke-zephyr | smoke_zephyr/job.py | JobManager.start | def start(self):
"""
Start the JobManager thread.
"""
if self._thread_running.is_set():
raise RuntimeError('the JobManager has already been started')
self._thread.start()
self._thread_running.wait()
return | python | def start(self):
"""
Start the JobManager thread.
"""
if self._thread_running.is_set():
raise RuntimeError('the JobManager has already been started')
self._thread.start()
self._thread_running.wait()
return | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"_thread_running",
".",
"is_set",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"'the JobManager has already been started'",
")",
"self",
".",
"_thread",
".",
"start",
"(",
")",
"self",
".",
"_thread_running",
".",
"wait",
"(",
")",
"return"
] | Start the JobManager thread. | [
"Start",
"the",
"JobManager",
"thread",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L209-L217 |
zeroSteiner/smoke-zephyr | smoke_zephyr/job.py | JobManager.stop | def stop(self):
"""
Stop the JobManager thread.
"""
self.logger.debug('stopping the job manager')
self._thread_running.clear()
self._thread_shutdown.wait()
self._job_lock.acquire()
self.logger.debug('waiting on ' + str(len(self._jobs)) + ' job threads')
for job_desc in self._jobs.values():
if job_desc['job'] is None:
continue
if not job_desc['job'].is_alive():
continue
job_desc['job'].join()
# the job lock must be released before the thread can be joined because the thread routine acquires it before
# checking if it should exit, see https://github.com/zeroSteiner/smoke-zephyr/issues/4 for more details
self._job_lock.release()
self._thread.join()
self.logger.info('the job manager has been stopped')
return | python | def stop(self):
"""
Stop the JobManager thread.
"""
self.logger.debug('stopping the job manager')
self._thread_running.clear()
self._thread_shutdown.wait()
self._job_lock.acquire()
self.logger.debug('waiting on ' + str(len(self._jobs)) + ' job threads')
for job_desc in self._jobs.values():
if job_desc['job'] is None:
continue
if not job_desc['job'].is_alive():
continue
job_desc['job'].join()
# the job lock must be released before the thread can be joined because the thread routine acquires it before
# checking if it should exit, see https://github.com/zeroSteiner/smoke-zephyr/issues/4 for more details
self._job_lock.release()
self._thread.join()
self.logger.info('the job manager has been stopped')
return | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'stopping the job manager'",
")",
"self",
".",
"_thread_running",
".",
"clear",
"(",
")",
"self",
".",
"_thread_shutdown",
".",
"wait",
"(",
")",
"self",
".",
"_job_lock",
".",
"acquire",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'waiting on '",
"+",
"str",
"(",
"len",
"(",
"self",
".",
"_jobs",
")",
")",
"+",
"' job threads'",
")",
"for",
"job_desc",
"in",
"self",
".",
"_jobs",
".",
"values",
"(",
")",
":",
"if",
"job_desc",
"[",
"'job'",
"]",
"is",
"None",
":",
"continue",
"if",
"not",
"job_desc",
"[",
"'job'",
"]",
".",
"is_alive",
"(",
")",
":",
"continue",
"job_desc",
"[",
"'job'",
"]",
".",
"join",
"(",
")",
"# the job lock must be released before the thread can be joined because the thread routine acquires it before",
"# checking if it should exit, see https://github.com/zeroSteiner/smoke-zephyr/issues/4 for more details",
"self",
".",
"_job_lock",
".",
"release",
"(",
")",
"self",
".",
"_thread",
".",
"join",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'the job manager has been stopped'",
")",
"return"
] | Stop the JobManager thread. | [
"Stop",
"the",
"JobManager",
"thread",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L219-L241 |
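The inline comment in stop() records a real ordering constraint: the job lock must be released before the dispatch thread is joined, because that thread re-acquires the same lock before deciding whether to exit (see the linked issue #4). The following standalone sketch is not code from this library; it only illustrates the same release-before-join pattern with plain threading primitives::

    import threading
    import time

    lock = threading.Lock()
    running = threading.Event()
    running.set()

    def worker():
        while running.is_set():
            with lock:            # the worker takes the lock on every pass
                time.sleep(0.01)  # ... periodic work performed while holding it ...

    thread = threading.Thread(target=worker)
    thread.start()

    lock.acquire()   # take the lock to inspect or clean up shared state
    running.clear()  # ask the worker to exit
    lock.release()   # release first; joining while still holding the lock can deadlock
    thread.join()    # the worker re-acquires the lock, sees the cleared event and returns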
zeroSteiner/smoke-zephyr | smoke_zephyr/job.py | JobManager.job_run | def job_run(self, callback, parameters=None):
"""
Add a job and run it once immediately.
:param function callback: The function to run asynchronously.
:param parameters: The parameters to be provided to the callback.
:type parameters: list, tuple
:return: The job id.
:rtype: :py:class:`uuid.UUID`
"""
if not self._thread_running.is_set():
raise RuntimeError('the JobManager is not running')
parameters = (parameters or ())
if not isinstance(parameters, (list, tuple)):
parameters = (parameters,)
job_desc = {}
job_desc['job'] = JobRun(callback, parameters)
job_desc['last_run'] = None
job_desc['run_every'] = datetime.timedelta(0, 1)
job_desc['callback'] = callback
job_desc['parameters'] = parameters
job_desc['enabled'] = True
job_desc['tolerate_exceptions'] = False
job_desc['run_count'] = 0
job_desc['expiration'] = 0
job_id = uuid.uuid4()
self.logger.info('adding new job with id: ' + str(job_id) + ' and callback function: ' + callback.__name__)
with self._job_lock:
self._jobs[job_id] = job_desc
self._job_execute(job_id)
return job_id | python | def job_run(self, callback, parameters=None):
"""
Add a job and run it once immediately.
:param function callback: The function to run asynchronously.
:param parameters: The parameters to be provided to the callback.
:type parameters: list, tuple
:return: The job id.
:rtype: :py:class:`uuid.UUID`
"""
if not self._thread_running.is_set():
raise RuntimeError('the JobManager is not running')
parameters = (parameters or ())
if not isinstance(parameters, (list, tuple)):
parameters = (parameters,)
job_desc = {}
job_desc['job'] = JobRun(callback, parameters)
job_desc['last_run'] = None
job_desc['run_every'] = datetime.timedelta(0, 1)
job_desc['callback'] = callback
job_desc['parameters'] = parameters
job_desc['enabled'] = True
job_desc['tolerate_exceptions'] = False
job_desc['run_count'] = 0
job_desc['expiration'] = 0
job_id = uuid.uuid4()
self.logger.info('adding new job with id: ' + str(job_id) + ' and callback function: ' + callback.__name__)
with self._job_lock:
self._jobs[job_id] = job_desc
self._job_execute(job_id)
return job_id | [
"def",
"job_run",
"(",
"self",
",",
"callback",
",",
"parameters",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_thread_running",
".",
"is_set",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"'the JobManager is not running'",
")",
"parameters",
"=",
"(",
"parameters",
"or",
"(",
")",
")",
"if",
"not",
"isinstance",
"(",
"parameters",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"parameters",
"=",
"(",
"parameters",
",",
")",
"job_desc",
"=",
"{",
"}",
"job_desc",
"[",
"'job'",
"]",
"=",
"JobRun",
"(",
"callback",
",",
"parameters",
")",
"job_desc",
"[",
"'last_run'",
"]",
"=",
"None",
"job_desc",
"[",
"'run_every'",
"]",
"=",
"datetime",
".",
"timedelta",
"(",
"0",
",",
"1",
")",
"job_desc",
"[",
"'callback'",
"]",
"=",
"callback",
"job_desc",
"[",
"'parameters'",
"]",
"=",
"parameters",
"job_desc",
"[",
"'enabled'",
"]",
"=",
"True",
"job_desc",
"[",
"'tolerate_exceptions'",
"]",
"=",
"False",
"job_desc",
"[",
"'run_count'",
"]",
"=",
"0",
"job_desc",
"[",
"'expiration'",
"]",
"=",
"0",
"job_id",
"=",
"uuid",
".",
"uuid4",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'adding new job with id: '",
"+",
"str",
"(",
"job_id",
")",
"+",
"' and callback function: '",
"+",
"callback",
".",
"__name__",
")",
"with",
"self",
".",
"_job_lock",
":",
"self",
".",
"_jobs",
"[",
"job_id",
"]",
"=",
"job_desc",
"self",
".",
"_job_execute",
"(",
"job_id",
")",
"return",
"job_id"
] | Add a job and run it once immediately.
:param function callback: The function to run asynchronously.
:param parameters: The parameters to be provided to the callback.
:type parameters: list, tuple
:return: The job id.
:rtype: :py:class:`uuid.UUID` | [
"Add",
"a",
"job",
"and",
"run",
"it",
"once",
"immediately",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L243-L273 |
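A hedged usage sketch for job_run, reusing the hypothetical manager from the earlier lifecycle example (the callback and its argument are placeholders, not part of the library)::

    def send_report(recipient):
        print('sending report to ' + recipient)

    job_id = manager.job_run(send_report, ('ops@example.com',))  # returns a uuid.UUID
    # a single bare value is also accepted; job_run wraps it in a one-element tuple
    job_id = manager.job_run(send_report, 'ops@example.com')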
zeroSteiner/smoke-zephyr | smoke_zephyr/job.py | JobManager.job_add | def job_add(self, callback, parameters=None, hours=0, minutes=0, seconds=0, tolerate_exceptions=True, expiration=None):
"""
Add a job to the job manager.
:param function callback: The function to run asynchronously.
:param parameters: The parameters to be provided to the callback.
:type parameters: list, tuple
:param int hours: Number of hours to sleep between running the callback.
:param int minutes: Number of minutes to sleep between running the callback.
:param int seconds: Number of seconds to sleep between running the callback.
:param bool tolerate_exceptions: Whether to continue running a job after it has thrown an exception.
:param expiration: When to expire and remove the job. If an integer
is provided, the job will be executed that many times. If a
datetime or timedelta instance is provided, then the job will
be removed after the specified time.
:type expiration: int, :py:class:`datetime.timedelta`, :py:class:`datetime.datetime`
:return: The job id.
:rtype: :py:class:`uuid.UUID`
"""
if not self._thread_running.is_set():
raise RuntimeError('the JobManager is not running')
parameters = (parameters or ())
if not isinstance(parameters, (list, tuple)):
parameters = (parameters,)
job_desc = {}
job_desc['job'] = JobRun(callback, parameters)
job_desc['last_run'] = None
job_desc['run_every'] = datetime.timedelta(0, ((hours * 60 * 60) + (minutes * 60) + seconds))
job_desc['callback'] = callback
job_desc['parameters'] = parameters
job_desc['enabled'] = True
job_desc['tolerate_exceptions'] = tolerate_exceptions
job_desc['run_count'] = 0
if isinstance(expiration, int):
job_desc['expiration'] = expiration
elif isinstance(expiration, datetime.timedelta):
job_desc['expiration'] = self.now() + expiration
elif isinstance(expiration, datetime.datetime):
job_desc['expiration'] = expiration
else:
job_desc['expiration'] = None
job_id = uuid.uuid4()
self.logger.info('adding new job with id: ' + str(job_id) + ' and callback function: ' + callback.__name__)
with self._job_lock:
self._jobs[job_id] = job_desc
return job_id | python | def job_add(self, callback, parameters=None, hours=0, minutes=0, seconds=0, tolerate_exceptions=True, expiration=None):
"""
Add a job to the job manager.
:param function callback: The function to run asynchronously.
:param parameters: The parameters to be provided to the callback.
:type parameters: list, tuple
:param int hours: Number of hours to sleep between running the callback.
:param int minutes: Number of minutes to sleep between running the callback.
:param int seconds: Number of seconds to sleep between running the callback.
:param bool tolerate_exceptions: Whether to continue running a job after it has thrown an exception.
:param expiration: When to expire and remove the job. If an integer
is provided, the job will be executed that many times. If a
datetime or timedelta instance is provided, then the job will
be removed after the specified time.
:type expiration: int, :py:class:`datetime.timedelta`, :py:class:`datetime.datetime`
:return: The job id.
:rtype: :py:class:`uuid.UUID`
"""
if not self._thread_running.is_set():
raise RuntimeError('the JobManager is not running')
parameters = (parameters or ())
if not isinstance(parameters, (list, tuple)):
parameters = (parameters,)
job_desc = {}
job_desc['job'] = JobRun(callback, parameters)
job_desc['last_run'] = None
job_desc['run_every'] = datetime.timedelta(0, ((hours * 60 * 60) + (minutes * 60) + seconds))
job_desc['callback'] = callback
job_desc['parameters'] = parameters
job_desc['enabled'] = True
job_desc['tolerate_exceptions'] = tolerate_exceptions
job_desc['run_count'] = 0
if isinstance(expiration, int):
job_desc['expiration'] = expiration
elif isinstance(expiration, datetime.timedelta):
job_desc['expiration'] = self.now() + expiration
elif isinstance(expiration, datetime.datetime):
job_desc['expiration'] = expiration
else:
job_desc['expiration'] = None
job_id = uuid.uuid4()
self.logger.info('adding new job with id: ' + str(job_id) + ' and callback function: ' + callback.__name__)
with self._job_lock:
self._jobs[job_id] = job_desc
return job_id | [
"def",
"job_add",
"(",
"self",
",",
"callback",
",",
"parameters",
"=",
"None",
",",
"hours",
"=",
"0",
",",
"minutes",
"=",
"0",
",",
"seconds",
"=",
"0",
",",
"tolerate_exceptions",
"=",
"True",
",",
"expiration",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_thread_running",
".",
"is_set",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"'the JobManager is not running'",
")",
"parameters",
"=",
"(",
"parameters",
"or",
"(",
")",
")",
"if",
"not",
"isinstance",
"(",
"parameters",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"parameters",
"=",
"(",
"parameters",
",",
")",
"job_desc",
"=",
"{",
"}",
"job_desc",
"[",
"'job'",
"]",
"=",
"JobRun",
"(",
"callback",
",",
"parameters",
")",
"job_desc",
"[",
"'last_run'",
"]",
"=",
"None",
"job_desc",
"[",
"'run_every'",
"]",
"=",
"datetime",
".",
"timedelta",
"(",
"0",
",",
"(",
"(",
"hours",
"*",
"60",
"*",
"60",
")",
"+",
"(",
"minutes",
"*",
"60",
")",
"+",
"seconds",
")",
")",
"job_desc",
"[",
"'callback'",
"]",
"=",
"callback",
"job_desc",
"[",
"'parameters'",
"]",
"=",
"parameters",
"job_desc",
"[",
"'enabled'",
"]",
"=",
"True",
"job_desc",
"[",
"'tolerate_exceptions'",
"]",
"=",
"tolerate_exceptions",
"job_desc",
"[",
"'run_count'",
"]",
"=",
"0",
"if",
"isinstance",
"(",
"expiration",
",",
"int",
")",
":",
"job_desc",
"[",
"'expiration'",
"]",
"=",
"expiration",
"elif",
"isinstance",
"(",
"expiration",
",",
"datetime",
".",
"timedelta",
")",
":",
"job_desc",
"[",
"'expiration'",
"]",
"=",
"self",
".",
"now",
"(",
")",
"+",
"expiration",
"elif",
"isinstance",
"(",
"expiration",
",",
"datetime",
".",
"datetime",
")",
":",
"job_desc",
"[",
"'expiration'",
"]",
"=",
"expiration",
"else",
":",
"job_desc",
"[",
"'expiration'",
"]",
"=",
"None",
"job_id",
"=",
"uuid",
".",
"uuid4",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'adding new job with id: '",
"+",
"str",
"(",
"job_id",
")",
"+",
"' and callback function: '",
"+",
"callback",
".",
"__name__",
")",
"with",
"self",
".",
"_job_lock",
":",
"self",
".",
"_jobs",
"[",
"job_id",
"]",
"=",
"job_desc",
"return",
"job_id"
] | Add a job to the job manager.
:param function callback: The function to run asynchronously.
:param parameters: The parameters to be provided to the callback.
:type parameters: list, tuple
:param int hours: Number of hours to sleep between running the callback.
:param int minutes: Number of minutes to sleep between running the callback.
:param int seconds: Number of seconds to sleep between running the callback.
:param bool tolerate_exceptions: Whether to continue running a job after it has thrown an exception.
:param expiration: When to expire and remove the job. If an integer
is provided, the job will be executed that many times. If a
datetime or timedelta instance is provided, then the job will
be removed after the specified time.
:type expiration: int, :py:class:`datetime.timedelta`, :py:class:`datetime.datetime`
:return: The job id.
:rtype: :py:class:`uuid.UUID` | [
"Add",
"a",
"job",
"to",
"the",
"job",
"manager",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L275-L320 |
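job_add schedules a recurring callback and supports either a run-count or a time-based expiration. A hedged sketch, again reusing the hypothetical manager and send_report callback from the previous examples::

    import datetime

    # run every 5 minutes and remove the job after 12 executions (an int expiration is a run count)
    job_id = manager.job_add(send_report, ('ops@example.com',), minutes=5, expiration=12)

    # run every 30 seconds for one hour (a timedelta expiration is added to the manager's now())
    job_id = manager.job_add(send_report, ('ops@example.com',), seconds=30,
                             expiration=datetime.timedelta(hours=1))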
zeroSteiner/smoke-zephyr | smoke_zephyr/job.py | JobManager.job_count_enabled | def job_count_enabled(self):
"""
Return the number of enabled jobs.
:return: The number of jobs that are enabled.
:rtype: int
"""
enabled = 0
for job_desc in self._jobs.values():
if job_desc['enabled']:
enabled += 1
return enabled | python | def job_count_enabled(self):
"""
Return the number of enabled jobs.
:return: The number of jobs that are enabled.
:rtype: int
"""
enabled = 0
for job_desc in self._jobs.values():
if job_desc['enabled']:
enabled += 1
return enabled | [
"def",
"job_count_enabled",
"(",
"self",
")",
":",
"enabled",
"=",
"0",
"for",
"job_desc",
"in",
"self",
".",
"_jobs",
".",
"values",
"(",
")",
":",
"if",
"job_desc",
"[",
"'enabled'",
"]",
":",
"enabled",
"+=",
"1",
"return",
"enabled"
] | Return the number of enabled jobs.
:return: The number of jobs that are enabled.
:rtype: int | [
"Return",
"the",
"number",
"of",
"enabled",
"jobs",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L331-L342 |
zeroSteiner/smoke-zephyr | smoke_zephyr/job.py | JobManager.job_enable | def job_enable(self, job_id):
"""
Enable a job.
:param job_id: Job identifier to enable.
:type job_id: :py:class:`uuid.UUID`
"""
job_id = normalize_job_id(job_id)
with self._job_lock:
job_desc = self._jobs[job_id]
job_desc['enabled'] = True | python | def job_enable(self, job_id):
"""
Enable a job.
:param job_id: Job identifier to enable.
:type job_id: :py:class:`uuid.UUID`
"""
job_id = normalize_job_id(job_id)
with self._job_lock:
job_desc = self._jobs[job_id]
job_desc['enabled'] = True | [
"def",
"job_enable",
"(",
"self",
",",
"job_id",
")",
":",
"job_id",
"=",
"normalize_job_id",
"(",
"job_id",
")",
"with",
"self",
".",
"_job_lock",
":",
"job_desc",
"=",
"self",
".",
"_jobs",
"[",
"job_id",
"]",
"job_desc",
"[",
"'enabled'",
"]",
"=",
"True"
] | Enable a job.
:param job_id: Job identifier to enable.
:type job_id: :py:class:`uuid.UUID` | [
"Enable",
"a",
"job",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L344-L354 |
zeroSteiner/smoke-zephyr | smoke_zephyr/job.py | JobManager.job_disable | def job_disable(self, job_id):
"""
Disable a job. Disabled jobs will not be executed.
:param job_id: Job identifier to disable.
:type job_id: :py:class:`uuid.UUID`
"""
job_id = normalize_job_id(job_id)
with self._job_lock:
job_desc = self._jobs[job_id]
job_desc['enabled'] = False | python | def job_disable(self, job_id):
"""
Disable a job. Disabled jobs will not be executed.
:param job_id: Job identifier to disable.
:type job_id: :py:class:`uuid.UUID`
"""
job_id = normalize_job_id(job_id)
with self._job_lock:
job_desc = self._jobs[job_id]
job_desc['enabled'] = False | [
"def",
"job_disable",
"(",
"self",
",",
"job_id",
")",
":",
"job_id",
"=",
"normalize_job_id",
"(",
"job_id",
")",
"with",
"self",
".",
"_job_lock",
":",
"job_desc",
"=",
"self",
".",
"_jobs",
"[",
"job_id",
"]",
"job_desc",
"[",
"'enabled'",
"]",
"=",
"False"
] | Disable a job. Disabled jobs will not be executed.
:param job_id: Job identifier to disable.
:type job_id: :py:class:`uuid.UUID` | [
"Disable",
"a",
"job",
".",
"Disabled",
"jobs",
"will",
"not",
"be",
"executed",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L356-L366 |
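The last three records (job_count_enabled, job_enable, job_disable) form a small toggle API around the job id returned by job_run or job_add. A hedged sketch continuing the earlier examples (normalize_job_id is applied to the id, but that helper is not reproduced here, so passing anything other than the uuid.UUID returned at creation is an assumption)::

    manager.job_disable(job_id)          # the scheduler skips disabled jobs
    print(manager.job_count_enabled())   # number of jobs currently enabled
    manager.job_enable(job_id)           # resume normal scheduling for this job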