body (string, 26 to 98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (stringclasses, 1 value) | body_without_docstring (string, 20 to 98.2k chars) |
---|---|---|---|---|---|---|---|
def __init__(self, identifier, name, date, dims, auth_token=auth_token, base_url=base_url):
'Initialize parameters of the experiment client object.'
self.experiment_id = identifier
self.name = name
self.date = date
self.dims = dims
self.auth_token = auth_token
self.base_url = base_url | -8,454,303,637,792,562,000 | Initialize parameters of the experiment client object. | thor_client/experiment_client.py | __init__ | JamesBrofos/Thor-Python-Client | python | def __init__(self, identifier, name, date, dims, auth_token=auth_token, base_url=base_url):
self.experiment_id = identifier
self.name = name
self.date = date
self.dims = dims
self.auth_token = auth_token
self.base_url = base_url |
def submit_observation(self, config, target):
'Upload a pairing of a configuration alongside an observed target\n variable.\n\n Parameters:\n config (dictionary): A dictionary mapping dimension names to values\n indicating the configuration of parameters.\n target (float): A number indicating the performance of this\n configuration of model parameters.\n\n Examples:\n This utility is helpful in the event that a machine learning\n practitioner already has a few existing evaluations of the system at\n given inputs. For instance, the consumer may have already performed\n a grid search to obtain parameter values.\n\n Suppose that a particular experiment has two dimensions named "x"\n and "y". Then to upload a configuration to the Thor server, we\n proceed as follows:\n\n >>> d = {"x": 1.5, "y": 3.1}\n >>> v = f(d["x"], d["y"])\n >>> exp.submit_observation(d, v)\n '
post_data = {'auth_token': self.auth_token, 'experiment_id': self.experiment_id, 'configuration': json.dumps(config), 'target': target}
result = requests.post(url=self.base_url.format('submit_observation'), json=post_data)
return json_parser(result, self.auth_token) | 2,216,162,767,312,073,200 | Upload a pairing of a configuration alongside an observed target
variable.
Parameters:
config (dictionary): A dictionary mapping dimension names to values
indicating the configuration of parameters.
target (float): A number indicating the performance of this
configuration of model parameters.
Examples:
This utility is helpful in the event that a machine learning
practitioner already has a few existing evaluations of the system at
given inputs. For instance, the consumer may have already performed
a grid search to obtain parameter values.
Suppose that a particular experiment has two dimensions named "x"
and "y". Then to upload a configuration to the Thor server, we
proceed as follows:
>>> d = {"x": 1.5, "y": 3.1}
>>> v = f(d["x"], d["y"])
>>> exp.submit_observation(d, v) | thor_client/experiment_client.py | submit_observation | JamesBrofos/Thor-Python-Client | python | def submit_observation(self, config, target):
'Upload a pairing of a configuration alongside an observed target\n variable.\n\n Parameters:\n config (dictionary): A dictionary mapping dimension names to values\n indicating the configuration of parameters.\n target (float): A number indicating the performance of this\n configuration of model parameters.\n\n Examples:\n This utility is helpful in the event that a machine learning\n practitioner already has a few existing evaluations of the system at\n given inputs. For instance, the consumer may have already performed\n a grid search to obtain parameter values.\n\n Suppose that a particular experiment has two dimensions named "x"\n and "y". Then to upload a configuration to the Thor server, we\n proceed as follows:\n\n >>> d = {"x": 1.5, "y": 3.1}\n >>> v = f(d["x"], d["y"])\n >>> exp.submit_observation(d, v)\n '
post_data = {'auth_token': self.auth_token, 'experiment_id': self.experiment_id, 'configuration': json.dumps(config), 'target': target}
result = requests.post(url=self.base_url.format('submit_observation'), json=post_data)
return json_parser(result, self.auth_token) |
def create_recommendation(self, rand_prob=0.0, n_models=5, description='', acq_func='expected_improvement', integrate_acq=True):
'Get a recommendation for a point to evaluate next.\n\n The create recommendation utility represents the core of the Thor\n Bayesian optimization software. This function will contact the Thor\n server and request a new configuration of machine learning parameters\n that serve the object of maximizing the metric of interest.\n\n Parameters:\n rand_prob (optional, float): This parameter represents that a random\n point in the input space is chosen instead of selecting a\n configuration of parameters using Bayesian optimization. As\n such, this parameter can be used to benchmark against random\n search and otherwise to perform pure exploration of the\n parameter space.\n n_models (optional, int): The number of Gaussian process models to\n sample using elliptical slice sampling. Setting this to a large\n number will produce a better characterization of uncertainty in\n the acquisition function.\n description (optional, str): An optional per-observation\n descriptor, potentially useful for identifying one observation\n among many others in a large experiment. Defaults to "".\n acq_func (optional, str): A string specifying which acquisition\n function should be used to construct the newest recommendation.\n It can be useful to sometimes vary the acquisition function to\n enable exploitation towards the end of an experiment.\n integrate_acq (optional, bool): An indicator for whether or not we\n should construct an integrated acquisition function using models\n sampled from the posterior. The alternative is to not integrate\n and to return a single recommendation for each of the sampled\n models, of which there are `n_models`.\n\n Returns:\n RecommendationClient: A recommendation client object\n corresponding to the recommended set of parameters. If the\n acquisition function is not integrated, a list of\n RecommendationClient objects may be returned instead, one for\n each sampled model.\n '
post_data = {'auth_token': self.auth_token, 'experiment_id': self.experiment_id, 'n_models': n_models, 'rand_prob': rand_prob, 'description': description, 'acq_func': acq_func, 'integrate_acq': integrate_acq}
result = requests.post(url=self.base_url.format('create_recommendation'), json=post_data)
recs = json_parser(result, self.auth_token, RecommendationClient)
return (recs[0] if (len(recs) == 1) else recs) | -7,716,865,792,384,818,000 | Get a recommendation for a point to evaluate next.
The create recommendation utility represents the core of the Thor
Bayesian optimization software. This function will contact the Thor
server and request a new configuration of machine learning parameters
that serve the objective of maximizing the metric of interest.
Parameters:
rand_prob (optional, float): This parameter represents the probability that a random
point in the input space is chosen instead of selecting a
configuration of parameters using Bayesian optimization. As
such, this parameter can be used to benchmark against random
search and otherwise to perform pure exploration of the
parameter space.
n_models (optional, int): The number of Gaussian process models to
sample using elliptical slice sampling. Setting this to a large
number will produce a better characterization of uncertainty in
the acquisition function.
description (optional, str): An optional per-observation
descriptor, potentially useful for identifying one observation
among many others in a large experiment. Defaults to "".
acq_func (optional, str): A string specifying which acquisition
function should be used to construct the newest recommendation.
It can be useful to sometimes vary the acquisition function to
enable exploitation towards the end of an experiment.
integrate_acq (optional, bool): An indicator for whether or not we
should construct an integrated acquisition function using models
sampled from the posterior. The alternative is to not integrate
and to return a single recommendation for each of the sampled
models, of which there are `n_models`.
Returns:
RecommendationClient: A recommendation client object
corresponding to the recommended set of parameters. If the
acquisition function is not integrated, a list of
RecommendationClient objects may be returned instead, one for
each sampled model. | thor_client/experiment_client.py | create_recommendation | JamesBrofos/Thor-Python-Client | python | def create_recommendation(self, rand_prob=0.0, n_models=5, description='', acq_func='expected_improvement', integrate_acq=True):
'Get a recommendation for a point to evaluate next.\n\n The create recommendation utility represents the core of the Thor\n Bayesian optimization software. This function will contact the Thor\n server and request a new configuration of machine learning parameters\n that serve the object of maximizing the metric of interest.\n\n Parameters:\n rand_prob (optional, float): This parameter represents that a random\n point in the input space is chosen instead of selecting a\n configuration of parameters using Bayesian optimization. As\n such, this parameter can be used to benchmark against random\n search and otherwise to perform pure exploration of the\n parameter space.\n n_models (optional, int): The number of Gaussian process models to\n sample using elliptical slice sampling. Setting this to a large\n number will produce a better characterization of uncertainty in\n the acquisition function.\n description (optional, str): An optional per-observation\n descriptor, potentially useful for identifying one observation\n among many others in a large experiment. Defaults to .\n acq_func (optional, str): A string specifying which acquisition\n function should be used to construct the newest recommendation.\n It can be useful to sometimes vary the acquisition function to\n enable exploitation towards the end of an experiment.\n integrate_acq (optional, bool): An indicator for whether or not we\n should construct an integrated acquisition function using models\n sampled from the posterior. The alternative is to not integrate\n and to return a single recommendation for each of the sampled\n models, of which there are `n_models`.\n\n Returns:\n RecommendationClient: A recommendation client object\n corresponding to the recommended set of parameters. If the\n acquisition function is not integrated, a list of\n RecommendationClient objects may be returned instead, one for\n each sampled model.\n '
post_data = {'auth_token': self.auth_token, 'experiment_id': self.experiment_id, 'n_models': n_models, 'rand_prob': rand_prob, 'description': description, 'acq_func': acq_func, 'integrate_acq': integrate_acq}
result = requests.post(url=self.base_url.format('create_recommendation'), json=post_data)
recs = json_parser(result, self.auth_token, RecommendationClient)
return (recs[0] if (len(recs) == 1) else recs) |
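The two calls above combine naturally into an ask-evaluate-tell loop. The sketch below assumes an already-constructed `ExperimentClient` named `exp` with dimensions "x" and "y", a user-supplied objective `f`, and a hypothetical `rec.config` attribute for reading the suggested parameters (the `RecommendationClient` interface is not shown in this file).

```python
# Sketch only: `exp`, `f`, and the `rec.config` accessor are assumptions, not part
# of the API shown above.
def run_optimization(exp, f, n_iters=25):
    for _ in range(n_iters):
        rec = exp.create_recommendation(rand_prob=0.1)   # ask Thor for the next configuration
        config = rec.config                              # hypothetical accessor for the suggestion
        target = f(config["x"], config["y"])             # evaluate the metric client-side
        exp.submit_observation(config, target)           # report the result back to the server
    return exp.best_configuration()                      # best point found so far
```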
def best_configuration(self):
'Get the configuration of parameters that produced the best value of\n the objective function.\n\n Returns:\n dictionary: A dictionary containing a detailed view of the\n configuration of model parameters that produced the maximal\n value of the metric. This includes the date the observation was\n created, the value of the metric, and the configuration itself.\n '
post_data = {'auth_token': self.auth_token, 'experiment_id': self.experiment_id}
result = requests.post(url=self.base_url.format('best_configuration'), json=post_data)
return json_parser(result, self.auth_token) | -2,313,999,916,199,557,600 | Get the configuration of parameters that produced the best value of
the objective function.
Returns:
dictionary: A dictionary containing a detailed view of the
configuration of model parameters that produced the maximal
value of the metric. This includes the date the observation was
created, the value of the metric, and the configuration itself. | thor_client/experiment_client.py | best_configuration | JamesBrofos/Thor-Python-Client | python | def best_configuration(self):
'Get the configuration of parameters that produced the best value of\n the objective function.\n\n Returns:\n dictionary: A dictionary containing a detailed view of the\n configuration of model parameters that produced the maximal\n value of the metric. This includes the date the observation was\n created, the value of the metric, and the configuration itself.\n '
post_data = {'auth_token': self.auth_token, 'experiment_id': self.experiment_id}
result = requests.post(url=self.base_url.format('best_configuration'), json=post_data)
return json_parser(result, self.auth_token) |
def pending_recommendations(self):
'Query for pending recommendations that have yet to be evaluated.\n\n Sometimes client-side computations may fail for a given input\n configuration of model parameters, leaving the recommendation in a kind\n of "limbo" state in which is not being evaluated but still exists. In\n this case, it can be advantageous for the client to query for such\n pending observations and to evaluate them. This function returns a list\n of pending recommendations which can then be evaluated by the client.\n\n Returns:\n list of RecommendationClient: A list of\n recommendation client objects, where each element in the list\n corresponds to a pending observation.\n '
post_data = {'auth_token': self.auth_token, 'experiment_id': self.experiment_id}
result = requests.post(url=self.base_url.format('pending_recommendations'), json=post_data)
return json_parser(result, self.auth_token, RecommendationClient) | 8,343,984,657,662,928,000 | Query for pending recommendations that have yet to be evaluated.
Sometimes client-side computations may fail for a given input
configuration of model parameters, leaving the recommendation in a kind
of "limbo" state in which is not being evaluated but still exists. In
this case, it can be advantageous for the client to query for such
pending observations and to evaluate them. This function returns a list
of pending recommendations which can then be evaluated by the client.
Returns:
list of RecommendationClient: A list of
recommendation client objects, where each element in the list
corresponds to a pending observation. | thor_client/experiment_client.py | pending_recommendations | JamesBrofos/Thor-Python-Client | python | def pending_recommendations(self):
'Query for pending recommendations that have yet to be evaluated.\n\n Sometimes client-side computations may fail for a given input\n configuration of model parameters, leaving the recommendation in a kind\n of "limbo" state in which is not being evaluated but still exists. In\n this case, it can be advantageous for the client to query for such\n pending observations and to evaluate them. This function returns a list\n of pending recommendations which can then be evaluated by the client.\n\n Returns:\n list of RecommendationClient: A list of\n recommendation client objects, where each element in the list\n corresponds to a pending observation.\n '
post_data = {'auth_token': self.auth_token, 'experiment_id': self.experiment_id}
result = requests.post(url=self.base_url.format('pending_recommendations'), json=post_data)
return json_parser(result, self.auth_token, RecommendationClient) |
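A hedged sketch of draining those pending recommendations: evaluate each one and submit the result. As in the earlier loop, `exp`, `f`, and the `rec.config` accessor are assumptions rather than part of the shown API.

```python
# Sketch only: re-evaluate recommendations whose client-side runs previously failed.
def resume_pending(exp, f):
    for rec in exp.pending_recommendations():
        config = rec.config                              # hypothetical accessor
        exp.submit_observation(config, f(config["x"], config["y"]))
```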
@classmethod
def from_dict(cls, dictionary, auth_token):
'Create an experiment object from a dictionary representation. Pass\n the authentication token as an additional parameter.\n\n TODO:\n Can the authentication token be a return parameter?\n '
return cls(identifier=dictionary['id'], name=dictionary['name'], date=dictionary['date'], dims=dictionary['dimensions'], auth_token=auth_token) | -1,919,639,324,076,756,200 | Create an experiment object from a dictionary representation. Pass
the authentication token as an additional parameter.
TODO:
Can the authentication token be a return parameter? | thor_client/experiment_client.py | from_dict | JamesBrofos/Thor-Python-Client | python | @classmethod
def from_dict(cls, dictionary, auth_token):
'Create an experiment object from a dictionary representation. Pass\n the authentication token as an additional parameter.\n\n TODO:\n Can the authentication token be a return parameter?\n '
return cls(identifier=dictionary['id'], name=dictionary['name'], date=dictionary['date'], dims=dictionary['dimensions'], auth_token=auth_token) |
async def add_derivation_paths(self, records: List[DerivationRecord]) -> None:
'\n Insert many derivation paths into the database.\n '
async with self.db_wrapper.lock:
sql_records = []
for record in records:
self.all_puzzle_hashes.add(record.puzzle_hash)
sql_records.append((record.index, bytes(record.pubkey).hex(), record.puzzle_hash.hex(), record.wallet_type, record.wallet_id, 0))
cursor = (await self.db_connection.executemany('INSERT OR REPLACE INTO derivation_paths VALUES(?, ?, ?, ?, ?, ?)', sql_records))
(await cursor.close())
(await self.db_connection.commit()) | -6,033,513,997,635,272,000 | Insert many derivation paths into the database. | chia/wallet/wallet_puzzle_store.py | add_derivation_paths | 1SecureANDROID/chia-blockchain | python | async def add_derivation_paths(self, records: List[DerivationRecord]) -> None:
'\n \n '
async with self.db_wrapper.lock:
sql_records = []
for record in records:
self.all_puzzle_hashes.add(record.puzzle_hash)
sql_records.append((record.index, bytes(record.pubkey).hex(), record.puzzle_hash.hex(), record.wallet_type, record.wallet_id, 0))
cursor = (await self.db_connection.executemany('INSERT OR REPLACE INTO derivation_paths VALUES(?, ?, ?, ?, ?, ?)', sql_records))
(await cursor.close())
(await self.db_connection.commit()) |
async def get_derivation_record(self, index: uint32, wallet_id: uint32) -> Optional[DerivationRecord]:
'\n Returns the derivation record by index and wallet id.\n '
cursor = (await self.db_connection.execute('SELECT * FROM derivation_paths WHERE derivation_index=? and wallet_id=?;', (index, wallet_id)))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
return DerivationRecord(uint32(row[0]), bytes32.fromhex(row[2]), G1Element.from_bytes(bytes.fromhex(row[1])), WalletType(row[3]), uint32(row[4]))
return None | -7,135,487,103,276,070,000 | Returns the derivation record by index and wallet id. | chia/wallet/wallet_puzzle_store.py | get_derivation_record | 1SecureANDROID/chia-blockchain | python | async def get_derivation_record(self, index: uint32, wallet_id: uint32) -> Optional[DerivationRecord]:
'\n \n '
cursor = (await self.db_connection.execute('SELECT * FROM derivation_paths WHERE derivation_index=? and wallet_id=?;', (index, wallet_id)))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
return DerivationRecord(uint32(row[0]), bytes32.fromhex(row[2]), G1Element.from_bytes(bytes.fromhex(row[1])), WalletType(row[3]), uint32(row[4]))
return None |
async def get_derivation_record_for_puzzle_hash(self, puzzle_hash: str) -> Optional[DerivationRecord]:
'\n Returns the derivation record by index and wallet id.\n '
cursor = (await self.db_connection.execute('SELECT * FROM derivation_paths WHERE puzzle_hash=?;', (puzzle_hash,)))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
return DerivationRecord(uint32(row[0]), bytes32.fromhex(row[2]), G1Element.from_bytes(bytes.fromhex(row[1])), WalletType(row[3]), uint32(row[4]))
return None | 3,878,319,438,836,965,400 | Returns the derivation record by index and wallet id. | chia/wallet/wallet_puzzle_store.py | get_derivation_record_for_puzzle_hash | 1SecureANDROID/chia-blockchain | python | async def get_derivation_record_for_puzzle_hash(self, puzzle_hash: str) -> Optional[DerivationRecord]:
'\n \n '
cursor = (await self.db_connection.execute('SELECT * FROM derivation_paths WHERE puzzle_hash=?;', (puzzle_hash,)))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
return DerivationRecord(uint32(row[0]), bytes32.fromhex(row[2]), G1Element.from_bytes(bytes.fromhex(row[1])), WalletType(row[3]), uint32(row[4]))
return None |
async def set_used_up_to(self, index: uint32, in_transaction=False) -> None:
"\n Sets a derivation path to used so we don't use it again.\n "
if (not in_transaction):
(await self.db_wrapper.lock.acquire())
try:
cursor = (await self.db_connection.execute('UPDATE derivation_paths SET used=1 WHERE derivation_index<=?', (index,)))
(await cursor.close())
finally:
if (not in_transaction):
(await self.db_connection.commit())
self.db_wrapper.lock.release() | 8,223,308,533,676,820,000 | Sets a derivation path to used so we don't use it again. | chia/wallet/wallet_puzzle_store.py | set_used_up_to | 1SecureANDROID/chia-blockchain | python | async def set_used_up_to(self, index: uint32, in_transaction=False) -> None:
"\n \n "
if (not in_transaction):
(await self.db_wrapper.lock.acquire())
try:
cursor = (await self.db_connection.execute('UPDATE derivation_paths SET used=1 WHERE derivation_index<=?', (index,)))
(await cursor.close())
finally:
if (not in_transaction):
(await self.db_connection.commit())
self.db_wrapper.lock.release() |
async def puzzle_hash_exists(self, puzzle_hash: bytes32) -> bool:
'\n Checks if passed puzzle_hash is present in the db.\n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths WHERE puzzle_hash=?', (puzzle_hash.hex(),)))
row = (await cursor.fetchone())
(await cursor.close())
return (row is not None) | 117,372,233,620,748,340 | Checks if passed puzzle_hash is present in the db. | chia/wallet/wallet_puzzle_store.py | puzzle_hash_exists | 1SecureANDROID/chia-blockchain | python | async def puzzle_hash_exists(self, puzzle_hash: bytes32) -> bool:
'\n \n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths WHERE puzzle_hash=?', (puzzle_hash.hex(),)))
row = (await cursor.fetchone())
(await cursor.close())
return (row is not None) |
async def one_of_puzzle_hashes_exists(self, puzzle_hashes: List[bytes32]) -> bool:
'\n Checks if one of the passed puzzle_hashes is present in the db.\n '
if (len(puzzle_hashes) < 1):
return False
for ph in puzzle_hashes:
if (ph in self.all_puzzle_hashes):
return True
return False | -5,927,667,413,891,186,000 | Checks if one of the passed puzzle_hashes is present in the db. | chia/wallet/wallet_puzzle_store.py | one_of_puzzle_hashes_exists | 1SecureANDROID/chia-blockchain | python | async def one_of_puzzle_hashes_exists(self, puzzle_hashes: List[bytes32]) -> bool:
'\n \n '
if (len(puzzle_hashes) < 1):
return False
for ph in puzzle_hashes:
if (ph in self.all_puzzle_hashes):
return True
return False |
async def index_for_pubkey(self, pubkey: G1Element) -> Optional[uint32]:
'\n Returns derivation paths for the given pubkey.\n Returns None if not present.\n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths WHERE pubkey=?', (bytes(pubkey).hex(),)))
row = (await cursor.fetchone())
(await cursor.close())
if (row is not None):
return uint32(row[0])
return None | -3,788,613,676,239,940,600 | Returns derivation paths for the given pubkey.
Returns None if not present. | chia/wallet/wallet_puzzle_store.py | index_for_pubkey | 1SecureANDROID/chia-blockchain | python | async def index_for_pubkey(self, pubkey: G1Element) -> Optional[uint32]:
'\n Returns derivation paths for the given pubkey.\n Returns None if not present.\n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths WHERE pubkey=?', (bytes(pubkey).hex(),)))
row = (await cursor.fetchone())
(await cursor.close())
if (row is not None):
return uint32(row[0])
return None |
async def index_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[uint32]:
'\n Returns the derivation path for the puzzle_hash.\n Returns None if not present.\n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths WHERE puzzle_hash=?', (puzzle_hash.hex(),)))
row = (await cursor.fetchone())
(await cursor.close())
if (row is not None):
return uint32(row[0])
return None | 2,479,210,134,985,288,700 | Returns the derivation path for the puzzle_hash.
Returns None if not present. | chia/wallet/wallet_puzzle_store.py | index_for_puzzle_hash | 1SecureANDROID/chia-blockchain | python | async def index_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[uint32]:
'\n Returns the derivation path for the puzzle_hash.\n Returns None if not present.\n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths WHERE puzzle_hash=?', (puzzle_hash.hex(),)))
row = (await cursor.fetchone())
(await cursor.close())
if (row is not None):
return uint32(row[0])
return None |
async def index_for_puzzle_hash_and_wallet(self, puzzle_hash: bytes32, wallet_id: uint32) -> Optional[uint32]:
'\n Returns the derivation path for the puzzle_hash.\n Returns None if not present.\n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths WHERE puzzle_hash=? and wallet_id=?;', (puzzle_hash.hex(), wallet_id)))
row = (await cursor.fetchone())
(await cursor.close())
if (row is not None):
return uint32(row[0])
return None | -465,883,259,975,356,740 | Returns the derivation path for the puzzle_hash.
Returns None if not present. | chia/wallet/wallet_puzzle_store.py | index_for_puzzle_hash_and_wallet | 1SecureANDROID/chia-blockchain | python | async def index_for_puzzle_hash_and_wallet(self, puzzle_hash: bytes32, wallet_id: uint32) -> Optional[uint32]:
'\n Returns the derivation path for the puzzle_hash.\n Returns None if not present.\n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths WHERE puzzle_hash=? and wallet_id=?;', (puzzle_hash.hex(), wallet_id)))
row = (await cursor.fetchone())
(await cursor.close())
if (row is not None):
return uint32(row[0])
return None |
async def wallet_info_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[Tuple[(uint32, WalletType)]]:
'\n Returns the derivation path for the puzzle_hash.\n Returns None if not present.\n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths WHERE puzzle_hash=?', (puzzle_hash.hex(),)))
row = (await cursor.fetchone())
(await cursor.close())
if (row is not None):
return (row[4], WalletType(row[3]))
return None | -2,138,887,991,481,001,000 | Returns the derivation path for the puzzle_hash.
Returns None if not present. | chia/wallet/wallet_puzzle_store.py | wallet_info_for_puzzle_hash | 1SecureANDROID/chia-blockchain | python | async def wallet_info_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[Tuple[(uint32, WalletType)]]:
'\n Returns the derivation path for the puzzle_hash.\n Returns None if not present.\n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths WHERE puzzle_hash=?', (puzzle_hash.hex(),)))
row = (await cursor.fetchone())
(await cursor.close())
if (row is not None):
return (row[4], WalletType(row[3]))
return None |
async def get_all_puzzle_hashes(self) -> Set[bytes32]:
'\n Return a set containing all puzzle_hashes we generated.\n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths'))
rows = (await cursor.fetchall())
(await cursor.close())
result: Set[bytes32] = set()
for row in rows:
result.add(bytes32(bytes.fromhex(row[2])))
return result | -3,404,014,031,407,140,000 | Return a set containing all puzzle_hashes we generated. | chia/wallet/wallet_puzzle_store.py | get_all_puzzle_hashes | 1SecureANDROID/chia-blockchain | python | async def get_all_puzzle_hashes(self) -> Set[bytes32]:
'\n \n '
cursor = (await self.db_connection.execute('SELECT * from derivation_paths'))
rows = (await cursor.fetchall())
(await cursor.close())
result: Set[bytes32] = set()
for row in rows:
result.add(bytes32(bytes.fromhex(row[2])))
return result |
async def get_last_derivation_path(self) -> Optional[uint32]:
'\n Returns the last derivation path by derivation_index.\n '
cursor = (await self.db_connection.execute('SELECT MAX(derivation_index) FROM derivation_paths;'))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
return uint32(row[0])
return None | -1,486,977,598,627,375,600 | Returns the last derivation path by derivation_index. | chia/wallet/wallet_puzzle_store.py | get_last_derivation_path | 1SecureANDROID/chia-blockchain | python | async def get_last_derivation_path(self) -> Optional[uint32]:
'\n \n '
cursor = (await self.db_connection.execute('SELECT MAX(derivation_index) FROM derivation_paths;'))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
return uint32(row[0])
return None |
async def get_last_derivation_path_for_wallet(self, wallet_id: int) -> Optional[uint32]:
'\n Returns the last derivation path by derivation_index.\n '
cursor = (await self.db_connection.execute(f'SELECT MAX(derivation_index) FROM derivation_paths WHERE wallet_id={wallet_id};'))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
return uint32(row[0])
return None | 3,022,884,929,937,231,400 | Returns the last derivation path by derivation_index. | chia/wallet/wallet_puzzle_store.py | get_last_derivation_path_for_wallet | 1SecureANDROID/chia-blockchain | python | async def get_last_derivation_path_for_wallet(self, wallet_id: int) -> Optional[uint32]:
'\n \n '
cursor = (await self.db_connection.execute(f'SELECT MAX(derivation_index) FROM derivation_paths WHERE wallet_id={wallet_id};'))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
return uint32(row[0])
return None |
async def get_current_derivation_record_for_wallet(self, wallet_id: uint32) -> Optional[DerivationRecord]:
'\n Returns the current derivation record by derivation_index.\n '
cursor = (await self.db_connection.execute(f'SELECT MAX(derivation_index) FROM derivation_paths WHERE wallet_id={wallet_id} and used=1;'))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
index = uint32(row[0])
return (await self.get_derivation_record(index, wallet_id))
return None | 6,350,083,862,135,092,000 | Returns the current derivation record by derivation_index. | chia/wallet/wallet_puzzle_store.py | get_current_derivation_record_for_wallet | 1SecureANDROID/chia-blockchain | python | async def get_current_derivation_record_for_wallet(self, wallet_id: uint32) -> Optional[DerivationRecord]:
'\n \n '
cursor = (await self.db_connection.execute(f'SELECT MAX(derivation_index) FROM derivation_paths WHERE wallet_id={wallet_id} and used=1;'))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
index = uint32(row[0])
return (await self.get_derivation_record(index, wallet_id))
return None |
async def get_unused_derivation_path(self) -> Optional[uint32]:
'\n Returns the first unused derivation path by derivation_index.\n '
cursor = (await self.db_connection.execute('SELECT MIN(derivation_index) FROM derivation_paths WHERE used=0;'))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
return uint32(row[0])
return None | -1,986,313,468,384,572,400 | Returns the first unused derivation path by derivation_index. | chia/wallet/wallet_puzzle_store.py | get_unused_derivation_path | 1SecureANDROID/chia-blockchain | python | async def get_unused_derivation_path(self) -> Optional[uint32]:
'\n \n '
cursor = (await self.db_connection.execute('SELECT MIN(derivation_index) FROM derivation_paths WHERE used=0;'))
row = (await cursor.fetchone())
(await cursor.close())
if ((row is not None) and (row[0] is not None)):
return uint32(row[0])
return None |
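The store methods above compose into a "claim the next unused address" helper. The sketch below uses only methods shown in this section and assumes `store` is an initialized puzzle store with an open database connection; `wallet_id=1` is a placeholder value.

```python
# Sketch only: `store` is assumed to be an initialized wallet puzzle store; the
# wallet id is a placeholder.
async def claim_next_derivation(store, wallet_id=1):
    index = await store.get_unused_derivation_path()   # smallest derivation_index with used=0
    if index is None:
        return None                                    # nothing unused; caller should derive more paths
    record = await store.get_derivation_record(index, wallet_id)
    await store.set_used_up_to(index)                  # mark this index (and earlier ones) as used
    return record
```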
@property
def channels(self) -> int:
'The number of channels the file has.'
return self._channels | -4,151,978,912,950,002,000 | The number of channels the file has. | lacaudiofiles/wave/wavefile.py | channels | landmarkacoustics/lac-audio-files | python | @property
def channels(self) -> int:
return self._channels |
@property
def sample_rate(self) -> int:
'The number of samples per second.'
return self._sample_rate | -6,119,015,805,304,670,000 | The number of samples per second. | lacaudiofiles/wave/wavefile.py | sample_rate | landmarkacoustics/lac-audio-files | python | @property
def sample_rate(self) -> int:
return self._sample_rate |
@property
def byte_rate(self) -> int:
'The number of bytes per sample.'
return self._byte_rate | 667,125,268,274,181,600 | The number of bytes per sample. | lacaudiofiles/wave/wavefile.py | byte_rate | landmarkacoustics/lac-audio-files | python | @property
def byte_rate(self) -> int:
return self._byte_rate |
@property
def bit_rate(self) -> int:
'The number of bits per sample.'
return (self.byte_rate * 8) | -767,199,106,598,475,400 | The number of bits per sample. | lacaudiofiles/wave/wavefile.py | bit_rate | landmarkacoustics/lac-audio-files | python | @property
def bit_rate(self) -> int:
return (self.byte_rate * 8) |
def write_frames(self, data) -> int:
"Add some data to the file.\n\n Parameters\n ----------\n data : bytes-like object\n The user must ensure that the data's format matches the file's!\n\n Returns\n -------\n int : the number of frames written\n\n "
pos = self._filehandle.tell()
self._filehandle.writeframes(data)
return (self._filehandle.tell() - pos) | -1,767,031,196,508,024,600 | Add some data to the file.
Parameters
----------
data : bytes-like object
The user must ensure that the data's format matches the file's!
Returns
-------
int : the number of frames written | lacaudiofiles/wave/wavefile.py | write_frames | landmarkacoustics/lac-audio-files | python | def write_frames(self, data) -> int:
"Add some data to the file.\n\n Parameters\n ----------\n data : bytes-like object\n The user must ensure that the data's format matches the file's!\n\n Returns\n -------\n int : the number of frames written\n\n "
pos = self._filehandle.tell()
self._filehandle.writeframes(data)
return (self._filehandle.tell() - pos) |
@property
def frame_size(self) -> int:
'The number of bytes per frame.'
return (self.byte_rate * self.channels) | -2,240,499,510,299,825,400 | The number of bytes per frame. | lacaudiofiles/wave/wavefile.py | frame_size | landmarkacoustics/lac-audio-files | python | @property
def frame_size(self) -> int:
return (self.byte_rate * self.channels) |
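A small numeric check of the byte accounting these properties describe: with 16-bit mono audio, each frame is `byte_rate * channels` bytes, so one second occupies `sample_rate * frame_size` bytes. The `writer` object in the final comment is an assumption standing in for an instance of the class above.

```python
import numpy as np

# Sketch only: one second of a 440 Hz tone as 16-bit mono samples.
sample_rate, channels, byte_rate = 44100, 1, 2            # 2 bytes per sample -> 16-bit audio
t = np.arange(sample_rate) / sample_rate
tone = (0.5 * np.sin(2 * np.pi * 440 * t) * 32767).astype(np.int16)

frame_size = byte_rate * channels                         # bytes per frame, as in the property above
assert len(tone.tobytes()) == sample_rate * frame_size    # one second = sample_rate frames
# writer.write_frames(tone.tobytes()) would append these frames, assuming `writer`
# is an instance of the wave-file class shown above.
```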
def main():
'Run administrative tasks.'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'goodshare.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?") from exc
execute_from_command_line(sys.argv) | 8,687,208,097,773,623,000 | Run administrative tasks. | manage.py | main | nikhilchaudhary0126/goodshare | python | def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'goodshare.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?") from exc
execute_from_command_line(sys.argv) |
def crop_image_from_xy(image, crop_location, crop_size, scale=1.0):
'\n Crops an image. When factor is not given does an central crop.\n\n Inputs:\n image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension\n crop_location: tensor, [batch, 2] which represent the height and width location of the crop\n crop_size: int, describes the extension of the crop\n Outputs:\n image_crop: 4D tensor, [batch, crop_size, crop_size, channels]\n '
with tf.name_scope('crop_image_from_xy'):
s = image.get_shape().as_list()
assert (len(s) == 4), 'Image needs to be of shape [batch, width, height, channel]'
scale = tf.reshape(scale, [(- 1)])
crop_location = tf.cast(crop_location, tf.float32)
crop_location = tf.reshape(crop_location, [s[0], 2])
crop_size = tf.cast(crop_size, tf.float32)
crop_size_scaled = (crop_size / scale)
y1 = (crop_location[:, 0] - (crop_size_scaled // 2))
y2 = (y1 + crop_size_scaled)
x1 = (crop_location[:, 1] - (crop_size_scaled // 2))
x2 = (x1 + crop_size_scaled)
y1 /= s[1]
y2 /= s[1]
x1 /= s[2]
x2 /= s[2]
boxes = tf.stack([y1, x1, y2, x2], (- 1))
crop_size = tf.cast(tf.stack([crop_size, crop_size]), tf.int32)
box_ind = tf.range(s[0])
image_c = tf.image.crop_and_resize(tf.cast(image, tf.float32), boxes, box_ind, crop_size, name='crop')
return image_c | 8,955,644,050,935,177,000 | Crops an image. When the scale factor is not given, a central crop is taken.
Inputs:
image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension
crop_location: tensor, [batch, 2] which represent the height and width location of the crop
crop_size: int, describes the extension of the crop
Outputs:
image_crop: 4D tensor, [batch, crop_size, crop_size, channels] | utils/general.py | crop_image_from_xy | vivekkhurana/handsign | python | def crop_image_from_xy(image, crop_location, crop_size, scale=1.0):
'\n Crops an image. When factor is not given does an central crop.\n\n Inputs:\n image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension\n crop_location: tensor, [batch, 2] which represent the height and width location of the crop\n crop_size: int, describes the extension of the crop\n Outputs:\n image_crop: 4D tensor, [batch, crop_size, crop_size, channels]\n '
with tf.name_scope('crop_image_from_xy'):
s = image.get_shape().as_list()
assert (len(s) == 4), 'Image needs to be of shape [batch, width, height, channel]'
scale = tf.reshape(scale, [(- 1)])
crop_location = tf.cast(crop_location, tf.float32)
crop_location = tf.reshape(crop_location, [s[0], 2])
crop_size = tf.cast(crop_size, tf.float32)
crop_size_scaled = (crop_size / scale)
y1 = (crop_location[:, 0] - (crop_size_scaled // 2))
y2 = (y1 + crop_size_scaled)
x1 = (crop_location[:, 1] - (crop_size_scaled // 2))
x2 = (x1 + crop_size_scaled)
y1 /= s[1]
y2 /= s[1]
x1 /= s[2]
x2 /= s[2]
boxes = tf.stack([y1, x1, y2, x2], (- 1))
crop_size = tf.cast(tf.stack([crop_size, crop_size]), tf.int32)
box_ind = tf.range(s[0])
image_c = tf.image.crop_and_resize(tf.cast(image, tf.float32), boxes, box_ind, crop_size, name='crop')
return image_c |
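For reference, the same normalized-box arithmetic in plain NumPy for a single image, without the TensorFlow graph machinery; the helper name and example values are illustrative only.

```python
import numpy as np

# Sketch only: mirrors the box computation inside crop_image_from_xy for one image.
def crop_box(crop_location, crop_size, image_hw, scale=1.0):
    h, w = image_hw
    size_scaled = crop_size / scale                       # larger scale -> tighter crop
    half = size_scaled // 2                               # same floor division as the TF version
    y1, x1 = crop_location[0] - half, crop_location[1] - half
    y2, x2 = y1 + size_scaled, x1 + size_scaled
    return np.array([y1 / h, x1 / w, y2 / h, x2 / w])     # [y1, x1, y2, x2] normalized to [0, 1]

print(crop_box(np.array([120.0, 160.0]), crop_size=256, image_hw=(320, 320), scale=2.0))
```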
def find_max_location(scoremap):
' Returns the coordinates of the given scoremap with maximum value. '
with tf.variable_scope('find_max_location'):
s = scoremap.get_shape().as_list()
if (len(s) == 4):
scoremap = tf.squeeze(scoremap, [3])
if (len(s) == 2):
scoremap = tf.expand_dims(scoremap, 0)
s = scoremap.get_shape().as_list()
assert (len(s) == 3), 'Scoremap must be 3D.'
assert ((s[0] < s[1]) and (s[0] < s[2])), 'Scoremap must be [Batch, Width, Height]'
x_range = tf.expand_dims(tf.range(s[1]), 1)
y_range = tf.expand_dims(tf.range(s[2]), 0)
X = tf.tile(x_range, [1, s[2]])
Y = tf.tile(y_range, [s[1], 1])
x_vec = tf.reshape(X, [(- 1)])
y_vec = tf.reshape(Y, [(- 1)])
scoremap_vec = tf.reshape(scoremap, [s[0], (- 1)])
max_ind_vec = tf.cast(tf.argmax(scoremap_vec, dimension=1), tf.int32)
xy_loc = list()
for i in range(s[0]):
x_loc = tf.reshape(x_vec[max_ind_vec[i]], [1])
y_loc = tf.reshape(y_vec[max_ind_vec[i]], [1])
xy_loc.append(tf.concat([x_loc, y_loc], 0))
xy_loc = tf.stack(xy_loc, 0)
return xy_loc | 1,839,514,264,288,827,100 | Returns the coordinates of the given scoremap with maximum value. | utils/general.py | find_max_location | vivekkhurana/handsign | python | def find_max_location(scoremap):
' '
with tf.variable_scope('find_max_location'):
s = scoremap.get_shape().as_list()
if (len(s) == 4):
scoremap = tf.squeeze(scoremap, [3])
if (len(s) == 2):
scoremap = tf.expand_dims(scoremap, 0)
s = scoremap.get_shape().as_list()
assert (len(s) == 3), 'Scoremap must be 3D.'
assert ((s[0] < s[1]) and (s[0] < s[2])), 'Scoremap must be [Batch, Width, Height]'
x_range = tf.expand_dims(tf.range(s[1]), 1)
y_range = tf.expand_dims(tf.range(s[2]), 0)
X = tf.tile(x_range, [1, s[2]])
Y = tf.tile(y_range, [s[1], 1])
x_vec = tf.reshape(X, [(- 1)])
y_vec = tf.reshape(Y, [(- 1)])
scoremap_vec = tf.reshape(scoremap, [s[0], (- 1)])
max_ind_vec = tf.cast(tf.argmax(scoremap_vec, dimension=1), tf.int32)
xy_loc = list()
for i in range(s[0]):
x_loc = tf.reshape(x_vec[max_ind_vec[i]], [1])
y_loc = tf.reshape(y_vec[max_ind_vec[i]], [1])
xy_loc.append(tf.concat([x_loc, y_loc], 0))
xy_loc = tf.stack(xy_loc, 0)
return xy_loc |
def single_obj_scoremap(scoremap):
' Applies my algorithm to figure out the most likely object from a given segmentation scoremap. '
with tf.variable_scope('single_obj_scoremap'):
filter_size = 21
s = scoremap.get_shape().as_list()
assert (len(s) == 4), 'Scoremap must be 4D.'
scoremap_softmax = tf.nn.softmax(scoremap)
scoremap_fg = tf.reduce_max(scoremap_softmax[:, :, :, 1:], 3)
detmap_fg = tf.round(scoremap_fg)
max_loc = find_max_location(scoremap_fg)
objectmap_list = list()
kernel_dil = (tf.ones((filter_size, filter_size, 1)) / float((filter_size * filter_size)))
for i in range(s[0]):
sparse_ind = tf.reshape(max_loc[i, :], [1, 2])
objectmap = tf.sparse_to_dense(sparse_ind, [s[1], s[2]], 1.0)
num_passes = (max(s[1], s[2]) // (filter_size // 2))
for j in range(num_passes):
objectmap = tf.reshape(objectmap, [1, s[1], s[2], 1])
objectmap_dil = tf.nn.dilation2d(objectmap, kernel_dil, [1, 1, 1, 1], [1, 1, 1, 1], 'SAME')
objectmap_dil = tf.reshape(objectmap_dil, [s[1], s[2]])
objectmap = tf.round(tf.multiply(detmap_fg[i, :, :], objectmap_dil))
objectmap = tf.reshape(objectmap, [s[1], s[2], 1])
objectmap_list.append(objectmap)
objectmap = tf.stack(objectmap_list)
return objectmap | -8,797,348,347,964,816,000 | Applies my algorithm to figure out the most likely object from a given segmentation scoremap. | utils/general.py | single_obj_scoremap | vivekkhurana/handsign | python | def single_obj_scoremap(scoremap):
' '
with tf.variable_scope('single_obj_scoremap'):
filter_size = 21
s = scoremap.get_shape().as_list()
assert (len(s) == 4), 'Scoremap must be 4D.'
scoremap_softmax = tf.nn.softmax(scoremap)
scoremap_fg = tf.reduce_max(scoremap_softmax[:, :, :, 1:], 3)
detmap_fg = tf.round(scoremap_fg)
max_loc = find_max_location(scoremap_fg)
objectmap_list = list()
kernel_dil = (tf.ones((filter_size, filter_size, 1)) / float((filter_size * filter_size)))
for i in range(s[0]):
sparse_ind = tf.reshape(max_loc[i, :], [1, 2])
objectmap = tf.sparse_to_dense(sparse_ind, [s[1], s[2]], 1.0)
num_passes = (max(s[1], s[2]) // (filter_size // 2))
for j in range(num_passes):
objectmap = tf.reshape(objectmap, [1, s[1], s[2], 1])
objectmap_dil = tf.nn.dilation2d(objectmap, kernel_dil, [1, 1, 1, 1], [1, 1, 1, 1], 'SAME')
objectmap_dil = tf.reshape(objectmap_dil, [s[1], s[2]])
objectmap = tf.round(tf.multiply(detmap_fg[i, :, :], objectmap_dil))
objectmap = tf.reshape(objectmap, [s[1], s[2], 1])
objectmap_list.append(objectmap)
objectmap = tf.stack(objectmap_list)
return objectmap |
def calc_center_bb(binary_class_mask):
' Returns the center of mass coordinates for the given binary_class_mask. '
with tf.variable_scope('calc_center_bb'):
binary_class_mask = tf.cast(binary_class_mask, tf.int32)
binary_class_mask = tf.equal(binary_class_mask, 1)
s = binary_class_mask.get_shape().as_list()
if (len(s) == 4):
binary_class_mask = tf.squeeze(binary_class_mask, [3])
s = binary_class_mask.get_shape().as_list()
assert (len(s) == 3), 'binary_class_mask must be 3D.'
assert ((s[0] < s[1]) and (s[0] < s[2])), 'binary_class_mask must be [Batch, Width, Height]'
x_range = tf.expand_dims(tf.range(s[1]), 1)
y_range = tf.expand_dims(tf.range(s[2]), 0)
X = tf.tile(x_range, [1, s[2]])
Y = tf.tile(y_range, [s[1], 1])
bb_list = list()
center_list = list()
crop_size_list = list()
for i in range(s[0]):
X_masked = tf.cast(tf.boolean_mask(X, binary_class_mask[i, :, :]), tf.float32)
Y_masked = tf.cast(tf.boolean_mask(Y, binary_class_mask[i, :, :]), tf.float32)
x_min = tf.reduce_min(X_masked)
x_max = tf.reduce_max(X_masked)
y_min = tf.reduce_min(Y_masked)
y_max = tf.reduce_max(Y_masked)
start = tf.stack([x_min, y_min])
end = tf.stack([x_max, y_max])
bb = tf.stack([start, end], 1)
bb_list.append(bb)
center_x = (0.5 * (x_max + x_min))
center_y = (0.5 * (y_max + y_min))
center = tf.stack([center_x, center_y], 0)
center = tf.cond(tf.reduce_all(tf.is_finite(center)), (lambda : center), (lambda : tf.constant([160.0, 160.0])))
center.set_shape([2])
center_list.append(center)
crop_size_x = (x_max - x_min)
crop_size_y = (y_max - y_min)
crop_size = tf.expand_dims(tf.maximum(crop_size_x, crop_size_y), 0)
crop_size = tf.cond(tf.reduce_all(tf.is_finite(crop_size)), (lambda : crop_size), (lambda : tf.constant([100.0])))
crop_size.set_shape([1])
crop_size_list.append(crop_size)
bb = tf.stack(bb_list)
center = tf.stack(center_list)
crop_size = tf.stack(crop_size_list)
return (center, bb, crop_size) | 495,273,454,323,574,600 | Returns the center of mass coordinates for the given binary_class_mask. | utils/general.py | calc_center_bb | vivekkhurana/handsign | python | def calc_center_bb(binary_class_mask):
' '
with tf.variable_scope('calc_center_bb'):
binary_class_mask = tf.cast(binary_class_mask, tf.int32)
binary_class_mask = tf.equal(binary_class_mask, 1)
s = binary_class_mask.get_shape().as_list()
if (len(s) == 4):
binary_class_mask = tf.squeeze(binary_class_mask, [3])
s = binary_class_mask.get_shape().as_list()
assert (len(s) == 3), 'binary_class_mask must be 3D.'
assert ((s[0] < s[1]) and (s[0] < s[2])), 'binary_class_mask must be [Batch, Width, Height]'
x_range = tf.expand_dims(tf.range(s[1]), 1)
y_range = tf.expand_dims(tf.range(s[2]), 0)
X = tf.tile(x_range, [1, s[2]])
Y = tf.tile(y_range, [s[1], 1])
bb_list = list()
center_list = list()
crop_size_list = list()
for i in range(s[0]):
X_masked = tf.cast(tf.boolean_mask(X, binary_class_mask[i, :, :]), tf.float32)
Y_masked = tf.cast(tf.boolean_mask(Y, binary_class_mask[i, :, :]), tf.float32)
x_min = tf.reduce_min(X_masked)
x_max = tf.reduce_max(X_masked)
y_min = tf.reduce_min(Y_masked)
y_max = tf.reduce_max(Y_masked)
start = tf.stack([x_min, y_min])
end = tf.stack([x_max, y_max])
bb = tf.stack([start, end], 1)
bb_list.append(bb)
center_x = (0.5 * (x_max + x_min))
center_y = (0.5 * (y_max + y_min))
center = tf.stack([center_x, center_y], 0)
center = tf.cond(tf.reduce_all(tf.is_finite(center)), (lambda : center), (lambda : tf.constant([160.0, 160.0])))
center.set_shape([2])
center_list.append(center)
crop_size_x = (x_max - x_min)
crop_size_y = (y_max - y_min)
crop_size = tf.expand_dims(tf.maximum(crop_size_x, crop_size_y), 0)
crop_size = tf.cond(tf.reduce_all(tf.is_finite(crop_size)), (lambda : crop_size), (lambda : tf.constant([100.0])))
crop_size.set_shape([1])
crop_size_list.append(crop_size)
bb = tf.stack(bb_list)
center = tf.stack(center_list)
crop_size = tf.stack(crop_size_list)
return (center, bb, crop_size) |
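The same center and crop-size arithmetic for a single binary mask in NumPy, which can help sanity-check the TensorFlow version above; the mask below is illustrative.

```python
import numpy as np

# Sketch only: bounding box, center, and square crop size of a binary mask,
# mirroring calc_center_bb for one image.
mask = np.zeros((320, 320), dtype=bool)
mask[100:180, 140:260] = True                    # an illustrative foreground region
ys, xs = np.nonzero(mask)
y_min, y_max, x_min, x_max = ys.min(), ys.max(), xs.min(), xs.max()
center = np.array([0.5 * (y_min + y_max), 0.5 * (x_min + x_max)])
crop_size = max(y_max - y_min, x_max - x_min)    # side length of the enclosing square crop
print(center, crop_size)
```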
def detect_keypoints(scoremaps):
' Performs detection per scoremap for the hands keypoints. '
if (len(scoremaps.shape) == 4):
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert (len(s) == 3), 'This function was only designed for 3D Scoremaps.'
assert ((s[2] < s[1]) and (s[2] < s[0])), 'Probably the input is not correct, because [H, W, C] is expected.'
keypoint_coords = np.zeros((s[2], 2))
for i in range(s[2]):
(v, u) = np.unravel_index(np.argmax(scoremaps[:, :, i]), (s[0], s[1]))
keypoint_coords[(i, 0)] = v
keypoint_coords[(i, 1)] = u
return keypoint_coords | 4,990,219,028,124,282,000 | Performs detection per scoremap for the hands keypoints. | utils/general.py | detect_keypoints | vivekkhurana/handsign | python | def detect_keypoints(scoremaps):
' '
if (len(scoremaps.shape) == 4):
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert (len(s) == 3), 'This function was only designed for 3D Scoremaps.'
assert ((s[2] < s[1]) and (s[2] < s[0])), 'Probably the input is not correct, because [H, W, C] is expected.'
keypoint_coords = np.zeros((s[2], 2))
for i in range(s[2]):
(v, u) = np.unravel_index(np.argmax(scoremaps[:, :, i]), (s[0], s[1]))
keypoint_coords[(i, 0)] = v
keypoint_coords[(i, 1)] = u
return keypoint_coords |
def trafo_coords(keypoints_crop_coords, centers, scale, crop_size):
' Transforms coords into global image coordinates. '
keypoints_coords = np.copy(keypoints_crop_coords)
keypoints_coords -= (crop_size // 2)
keypoints_coords /= scale
keypoints_coords += centers
return keypoints_coords | -531,633,264,401,260,200 | Transforms coords into global image coordinates. | utils/general.py | trafo_coords | vivekkhurana/handsign | python | def trafo_coords(keypoints_crop_coords, centers, scale, crop_size):
' '
keypoints_coords = np.copy(keypoints_crop_coords)
keypoints_coords -= (crop_size // 2)
keypoints_coords /= scale
keypoints_coords += centers
return keypoints_coords |
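Combining the two utilities above: per-channel argmax detection on a crop, then mapping the crop coordinates back to the full image. All shapes and values below are illustrative.

```python
import numpy as np

# Sketch only: mirrors detect_keypoints followed by trafo_coords in NumPy.
scoremaps = np.random.rand(256, 256, 21)                          # [H, W, num_keypoints]
flat_idx = np.argmax(scoremaps.reshape(-1, 21), axis=0)           # best flat location per channel
coords_crop = np.stack(np.unravel_index(flat_idx, (256, 256)), axis=-1).astype(float)

center, scale, crop_size = np.array([180.0, 210.0]), 1.25, 256    # illustrative crop parameters
coords_global = (coords_crop - crop_size // 2) / scale + center   # (21, 2) row/col in the full image
print(coords_global.shape)
```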
def plot_hand(coords_hw, axis, color_fixed=None, linewidth='1'):
' Plots a hand stick figure into a matplotlib figure. '
colors = np.array([[0.0, 0.0, 0.5], [0.0, 0.0, 0.73172906], [0.0, 0.0, 0.96345811], [0.0, 0.12745098, 1.0], [0.0, 0.33137255, 1.0], [0.0, 0.55098039, 1.0], [0.0, 0.75490196, 1.0], [0.06008855, 0.9745098, 0.90765338], [0.22454143, 1.0, 0.74320051], [0.40164453, 1.0, 0.56609741], [0.56609741, 1.0, 0.40164453], [0.74320051, 1.0, 0.22454143], [0.90765338, 1.0, 0.06008855], [1.0, 0.82861293, 0.0], [1.0, 0.63979666, 0.0], [1.0, 0.43645606, 0.0], [1.0, 0.2476398, 0.0], [0.96345811, 0.0442992, 0.0], [0.73172906, 0.0, 0.0], [0.5, 0.0, 0.0]])
bones = [((0, 4), colors[0, :]), ((4, 3), colors[1, :]), ((3, 2), colors[2, :]), ((2, 1), colors[3, :]), ((0, 8), colors[4, :]), ((8, 7), colors[5, :]), ((7, 6), colors[6, :]), ((6, 5), colors[7, :]), ((0, 12), colors[8, :]), ((12, 11), colors[9, :]), ((11, 10), colors[10, :]), ((10, 9), colors[11, :]), ((0, 16), colors[12, :]), ((16, 15), colors[13, :]), ((15, 14), colors[14, :]), ((14, 13), colors[15, :]), ((0, 20), colors[16, :]), ((20, 19), colors[17, :]), ((19, 18), colors[18, :]), ((18, 17), colors[19, :])]
for (connection, color) in bones:
coord1 = coords_hw[connection[0], :]
coord2 = coords_hw[connection[1], :]
coords = np.stack([coord1, coord2])
if (color_fixed is None):
axis.plot(coords[:, 1], coords[:, 0], color=color, linewidth=linewidth)
else:
axis.plot(coords[:, 1], coords[:, 0], color_fixed, linewidth=linewidth) | 9,040,086,310,086,804,000 | Plots a hand stick figure into a matplotlib figure. | utils/general.py | plot_hand | vivekkhurana/handsign | python | def plot_hand(coords_hw, axis, color_fixed=None, linewidth='1'):
' '
colors = np.array([[0.0, 0.0, 0.5], [0.0, 0.0, 0.73172906], [0.0, 0.0, 0.96345811], [0.0, 0.12745098, 1.0], [0.0, 0.33137255, 1.0], [0.0, 0.55098039, 1.0], [0.0, 0.75490196, 1.0], [0.06008855, 0.9745098, 0.90765338], [0.22454143, 1.0, 0.74320051], [0.40164453, 1.0, 0.56609741], [0.56609741, 1.0, 0.40164453], [0.74320051, 1.0, 0.22454143], [0.90765338, 1.0, 0.06008855], [1.0, 0.82861293, 0.0], [1.0, 0.63979666, 0.0], [1.0, 0.43645606, 0.0], [1.0, 0.2476398, 0.0], [0.96345811, 0.0442992, 0.0], [0.73172906, 0.0, 0.0], [0.5, 0.0, 0.0]])
bones = [((0, 4), colors[0, :]), ((4, 3), colors[1, :]), ((3, 2), colors[2, :]), ((2, 1), colors[3, :]), ((0, 8), colors[4, :]), ((8, 7), colors[5, :]), ((7, 6), colors[6, :]), ((6, 5), colors[7, :]), ((0, 12), colors[8, :]), ((12, 11), colors[9, :]), ((11, 10), colors[10, :]), ((10, 9), colors[11, :]), ((0, 16), colors[12, :]), ((16, 15), colors[13, :]), ((15, 14), colors[14, :]), ((14, 13), colors[15, :]), ((0, 20), colors[16, :]), ((20, 19), colors[17, :]), ((19, 18), colors[18, :]), ((18, 17), colors[19, :])]
for (connection, color) in bones:
coord1 = coords_hw[connection[0], :]
coord2 = coords_hw[connection[1], :]
coords = np.stack([coord1, coord2])
if (color_fixed is None):
axis.plot(coords[:, 1], coords[:, 0], color=color, linewidth=linewidth)
else:
axis.plot(coords[:, 1], coords[:, 0], color_fixed, linewidth=linewidth) |
def plot_hand_3d(coords_xyz, axis, color_fixed=None, linewidth='1'):
' Plots a hand stick figure into a matplotlib figure. '
colors = np.array([[0.0, 0.0, 0.5], [0.0, 0.0, 0.73172906], [0.0, 0.0, 0.96345811], [0.0, 0.12745098, 1.0], [0.0, 0.33137255, 1.0], [0.0, 0.55098039, 1.0], [0.0, 0.75490196, 1.0], [0.06008855, 0.9745098, 0.90765338], [0.22454143, 1.0, 0.74320051], [0.40164453, 1.0, 0.56609741], [0.56609741, 1.0, 0.40164453], [0.74320051, 1.0, 0.22454143], [0.90765338, 1.0, 0.06008855], [1.0, 0.82861293, 0.0], [1.0, 0.63979666, 0.0], [1.0, 0.43645606, 0.0], [1.0, 0.2476398, 0.0], [0.96345811, 0.0442992, 0.0], [0.73172906, 0.0, 0.0], [0.5, 0.0, 0.0]])
bones = [((0, 4), colors[0, :]), ((4, 3), colors[1, :]), ((3, 2), colors[2, :]), ((2, 1), colors[3, :]), ((0, 8), colors[4, :]), ((8, 7), colors[5, :]), ((7, 6), colors[6, :]), ((6, 5), colors[7, :]), ((0, 12), colors[8, :]), ((12, 11), colors[9, :]), ((11, 10), colors[10, :]), ((10, 9), colors[11, :]), ((0, 16), colors[12, :]), ((16, 15), colors[13, :]), ((15, 14), colors[14, :]), ((14, 13), colors[15, :]), ((0, 20), colors[16, :]), ((20, 19), colors[17, :]), ((19, 18), colors[18, :]), ((18, 17), colors[19, :])]
for (connection, color) in bones:
coord1 = coords_xyz[connection[0], :]
coord2 = coords_xyz[connection[1], :]
coords = np.stack([coord1, coord2])
if (color_fixed is None):
axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color=color, linewidth=linewidth)
else:
axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color_fixed, linewidth=linewidth)
axis.view_init(azim=(- 90.0), elev=90.0) | -1,052,739,098,623,866,900 | Plots a hand stick figure into a matplotlib figure. | utils/general.py | plot_hand_3d | vivekkhurana/handsign | python | def plot_hand_3d(coords_xyz, axis, color_fixed=None, linewidth='1'):
' '
colors = np.array([[0.0, 0.0, 0.5], [0.0, 0.0, 0.73172906], [0.0, 0.0, 0.96345811], [0.0, 0.12745098, 1.0], [0.0, 0.33137255, 1.0], [0.0, 0.55098039, 1.0], [0.0, 0.75490196, 1.0], [0.06008855, 0.9745098, 0.90765338], [0.22454143, 1.0, 0.74320051], [0.40164453, 1.0, 0.56609741], [0.56609741, 1.0, 0.40164453], [0.74320051, 1.0, 0.22454143], [0.90765338, 1.0, 0.06008855], [1.0, 0.82861293, 0.0], [1.0, 0.63979666, 0.0], [1.0, 0.43645606, 0.0], [1.0, 0.2476398, 0.0], [0.96345811, 0.0442992, 0.0], [0.73172906, 0.0, 0.0], [0.5, 0.0, 0.0]])
bones = [((0, 4), colors[0, :]), ((4, 3), colors[1, :]), ((3, 2), colors[2, :]), ((2, 1), colors[3, :]), ((0, 8), colors[4, :]), ((8, 7), colors[5, :]), ((7, 6), colors[6, :]), ((6, 5), colors[7, :]), ((0, 12), colors[8, :]), ((12, 11), colors[9, :]), ((11, 10), colors[10, :]), ((10, 9), colors[11, :]), ((0, 16), colors[12, :]), ((16, 15), colors[13, :]), ((15, 14), colors[14, :]), ((14, 13), colors[15, :]), ((0, 20), colors[16, :]), ((20, 19), colors[17, :]), ((19, 18), colors[18, :]), ((18, 17), colors[19, :])]
for (connection, color) in bones:
coord1 = coords_xyz[connection[0], :]
coord2 = coords_xyz[connection[1], :]
coords = np.stack([coord1, coord2])
if (color_fixed is None):
axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color=color, linewidth=linewidth)
else:
axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color_fixed, linewidth=linewidth)
axis.view_init(azim=(- 90.0), elev=90.0) |
def plot_hand_2d(coords_hw, image, color_fixed=None, linewidth=2):
' Plots a hand stick figure into a matplotlib figure. '
colors = [(0, 0, 127), (0, 0, 187), (0, 0, 246), (0, 32, 255), (0, 85, 255), (0, 140, 255), (0, 192, 255), (15, 248, 231), (57, 255, 190), (102, 1, 144), (144, 1, 102), (190, 1, 57), (231, 1, 15), (1, 211, 0), (1, 163, 0), (1, 111, 0), (1, 63, 0), (246, 11, 0), (187, 0, 0), (127, 0, 0)]
bones = [((0, 4), colors[0]), ((4, 3), colors[1]), ((3, 2), colors[2]), ((2, 1), colors[3]), ((0, 8), colors[4]), ((8, 7), colors[5]), ((7, 6), colors[6]), ((6, 5), colors[7]), ((0, 12), colors[8]), ((12, 11), colors[9]), ((11, 10), colors[10]), ((10, 9), colors[11]), ((0, 16), colors[12]), ((16, 15), colors[13]), ((15, 14), colors[14]), ((14, 13), colors[15]), ((0, 20), colors[16]), ((20, 19), colors[17]), ((19, 18), colors[18]), ((18, 17), colors[19])]
for (connection, color) in bones:
coord1 = coords_hw[connection[0], :]
coord2 = coords_hw[connection[1], :]
coords = np.stack([coord1, coord2])
coord1_t = (int(coord1[1]), int(coord1[0]))
coord2_t = (int(coord2[1]), int(coord2[0]))
if (color_fixed is None):
cv2.line(image, coord2_t, coord1_t, color, linewidth)
else:
cv2.line(image, coord1_t, coord2_t, color_fixed, linewidth) | 6,905,083,832,229,474,000 | Plots a hand stick figure into a matplotlib figure. | utils/general.py | plot_hand_2d | vivekkhurana/handsign | python | def plot_hand_2d(coords_hw, image, color_fixed=None, linewidth=2):
' '
colors = [(0, 0, 127), (0, 0, 187), (0, 0, 246), (0, 32, 255), (0, 85, 255), (0, 140, 255), (0, 192, 255), (15, 248, 231), (57, 255, 190), (102, 1, 144), (144, 1, 102), (190, 1, 57), (231, 1, 15), (1, 211, 0), (1, 163, 0), (1, 111, 0), (1, 63, 0), (246, 11, 0), (187, 0, 0), (127, 0, 0)]
bones = [((0, 4), colors[0]), ((4, 3), colors[1]), ((3, 2), colors[2]), ((2, 1), colors[3]), ((0, 8), colors[4]), ((8, 7), colors[5]), ((7, 6), colors[6]), ((6, 5), colors[7]), ((0, 12), colors[8]), ((12, 11), colors[9]), ((11, 10), colors[10]), ((10, 9), colors[11]), ((0, 16), colors[12]), ((16, 15), colors[13]), ((15, 14), colors[14]), ((14, 13), colors[15]), ((0, 20), colors[16]), ((20, 19), colors[17]), ((19, 18), colors[18]), ((18, 17), colors[19])]
for (connection, color) in bones:
coord1 = coords_hw[connection[0], :]
coord2 = coords_hw[connection[1], :]
coords = np.stack([coord1, coord2])
coord1_t = (int(coord1[1]), int(coord1[0]))
coord2_t = (int(coord2[1]), int(coord2[0]))
if (color_fixed is None):
cv2.line(image, coord2_t, coord1_t, color, linewidth)
else:
cv2.line(image, coord1_t, coord2_t, color_fixed, linewidth) |
def load_weights_from_snapshot(session, checkpoint_path, discard_list=None, rename_dict=None):
' Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed. '
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
if (discard_list is not None):
num_disc = 0
var_to_shape_map_new = dict()
for (k, v) in var_to_shape_map.items():
good = True
for dis_str in discard_list:
if (dis_str in k):
good = False
if good:
var_to_shape_map_new[k] = v
else:
num_disc += 1
var_to_shape_map = dict(var_to_shape_map_new)
print(('Discarded %d items' % num_disc))
num_rename = 0
var_to_shape_map_new = dict()
for name in var_to_shape_map.keys():
new_name = name
if (rename_dict is not None):
for rename_str in rename_dict.keys():
if (rename_str in name):
new_name = new_name.replace(rename_str, rename_dict[rename_str])
num_rename += 1
var_to_shape_map_new[new_name] = reader.get_tensor(name)
var_to_shape_map = dict(var_to_shape_map_new)
(init_op, init_feed) = tf.contrib.framework.assign_from_values(var_to_shape_map)
session.run(init_op, init_feed)
print(('Initialized %d variables from %s.' % (len(var_to_shape_map), checkpoint_path))) | -5,373,056,543,232,178,000 | Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed. | utils/general.py | load_weights_from_snapshot | vivekkhurana/handsign | python | def load_weights_from_snapshot(session, checkpoint_path, discard_list=None, rename_dict=None):
' '
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
if (discard_list is not None):
num_disc = 0
var_to_shape_map_new = dict()
for (k, v) in var_to_shape_map.items():
good = True
for dis_str in discard_list:
if (dis_str in k):
good = False
if good:
var_to_shape_map_new[k] = v
else:
num_disc += 1
var_to_shape_map = dict(var_to_shape_map_new)
print(('Discarded %d items' % num_disc))
num_rename = 0
var_to_shape_map_new = dict()
for name in var_to_shape_map.keys():
new_name = name
if (rename_dict is not None):
for rename_str in rename_dict.keys():
if (rename_str in name):
new_name = new_name.replace(rename_str, rename_dict[rename_str])
num_rename += 1
var_to_shape_map_new[new_name] = reader.get_tensor(name)
var_to_shape_map = dict(var_to_shape_map_new)
(init_op, init_feed) = tf.contrib.framework.assign_from_values(var_to_shape_map)
session.run(init_op, init_feed)
print(('Initialized %d variables from %s.' % (len(var_to_shape_map), checkpoint_path))) |
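A hedged example call for load_weights_from_snapshot above; the checkpoint prefix, scope fragments, and rename mapping are hypothetical, and the TF1-style session API matches the tf.contrib usage inside the function:
import tensorflow as tf
with tf.Session() as sess:
    load_weights_from_snapshot(
        sess,
        './snapshots/model-50000',                 # hypothetical checkpoint prefix
        discard_list=['Adam', 'global_step'],      # drop optimizer state and step counters
        rename_dict={'CPM/': 'PoseNet/'})          # hypothetical old -> new scope mapping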
def calc_auc(x, y):
' Given x and y values it calculates the approx. integral and normalizes it: area under curve'
integral = np.trapz(y, x)
norm = np.trapz(np.ones_like(y), x)
return (integral / norm) | 7,823,423,900,271,961,000 | Given x and y values it calculates the approx. integral and normalizes it: area under curve | utils/general.py | calc_auc | vivekkhurana/handsign | python | def calc_auc(x, y):
' '
integral = np.trapz(y, x)
norm = np.trapz(np.ones_like(y), x)
return (integral / norm) |
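A quick worked check of calc_auc above: a curve that is identically 1 gives a normalized area of 1.0, and a straight ramp from 0 to 1 gives 0.5, since np.trapz is exact for piecewise-linear data:
import numpy as np
x = np.linspace(20.0, 50.0, 7)
print(calc_auc(x, np.ones_like(x)))            # 1.0
print(calc_auc(x, np.linspace(0.0, 1.0, 7)))   # 0.5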
def get_stb_ref_curves():
'\n Returns results of various baseline methods on the Stereo Tracking Benchmark Dataset reported by:\n Zhang et al., ‘3d Hand Pose Tracking and Estimation Using Stereo Matching’, 2016\n '
curve_list = list()
thresh_mm = np.array([20.0, 25, 30, 35, 40, 45, 50])
pso_b1 = np.array([0.32236842, 0.53947368, 0.67434211, 0.75657895, 0.80921053, 0.86513158, 0.89473684])
curve_list.append((thresh_mm, pso_b1, ('PSO (AUC=%.3f)' % calc_auc(thresh_mm, pso_b1))))
icppso_b1 = np.array([0.51973684, 0.64473684, 0.71710526, 0.77302632, 0.80921053, 0.84868421, 0.86842105])
curve_list.append((thresh_mm, icppso_b1, ('ICPPSO (AUC=%.3f)' % calc_auc(thresh_mm, icppso_b1))))
chpr_b1 = np.array([0.56578947, 0.71710526, 0.82236842, 0.88157895, 0.91447368, 0.9375, 0.96052632])
curve_list.append((thresh_mm, chpr_b1, ('CHPR (AUC=%.3f)' % calc_auc(thresh_mm, chpr_b1))))
return curve_list | 974,777,666,031,250,600 | Returns results of various baseline methods on the Stereo Tracking Benchmark Dataset reported by:
Zhang et al., ‘3d Hand Pose Tracking and Estimation Using Stereo Matching’, 2016 | utils/general.py | get_stb_ref_curves | vivekkhurana/handsign | python | def get_stb_ref_curves():
'\n Returns results of various baseline methods on the Stereo Tracking Benchmark Dataset reported by:\n Zhang et al., ‘3d Hand Pose Tracking and Estimation Using Stereo Matching’, 2016\n '
curve_list = list()
thresh_mm = np.array([20.0, 25, 30, 35, 40, 45, 50])
pso_b1 = np.array([0.32236842, 0.53947368, 0.67434211, 0.75657895, 0.80921053, 0.86513158, 0.89473684])
curve_list.append((thresh_mm, pso_b1, ('PSO (AUC=%.3f)' % calc_auc(thresh_mm, pso_b1))))
icppso_b1 = np.array([0.51973684, 0.64473684, 0.71710526, 0.77302632, 0.80921053, 0.84868421, 0.86842105])
curve_list.append((thresh_mm, icppso_b1, ('ICPPSO (AUC=%.3f)' % calc_auc(thresh_mm, icppso_b1))))
chpr_b1 = np.array([0.56578947, 0.71710526, 0.82236842, 0.88157895, 0.91447368, 0.9375, 0.96052632])
curve_list.append((thresh_mm, chpr_b1, ('CHPR (AUC=%.3f)' % calc_auc(thresh_mm, chpr_b1))))
return curve_list |
@staticmethod
def dropout(in_tensor, keep_prob, evaluation):
' Dropout: Each neuron is dropped independently. '
with tf.variable_scope('dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation, (lambda : tf.nn.dropout(in_tensor, 1.0, noise_shape=tensor_shape)), (lambda : tf.nn.dropout(in_tensor, keep_prob, noise_shape=tensor_shape)))
return out_tensor | 4,951,999,580,329,712,000 | Dropout: Each neuron is dropped independently. | utils/general.py | dropout | vivekkhurana/handsign | python | @staticmethod
def dropout(in_tensor, keep_prob, evaluation):
' '
with tf.variable_scope('dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation, (lambda : tf.nn.dropout(in_tensor, 1.0, noise_shape=tensor_shape)), (lambda : tf.nn.dropout(in_tensor, keep_prob, noise_shape=tensor_shape)))
return out_tensor |
@staticmethod
def spatial_dropout(in_tensor, keep_prob, evaluation):
' Spatial dropout: Not each neuron is dropped independently, but feature map wise. '
with tf.variable_scope('spatial_dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation, (lambda : tf.nn.dropout(in_tensor, 1.0, noise_shape=tensor_shape)), (lambda : tf.nn.dropout(in_tensor, keep_prob, noise_shape=[tensor_shape[0], 1, 1, tensor_shape[3]])))
return out_tensor | -1,824,907,717,556,814,800 | Spatial dropout: Not each neuron is dropped independently, but feature map wise. | utils/general.py | spatial_dropout | vivekkhurana/handsign | python | @staticmethod
def spatial_dropout(in_tensor, keep_prob, evaluation):
' '
with tf.variable_scope('spatial_dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation, (lambda : tf.nn.dropout(in_tensor, 1.0, noise_shape=tensor_shape)), (lambda : tf.nn.dropout(in_tensor, keep_prob, noise_shape=[tensor_shape[0], 1, 1, tensor_shape[3]])))
return out_tensor |
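The only difference between the two dropout variants above is the noise_shape handed to tf.nn.dropout: plain dropout samples one keep/drop decision per element, while spatial dropout samples one per (sample, channel) and broadcasts it over the spatial dimensions. A small TF1-style sketch of that idea:
import tensorflow as tf
x = tf.ones([2, 8, 8, 16])                            # NHWC feature maps
y = tf.nn.dropout(x, 0.5, noise_shape=[2, 1, 1, 16])  # whole channels are kept or zeroed together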
def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):
' Used to feed data to the class. Stores the euclidean distance between gt and pred, when it is visible. '
keypoint_gt = np.squeeze(keypoint_gt)
keypoint_pred = np.squeeze(keypoint_pred)
keypoint_vis = np.squeeze(keypoint_vis).astype('bool')
assert (len(keypoint_gt.shape) == 2)
assert (len(keypoint_pred.shape) == 2)
assert (len(keypoint_vis.shape) == 1)
diff = (keypoint_gt - keypoint_pred)
euclidean_dist = np.sqrt(np.sum(np.square(diff), axis=1))
num_kp = keypoint_gt.shape[0]
for i in range(num_kp):
if keypoint_vis[i]:
self.data[i].append(euclidean_dist[i]) | -286,897,132,212,552,580 | Used to feed data to the class. Stores the euclidean distance between gt and pred, when it is visible. | utils/general.py | feed | vivekkhurana/handsign | python | def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):
' '
keypoint_gt = np.squeeze(keypoint_gt)
keypoint_pred = np.squeeze(keypoint_pred)
keypoint_vis = np.squeeze(keypoint_vis).astype('bool')
assert (len(keypoint_gt.shape) == 2)
assert (len(keypoint_pred.shape) == 2)
assert (len(keypoint_vis.shape) == 1)
diff = (keypoint_gt - keypoint_pred)
euclidean_dist = np.sqrt(np.sum(np.square(diff), axis=1))
num_kp = keypoint_gt.shape[0]
for i in range(num_kp):
if keypoint_vis[i]:
self.data[i].append(euclidean_dist[i]) |
def _get_pck(self, kp_id, threshold):
' Returns pck for one keypoint for the given threshold. '
if (len(self.data[kp_id]) == 0):
return None
data = np.array(self.data[kp_id])
pck = np.mean((data <= threshold).astype('float'))
return pck | 1,373,773,765,965,389,000 | Returns pck for one keypoint for the given threshold. | utils/general.py | _get_pck | vivekkhurana/handsign | python | def _get_pck(self, kp_id, threshold):
' '
if (len(self.data[kp_id]) == 0):
return None
data = np.array(self.data[kp_id])
pck = np.mean((data <= threshold).astype('float'))
return pck |
def _get_epe(self, kp_id):
' Returns end point error for one keypoint. '
if (len(self.data[kp_id]) == 0):
return (None, None)
data = np.array(self.data[kp_id])
epe_mean = np.mean(data)
epe_median = np.median(data)
return (epe_mean, epe_median) | 7,835,355,939,018,915,000 | Returns end point error for one keypoint. | utils/general.py | _get_epe | vivekkhurana/handsign | python | def _get_epe(self, kp_id):
' '
if (len(self.data[kp_id]) == 0):
return (None, None)
data = np.array(self.data[kp_id])
epe_mean = np.mean(data)
epe_median = np.median(data)
return (epe_mean, epe_median) |
def get_measures(self, val_min, val_max, steps):
' Outputs the average mean and median error as well as the pck score. '
thresholds = np.linspace(val_min, val_max, steps)
thresholds = np.array(thresholds)
norm_factor = np.trapz(np.ones_like(thresholds), thresholds)
epe_mean_all = list()
epe_median_all = list()
auc_all = list()
pck_curve_all = list()
for part_id in range(self.num_kp):
(mean, median) = self._get_epe(part_id)
if (mean is None):
continue
epe_mean_all.append(mean)
epe_median_all.append(median)
pck_curve = list()
for t in thresholds:
pck = self._get_pck(part_id, t)
pck_curve.append(pck)
pck_curve = np.array(pck_curve)
pck_curve_all.append(pck_curve)
auc = np.trapz(pck_curve, thresholds)
auc /= norm_factor
auc_all.append(auc)
epe_mean_all = np.mean(np.array(epe_mean_all))
epe_median_all = np.mean(np.array(epe_median_all))
auc_all = np.mean(np.array(auc_all))
pck_curve_all = np.mean(np.array(pck_curve_all), 0)
return (epe_mean_all, epe_median_all, auc_all, pck_curve_all, thresholds) | -4,849,076,406,514,604,000 | Outputs the average mean and median error as well as the pck score. | utils/general.py | get_measures | vivekkhurana/handsign | python | def get_measures(self, val_min, val_max, steps):
' '
thresholds = np.linspace(val_min, val_max, steps)
thresholds = np.array(thresholds)
norm_factor = np.trapz(np.ones_like(thresholds), thresholds)
epe_mean_all = list()
epe_median_all = list()
auc_all = list()
pck_curve_all = list()
for part_id in range(self.num_kp):
(mean, median) = self._get_epe(part_id)
if (mean is None):
continue
epe_mean_all.append(mean)
epe_median_all.append(median)
pck_curve = list()
for t in thresholds:
pck = self._get_pck(part_id, t)
pck_curve.append(pck)
pck_curve = np.array(pck_curve)
pck_curve_all.append(pck_curve)
auc = np.trapz(pck_curve, thresholds)
auc /= norm_factor
auc_all.append(auc)
epe_mean_all = np.mean(np.array(epe_mean_all))
epe_median_all = np.mean(np.array(epe_median_all))
auc_all = np.mean(np.array(auc_all))
pck_curve_all = np.mean(np.array(pck_curve_all), 0)
return (epe_mean_all, epe_median_all, auc_all, pck_curve_all, thresholds) |
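The feed/_get_pck/_get_epe/get_measures methods above belong to an evaluation-utility class whose name is not shown in this slice; "EvalUtil" below is only a placeholder for it, and the predictions iterable is assumed. The intended pattern is to feed one (ground truth, visibility, prediction) triple per frame and then read the aggregate metrics:
eval_util = EvalUtil()                                   # placeholder class name
for kp_gt, kp_vis, kp_pred in predictions:               # e.g. arrays of shape (21, 3) and (21,)
    eval_util.feed(kp_gt, kp_vis, kp_pred)
mean, median, auc, pck_curve, thresholds = eval_util.get_measures(0.0, 0.050, 20)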
def import_local_resources(args):
'Entrance of importing local resources'
parser = argparse.ArgumentParser(prog='cotk import', description='Import local resources')
parser.add_argument('file_id', type=str, help='Name of resource')
parser.add_argument('file_path', type=str, help='Path to resource')
cargs = parser.parse_args(args)
file_utils.import_local_resources(cargs.file_id, cargs.file_path)
main.LOGGER.info('Successfully import local resource {}.'.format(cargs.file_id)) | 575,454,734,934,522,400 | Entrance of importing local resources | cotk/scripts/import_local_resources.py | import_local_resources | JianGuanTHU/cotk | python | def import_local_resources(args):
parser = argparse.ArgumentParser(prog='cotk import', description='Import local resources')
parser.add_argument('file_id', type=str, help='Name of resource')
parser.add_argument('file_path', type=str, help='Path to resource')
cargs = parser.parse_args(args)
file_utils.import_local_resources(cargs.file_id, cargs.file_path)
main.LOGGER.info('Successfully import local resource {}.'.format(cargs.file_id)) |
def one_row_rbf_kernel(X, i, gamma=None):
'\n X : array of shape (n_samples_X, n_features)\n i : target sample in X (X[i])\n gamma : float, default None\n If None, defaults to 1.0 / n_samples_X\n K(x, y) = exp(-gamma ||x-xi||^2)\n Returns\n -------\n kernel_matrix : array of shape (n_samples_X, n_samples_Y)\n '
if (gamma is None):
gamma = (1.0 / X.shape[0])
d = np.sum(np.power((X - X[i]), 2), axis=1)
return np.array(np.exp(((- gamma) * d))) | 3,638,076,951,810,041,000 | X : array of shape (n_samples_X, n_features)
i : target sample in X (X[i])
gamma : float, default None
If None, defaults to 1.0 / n_samples_X
K(x, y) = exp(-gamma ||x-xi||^2)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y) | spectral_clustering_fd/laplacian_sketch.py | one_row_rbf_kernel | AtsushiHashimoto/SpectralClusteringFD | python | def one_row_rbf_kernel(X, i, gamma=None):
'\n X : array of shape (n_samples_X, n_features)\n i : target sample in X (X[i])\n gamma : float, default None\n If None, defaults to 1.0 / n_samples_X\n K(x, y) = exp(-gamma ||x-xi||^2)\n Returns\n -------\n kernel_matrix : array of shape (n_samples_X, n_samples_Y)\n '
if (gamma is None):
gamma = (1.0 / X.shape[0])
d = np.sum(np.power((X - X[i]), 2), axis=1)
return np.array(np.exp(((- gamma) * d))) |
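Note that one_row_rbf_kernel above returns a single row of the kernel matrix, i.e. a 1-D array of length n_samples_X (the (n_samples_X, n_samples_Y) shape in the docstring describes the full matrix). A worked example:
import numpy as np
X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])
row = one_row_rbf_kernel(X, 0, gamma=1.0)
# squared distances to X[0] are [0, 1, 4], so row = exp(-[0, 1, 4]) ~= [1.0, 0.368, 0.018]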
def one_row_cosine_similarity(X, i):
'\n X : normalized matrix\n i : target sample in X\n '
a = ((np.dot(X, X[i].T) + 1) / 2)
a[(a < 0)] = 0
return a | 4,555,324,856,876,778,500 | X : normalized matrix
i : target sample in X | spectral_clustering_fd/laplacian_sketch.py | one_row_cosine_similarity | AtsushiHashimoto/SpectralClusteringFD | python | def one_row_cosine_similarity(X, i):
'\n X : normalized matrix\n i : target sample in X\n '
a = ((np.dot(X, X[i].T) + 1) / 2)
a[(a < 0)] = 0
return a |
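one_row_cosine_similarity above assumes the rows of X are already L2-normalized, so X @ X[i].T is the cosine; the (a + 1) / 2 step maps [-1, 1] onto [0, 1] and the final clamp guards against small negative round-off. For example:
import numpy as np
X = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0]])   # unit-norm rows
print(one_row_cosine_similarity(X, 0))                # [1.0, 0.5, 0.0]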
def __init__(self, name: str, previous: str, description: str='', refTemp: float=None, maintainAttributes: Boolean=False):
'This method creates an AnnealStep object.\n\n Notes\n -----\n This function can be accessed by:\n\n .. code-block:: python\n\n mdb.models[name].AnnealStep\n \n Parameters\n ----------\n name\n A String specifying the repository key. \n previous\n A String specifying the name of the previous step. The new step appears after this step \n in the list of analysis steps. \n description\n A String specifying a description of the new step. The default value is an empty string. \n refTemp\n A Float specifying the post-anneal reference temperature. The default value is the \n current temperature at all nodes in the model after the annealing has completed. \n maintainAttributes\n A Boolean specifying whether to retain attributes from an existing step with the same \n name. The default value is False. \n\n Returns\n -------\n An AnnealStep object. \n\n Raises\n ------\n RangeError\n '
super().__init__()
pass | -5,879,916,014,305,015,000 | This method creates an AnnealStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].AnnealStep
Parameters
----------
name
A String specifying the repository key.
previous
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
description
A String specifying a description of the new step. The default value is an empty string.
refTemp
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
maintainAttributes
A Boolean specifying whether to retain attributes from an existing step with the same
name. The default value is False.
Returns
-------
An AnnealStep object.
Raises
------
RangeError | src/abaqus/Step/AnnealStep.py | __init__ | Haiiliin/PyAbaqus | python | def __init__(self, name: str, previous: str, description: str='', refTemp: float=None, maintainAttributes: Boolean=False):
'This method creates an AnnealStep object.\n\n Notes\n -----\n This function can be accessed by:\n\n .. code-block:: python\n\n mdb.models[name].AnnealStep\n \n Parameters\n ----------\n name\n A String specifying the repository key. \n previous\n A String specifying the name of the previous step. The new step appears after this step \n in the list of analysis steps. \n description\n A String specifying a description of the new step. The default value is an empty string. \n refTemp\n A Float specifying the post-anneal reference temperature. The default value is the \n current temperature at all nodes in the model after the annealing has completed. \n maintainAttributes\n A Boolean specifying whether to retain attributes from an existing step with the same \n name. The default value is False. \n\n Returns\n -------\n An AnnealStep object. \n\n Raises\n ------\n RangeError\n '
super().__init__()
pass |
def setValues(self, description: str='', refTemp: float=None):
'This method modifies the AnnealStep object.\n \n Parameters\n ----------\n description\n A String specifying a description of the new step. The default value is an empty string. \n refTemp\n A Float specifying the post-anneal reference temperature. The default value is the \n current temperature at all nodes in the model after the annealing has completed.\n\n Raises\n ------\n RangeError\n '
pass | 2,165,322,819,001,568,300 | This method modifies the AnnealStep object.
Parameters
----------
description
A String specifying a description of the new step. The default value is an empty string.
refTemp
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
Raises
------
RangeError | src/abaqus/Step/AnnealStep.py | setValues | Haiiliin/PyAbaqus | python | def setValues(self, description: str='', refTemp: float=None):
'This method modifies the AnnealStep object.\n \n Parameters\n ----------\n description\n A String specifying a description of the new step. The default value is an empty string. \n refTemp\n A Float specifying the post-anneal reference temperature. The default value is the \n current temperature at all nodes in the model after the annealing has completed.\n\n Raises\n ------\n RangeError\n '
pass |
def sample_mask(idx, l):
'Create mask.'
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool) | 2,110,093,059,590,823,700 | Create mask. | utils.py | sample_mask | smtnkc/gcn4epi | python | def sample_mask(idx, l):
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool) |
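A worked example for sample_mask above; note that np.bool was removed in NumPy 1.24+, so on recent NumPy the dtype would need to be plain bool:
import numpy as np
print(sample_mask([0, 3], 5))    # [ True False False  True False]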
def load_data(cell_line, cross_cell_line, label_rate, k_mer):
'\n Load input data from data/cell_line directory.\n\n | x_20.index | the indices (IDs) of labeled train instances as list object (for label_rate = 20%) |\n | ux_20.index | the indices (IDs) of unlabeled train instances as list object (for label_rate = 20%) |\n | vx_20.index | the indices (IDs) of validation instances as list object (for label_rate = 20%) |\n | tx_20.index | the indices (IDs) of test instances as list object (for label_rate = 20%) |\n | features_5mer | the feature vectors of all instances as scipy.sparse.csr.csr_matrix object (for k_mer = 5) |\n | nodes | a dict in the format {chromosome_name: ID} as collections.defaultdict object |\n | labels | the one-hot labels of all instances as numpy.ndarray object |\n | graph | a dict in the format {ID: [IDs_of_neighbor_nodes]} as collections.defaultdict object |\n\n All objects above must be saved using python pickle module.\n\n :param cell_line: Name of the cell line to which the datasets belong\n :return: All data input files loaded (as well the training/test data).\n '
if ((cross_cell_line != None) and (cross_cell_line != cell_line)):
read_dir = 'data/{}_{}/'.format(cell_line, cross_cell_line)
else:
read_dir = 'data/{}/'.format(cell_line)
features_file = open('{}/features_{}mer'.format(read_dir, k_mer), 'rb')
features = pkl.load(features_file)
features_file.close()
labels_file = open('{}/labels'.format(read_dir), 'rb')
labels = pkl.load(labels_file)
labels_file.close()
graph_file = open('{}/graph'.format(read_dir), 'rb')
graph = pkl.load(graph_file)
graph_file.close()
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
lr = txt = '{:.2f}'.format(label_rate).split('.')[1]
idx_x_file = open('{}/x_{}.index'.format(read_dir, lr), 'rb')
idx_x = pkl.load(idx_x_file)
idx_x_file.close()
idx_ux_file = open('{}/ux_{}.index'.format(read_dir, lr), 'rb')
idx_ux = pkl.load(idx_ux_file)
idx_ux_file.close()
idx_vx_file = open('{}/vx_{}.index'.format(read_dir, lr), 'rb')
idx_vx = pkl.load(idx_vx_file)
idx_vx_file.close()
idx_tx_file = open('{}/tx_{}.index'.format(read_dir, lr), 'rb')
idx_tx = pkl.load(idx_tx_file)
idx_tx_file.close()
x = features[idx_x]
y = labels[idx_x]
ux = features[idx_ux]
uy = labels[idx_ux]
vx = features[idx_vx]
vy = labels[idx_vx]
tx = features[idx_tx]
ty = labels[idx_tx]
print('x={} ux={} vx={} tx={}'.format(x.shape[0], ux.shape[0], vx.shape[0], tx.shape[0]))
train_mask = sample_mask(idx_x, labels.shape[0])
val_mask = sample_mask(idx_vx, labels.shape[0])
test_mask = sample_mask(idx_tx, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return (adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask) | -7,958,482,504,992,849,000 | Load input data from data/cell_line directory.
| x_20.index | the indices (IDs) of labeled train instances as list object (for label_rate = 20%) |
| ux_20.index | the indices (IDs) of unlabeled train instances as list object (for label_rate = 20%) |
| vx_20.index | the indices (IDs) of validation instances as list object (for label_rate = 20%) |
| tx_20.index | the indices (IDs) of test instances as list object (for label_rate = 20%) |
| features_5mer | the feature vectors of all instances as scipy.sparse.csr.csr_matrix object (for k_mer = 5) |
| nodes | a dict in the format {chromosome_name: ID} as collections.defaultdict object |
| labels | the one-hot labels of all instances as numpy.ndarray object |
| graph | a dict in the format {ID: [IDs_of_neighbor_nodes]} as collections.defaultdict object |
All objects above must be saved using python pickle module.
:param cell_line: Name of the cell line to which the datasets belong
:return: All data input files loaded (as well the training/test data). | utils.py | load_data | smtnkc/gcn4epi | python | def load_data(cell_line, cross_cell_line, label_rate, k_mer):
'\n Load input data from data/cell_line directory.\n\n | x_20.index | the indices (IDs) of labeled train instances as list object (for label_rate = 20%) |\n | ux_20.index | the indices (IDs) of unlabeled train instances as list object (for label_rate = 20%) |\n | vx_20.index | the indices (IDs) of validation instances as list object (for label_rate = 20%) |\n | tx_20.index | the indices (IDs) of test instances as list object (for label_rate = 20%) |\n | features_5mer | the feature vectors of all instances as scipy.sparse.csr.csr_matrix object (for k_mer = 5) |\n | nodes | a dict in the format {chromosome_name: ID} as collections.defaultdict object |\n | labels | the one-hot labels of all instances as numpy.ndarray object |\n | graph | a dict in the format {ID: [IDs_of_neighbor_nodes]} as collections.defaultdict object |\n\n All objects above must be saved using python pickle module.\n\n :param cell_line: Name of the cell line to which the datasets belong\n :return: All data input files loaded (as well the training/test data).\n '
if ((cross_cell_line != None) and (cross_cell_line != cell_line)):
read_dir = 'data/{}_{}/'.format(cell_line, cross_cell_line)
else:
read_dir = 'data/{}/'.format(cell_line)
features_file = open('{}/features_{}mer'.format(read_dir, k_mer), 'rb')
features = pkl.load(features_file)
features_file.close()
labels_file = open('{}/labels'.format(read_dir), 'rb')
labels = pkl.load(labels_file)
labels_file.close()
graph_file = open('{}/graph'.format(read_dir), 'rb')
graph = pkl.load(graph_file)
graph_file.close()
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
lr = txt = '{:.2f}'.format(label_rate).split('.')[1]
idx_x_file = open('{}/x_{}.index'.format(read_dir, lr), 'rb')
idx_x = pkl.load(idx_x_file)
idx_x_file.close()
idx_ux_file = open('{}/ux_{}.index'.format(read_dir, lr), 'rb')
idx_ux = pkl.load(idx_ux_file)
idx_ux_file.close()
idx_vx_file = open('{}/vx_{}.index'.format(read_dir, lr), 'rb')
idx_vx = pkl.load(idx_vx_file)
idx_vx_file.close()
idx_tx_file = open('{}/tx_{}.index'.format(read_dir, lr), 'rb')
idx_tx = pkl.load(idx_tx_file)
idx_tx_file.close()
x = features[idx_x]
y = labels[idx_x]
ux = features[idx_ux]
uy = labels[idx_ux]
vx = features[idx_vx]
vy = labels[idx_vx]
tx = features[idx_tx]
ty = labels[idx_tx]
print('x={} ux={} vx={} tx={}'.format(x.shape[0], ux.shape[0], vx.shape[0], tx.shape[0]))
train_mask = sample_mask(idx_x, labels.shape[0])
val_mask = sample_mask(idx_vx, labels.shape[0])
test_mask = sample_mask(idx_tx, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return (adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask) |
def sparse_to_tuple(sparse_mx):
'Convert sparse matrix to tuple representation.'
def to_tuple(mx):
if (not sp.isspmatrix_coo(mx)):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return (coords, values, shape)
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx | -9,219,015,497,007,221,000 | Convert sparse matrix to tuple representation. | utils.py | sparse_to_tuple | smtnkc/gcn4epi | python | def sparse_to_tuple(sparse_mx):
def to_tuple(mx):
if (not sp.isspmatrix_coo(mx)):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return (coords, values, shape)
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx |
def preprocess_features(features):
'Row-normalize feature matrix and convert to tuple representation'
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, (- 1)).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features) | -7,883,522,908,479,923,000 | Row-normalize feature matrix and convert to tuple representation | utils.py | preprocess_features | smtnkc/gcn4epi | python | def preprocess_features(features):
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, (- 1)).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features) |
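preprocess_features above performs row normalization, multiplying by the inverse of the diagonal row-sum matrix (all-zero rows are left untouched), then returns the sparse tuple form. For example:
import numpy as np
import scipy.sparse as sp
feats = sp.csr_matrix(np.array([[1.0, 3.0], [2.0, 2.0]]))
coords, values, shape = preprocess_features(feats)
# row [1, 3] becomes [0.25, 0.75] and row [2, 2] becomes [0.5, 0.5]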
def normalize_adj(adj):
'Symmetrically normalize adjacency matrix.'
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, (- 0.5)).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() | -2,276,161,129,782,893,300 | Symmetrically normalize adjacency matrix. | utils.py | normalize_adj | smtnkc/gcn4epi | python | def normalize_adj(adj):
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, (- 0.5)).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() |
def preprocess_adj(adj):
'Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.'
adj_normalized = normalize_adj((adj + sp.eye(adj.shape[0])))
return sparse_to_tuple(adj_normalized) | -7,887,896,939,135,372,000 | Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation. | utils.py | preprocess_adj | smtnkc/gcn4epi | python | def preprocess_adj(adj):
adj_normalized = normalize_adj((adj + sp.eye(adj.shape[0])))
return sparse_to_tuple(adj_normalized) |
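normalize_adj and preprocess_adj above implement the GCN renormalization trick: add self-loops, then symmetrically normalize, A_hat = D_tilde^(-1/2) (A + I) D_tilde^(-1/2). A tiny worked case:
import numpy as np
import scipy.sparse as sp
A = sp.csr_matrix(np.array([[0.0, 1.0], [1.0, 0.0]]))
coords, values, shape = preprocess_adj(A)
# A + I is all ones and every degree is 2, so each normalized entry is (1/sqrt(2)) * 1 * (1/sqrt(2)) = 0.5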
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
'Construct feed dictionary.'
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict | -649,063,363,262,092,200 | Construct feed dictionary. | utils.py | construct_feed_dict | smtnkc/gcn4epi | python | def construct_feed_dict(features, support, labels, labels_mask, placeholders):
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict |
def chebyshev_polynomials(adj, k):
'Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).'
print('Calculating Chebyshev polynomials up to order {}...'.format(k))
adj_normalized = normalize_adj(adj)
laplacian = (sp.eye(adj.shape[0]) - adj_normalized)
(largest_eigval, _) = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (((2.0 / largest_eigval[0]) * laplacian) - sp.eye(adj.shape[0]))
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return ((2 * s_lap.dot(t_k_minus_one)) - t_k_minus_two)
for i in range(2, (k + 1)):
t_k.append(chebyshev_recurrence(t_k[(- 1)], t_k[(- 2)], scaled_laplacian))
return sparse_to_tuple(t_k) | 3,459,099,397,867,827,700 | Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation). | utils.py | chebyshev_polynomials | smtnkc/gcn4epi | python | def chebyshev_polynomials(adj, k):
print('Calculating Chebyshev polynomials up to order {}...'.format(k))
adj_normalized = normalize_adj(adj)
laplacian = (sp.eye(adj.shape[0]) - adj_normalized)
(largest_eigval, _) = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (((2.0 / largest_eigval[0]) * laplacian) - sp.eye(adj.shape[0]))
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return ((2 * s_lap.dot(t_k_minus_one)) - t_k_minus_two)
for i in range(2, (k + 1)):
t_k.append(chebyshev_recurrence(t_k[(- 1)], t_k[(- 2)], scaled_laplacian))
return sparse_to_tuple(t_k) |
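chebyshev_polynomials above returns the list [T_0, ..., T_k] of Chebyshev terms evaluated at the scaled Laplacian L_scaled = 2 L / lambda_max - I, built with the recurrence T_k = 2 * L_scaled * T_(k-1) - T_(k-2), T_0 = I, T_1 = L_scaled; the resulting tuples are what the feed dict expects as "support". For example:
import numpy as np
import scipy.sparse as sp
A = sp.csr_matrix(np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]))
support = chebyshev_polynomials(A, 3)   # list of 4 sparse matrices in tuple form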
def __eq__(self, *args):
' x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y '
pass | 2,144,965,521,805,394,200 | x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y | release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAngleUsage.py | __eq__ | YKato521/ironpython-stubs | python | def __eq__(self, *args):
' '
pass |
def __format__(self, *args):
' __format__(formattable: IFormattable,format: str) -> str '
pass | -4,894,195,495,142,889,000 | __format__(formattable: IFormattable,format: str) -> str | release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAngleUsage.py | __format__ | YKato521/ironpython-stubs | python | def __format__(self, *args):
' '
pass |
def __init__(self, *args):
' x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature '
pass | -90,002,593,062,007,400 | x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature | release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAngleUsage.py | __init__ | YKato521/ironpython-stubs | python | def __init__(self, *args):
' '
pass |
def begin(self):
'At the start of the run, we want to record the test\n execution information in the database.'
exec_payload = ExecutionQueryPayload()
exec_payload.execution_start_time = int((time.time() * 1000))
self.execution_start_time = exec_payload.execution_start_time
exec_payload.guid = self.execution_guid
exec_payload.username = getpass.getuser()
self.testcase_manager.insert_execution_data(exec_payload) | -4,280,806,599,896,135,700 | At the start of the run, we want to record the test
execution information in the database. | seleniumbase/plugins/db_reporting_plugin.py | begin | Mu-L/SeleniumBase | python | def begin(self):
'At the start of the run, we want to record the test\n execution information in the database.'
exec_payload = ExecutionQueryPayload()
exec_payload.execution_start_time = int((time.time() * 1000))
self.execution_start_time = exec_payload.execution_start_time
exec_payload.guid = self.execution_guid
exec_payload.username = getpass.getuser()
self.testcase_manager.insert_execution_data(exec_payload) |
def startTest(self, test):
'At the start of the test, set the testcase details.'
data_payload = TestcaseDataPayload()
self.testcase_guid = str(uuid.uuid4())
data_payload.guid = self.testcase_guid
data_payload.execution_guid = self.execution_guid
if hasattr(test, 'browser'):
data_payload.browser = test.browser
else:
data_payload.browser = 'N/A'
data_payload.test_address = test.id()
application = ApplicationManager.generate_application_string(test)
data_payload.env = application.split('.')[0]
data_payload.start_time = application.split('.')[1]
data_payload.state = constants.State.UNTESTED
self.testcase_manager.insert_testcase_data(data_payload)
self.case_start_time = int((time.time() * 1000))
test.testcase_guid = self.testcase_guid
self._test = test
self._test._nose_skip_reason = None | -1,761,585,010,409,018,000 | At the start of the test, set the testcase details. | seleniumbase/plugins/db_reporting_plugin.py | startTest | Mu-L/SeleniumBase | python | def startTest(self, test):
data_payload = TestcaseDataPayload()
self.testcase_guid = str(uuid.uuid4())
data_payload.guid = self.testcase_guid
data_payload.execution_guid = self.execution_guid
if hasattr(test, 'browser'):
data_payload.browser = test.browser
else:
data_payload.browser = 'N/A'
data_payload.test_address = test.id()
application = ApplicationManager.generate_application_string(test)
data_payload.env = application.split('.')[0]
data_payload.start_time = application.split('.')[1]
data_payload.state = constants.State.UNTESTED
self.testcase_manager.insert_testcase_data(data_payload)
self.case_start_time = int((time.time() * 1000))
test.testcase_guid = self.testcase_guid
self._test = test
self._test._nose_skip_reason = None |
def finalize(self, result):
'At the end of the test run, we want to\n update the DB row with the total execution time.'
runtime = (int((time.time() * 1000)) - self.execution_start_time)
self.testcase_manager.update_execution_data(self.execution_guid, runtime) | -8,234,503,311,109,827,000 | At the end of the test run, we want to
update the DB row with the total execution time. | seleniumbase/plugins/db_reporting_plugin.py | finalize | Mu-L/SeleniumBase | python | def finalize(self, result):
'At the end of the test run, we want to\n update the DB row with the total execution time.'
runtime = (int((time.time() * 1000)) - self.execution_start_time)
self.testcase_manager.update_execution_data(self.execution_guid, runtime) |
def addSuccess(self, test, capt):
'\n After each test success, record testcase run information.\n '
self.__insert_test_result(constants.State.PASSED, test)
self._result_set = True | 1,633,553,923,361,758,200 | After each test success, record testcase run information. | seleniumbase/plugins/db_reporting_plugin.py | addSuccess | Mu-L/SeleniumBase | python | def addSuccess(self, test, capt):
'\n \n '
self.__insert_test_result(constants.State.PASSED, test)
self._result_set = True |
def addFailure(self, test, err, capt=None, tbinfo=None):
'\n After each test failure, record testcase run information.\n '
self.__insert_test_result(constants.State.FAILED, test, err)
self._result_set = True | -2,188,310,513,905,270,000 | After each test failure, record testcase run information. | seleniumbase/plugins/db_reporting_plugin.py | addFailure | Mu-L/SeleniumBase | python | def addFailure(self, test, err, capt=None, tbinfo=None):
'\n \n '
self.__insert_test_result(constants.State.FAILED, test, err)
self._result_set = True |
def addError(self, test, err, capt=None):
'\n After each test error, record testcase run information.\n (Test errors should be treated the same as test failures.)\n '
self.__insert_test_result(constants.State.FAILED, test, err)
self._result_set = True | 1,358,458,308,906,235,600 | After each test error, record testcase run information.
(Test errors should be treated the same as test failures.) | seleniumbase/plugins/db_reporting_plugin.py | addError | Mu-L/SeleniumBase | python | def addError(self, test, err, capt=None):
'\n After each test error, record testcase run information.\n (Test errors should be treated the same as test failures.)\n '
self.__insert_test_result(constants.State.FAILED, test, err)
self._result_set = True |
def handleError(self, test, err, capt=None):
'\n After each test error, record testcase run information.\n "Error" also encompasses any states other than Pass or Fail, so we\n check for those first.\n '
if (err[0] == errors.BlockedTest):
self.__insert_test_result(constants.State.BLOCKED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True
elif (err[0] == errors.DeprecatedTest):
self.__insert_test_result(constants.State.DEPRECATED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True
elif (err[0] == errors.SkipTest):
self.__insert_test_result(constants.State.SKIPPED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True | -7,883,552,030,825,401,000 | After each test error, record testcase run information.
"Error" also encompasses any states other than Pass or Fail, so we
check for those first. | seleniumbase/plugins/db_reporting_plugin.py | handleError | Mu-L/SeleniumBase | python | def handleError(self, test, err, capt=None):
'\n After each test error, record testcase run information.\n "Error" also encompasses any states other than Pass or Fail, so we\n check for those first.\n '
if (err[0] == errors.BlockedTest):
self.__insert_test_result(constants.State.BLOCKED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True
elif (err[0] == errors.DeprecatedTest):
self.__insert_test_result(constants.State.DEPRECATED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True
elif (err[0] == errors.SkipTest):
self.__insert_test_result(constants.State.SKIPPED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True |
def create_user_item_matrix(df):
'\n INPUT:\n df - pandas dataframe with article_id, title, user_id columns\n \n OUTPUT:\n user_item - user item matrix \n \n Description:\n Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with \n an article and a 0 otherwise\n '
user_item = df.groupby('user_id')['article_id'].value_counts().unstack()
user_item[(user_item.isna() == False)] = 1
return user_item | -7,589,969,097,151,251,000 | INPUT:
df - pandas dataframe with article_id, title, user_id columns
OUTPUT:
user_item - user item matrix
Description:
Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
an article and a 0 otherwise | model/recommendation_functions.py | create_user_item_matrix | dalpengholic/Udacity_Recommendations_with_IBM | python | def create_user_item_matrix(df):
'\n INPUT:\n df - pandas dataframe with article_id, title, user_id columns\n \n OUTPUT:\n user_item - user item matrix \n \n Description:\n Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with \n an article and a 0 otherwise\n '
user_item = df.groupby('user_id')['article_id'].value_counts().unstack()
user_item[(user_item.isna() == False)] = 1
return user_item |
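A toy illustration of create_user_item_matrix above; note that cells the user never touched stay NaN rather than 0:
import pandas as pd
df_toy = pd.DataFrame({'user_id': [1, 1, 2],
                       'article_id': [10.0, 20.0, 10.0],
                       'title': ['a', 'b', 'a']})
user_item_toy = create_user_item_matrix(df_toy)
# user 1 -> 1 for articles 10 and 20; user 2 -> 1 for article 10, NaN for article 20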
def get_top_articles(n, df):
"\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n "
article_id_grouped_df = df.groupby(['title'])
top_articles = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles | 4,361,726,635,507,890,700 | INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles | model/recommendation_functions.py | get_top_articles | dalpengholic/Udacity_Recommendations_with_IBM | python | def get_top_articles(n, df):
"\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n "
article_id_grouped_df = df.groupby(['title'])
top_articles = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles |
def get_top_article_ids(n, df):
"\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n "
article_id_grouped_df = df.groupby(['article_id'])
top_articles_ids = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles_ids | -5,730,097,972,124,829,000 | INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles | model/recommendation_functions.py | get_top_article_ids | dalpengholic/Udacity_Recommendations_with_IBM | python | def get_top_article_ids(n, df):
"\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n "
article_id_grouped_df = df.groupby(['article_id'])
top_articles_ids = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles_ids |
def user_user_recs(user_id, user_item, df, m=10):
"\n INPUT:\n user_id - (int) a user id\n m - (int) the number of recommendations you want for the user\n \n OUTPUT:\n recs - (list) a list of recommendations for the user by article id\n rec_names - (list) a list of recommendations for the user by article title\n \n Description:\n Loops through the users based on closeness to the input user_id\n For each user - finds articles the user hasn't seen before and provides them as recs\n Does this until m recommendations are found\n \n Notes:\n * Choose the users that have the most total article interactions \n before choosing those with fewer article interactions.\n\n * Choose articles with the articles with the most total interactions \n before choosing those with fewer total interactions. \n \n "
def get_user_articles_names_ids(user_id):
'\n INPUT:\n user_id\n\n\n OUTPUT:\n article_ids - (list) a list of the article ids seen by the user\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the doc_full_name column in df_content)\n \n Description:\n Provides a list of the article_ids and article titles that have been seen by a user\n '
article_ids = user_item.loc[user_id][(user_item.loc[user_id] == 1)].index.tolist()
article_names = []
for i in article_ids:
try:
title = df[(df['article_id'] == i)]['title'].unique()[0]
except IndexError:
title = 'None'
article_names.append(title)
article_ids = list(map(str, article_ids))
return (article_ids, article_names)
def find_similar_users():
' \n OUTPUT:\n similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first\n \n Description:\n Computes the similarity of every pair of users based on the dot product\n Returns an ordered\n \n '
user_item_tmp = user_item.copy()
user_item_tmp[(user_item_tmp.isna() == True)] = 0
row = user_item_tmp.loc[user_id]
result_dot = (row @ user_item_tmp.T)
result_dot.drop(labels=[user_id], inplace=True)
most_similar_users = result_dot.sort_values(ascending=False).index.tolist()
return most_similar_users
def get_top_sorted_users(most_similar_users):
'\n INPUT:\n most_similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first \n \n OUTPUT:\n neighbors_df - (pandas dataframe) a dataframe with:\n neighbor_id - is a neighbor user_id\n similarity - measure of the similarity of each user to the provided user_id\n num_interactions - the number of articles viewed by the user - if a u\n \n Other Details - sort the neighbors_df by the similarity and then by number of interactions where \n highest of each is higher in the dataframe\n \n '
df_user_id_grouped = df.groupby('user_id')
df_user_id_grouped['article_id'].count().sort_values(ascending=False)
neighbors_df = pd.DataFrame()
neighbors_df['neighbor_id'] = most_similar_users
user_item_tmp = user_item.copy()
user_item_tmp[(user_item_tmp.isna() == True)] = 0
row = user_item_tmp.loc[user_id]
result_dot = (row @ user_item_tmp.T)
result_dot.drop(labels=[user_id], inplace=True)
similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10]
neighbors_df['similarity'] = similarity
num_interactions = []
for i in neighbors_df['neighbor_id']:
counted_interaction = df_user_id_grouped['article_id'].count().loc[i]
num_interactions.append(counted_interaction)
neighbors_df['num_interactions'] = num_interactions
neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)
return neighbors_df
recs = []
rec_names = []
counter = 0
(article_ids, article_names) = get_user_articles_names_ids(user_id)
seen_ids_set = set(article_ids)
most_similar_users = find_similar_users()[0:10]
neighbors_df = get_top_sorted_users(most_similar_users)
similar_users_list = neighbors_df['neighbor_id']
for sim_user in similar_users_list:
if (counter < m):
(sim_article_ids, sim_article_names) = get_user_articles_names_ids(sim_user)
sim_user_dict = dict(zip(sim_article_ids, sim_article_names))
sim_seen_ids_set = set(sim_article_ids)
unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set)
for i in unseen_ids_set:
if (counter < m):
recs.append(i)
rec_names.append(sim_user_dict[i])
counter += 1
return (recs, rec_names) | -5,629,144,764,730,280,000 | INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
* Choose the users that have the most total article interactions
before choosing those with fewer article interactions.
* Choose articles with the articles with the most total interactions
before choosing those with fewer total interactions. | model/recommendation_functions.py | user_user_recs | dalpengholic/Udacity_Recommendations_with_IBM | python | def user_user_recs(user_id, user_item, df, m=10):
"\n INPUT:\n user_id - (int) a user id\n m - (int) the number of recommendations you want for the user\n \n OUTPUT:\n recs - (list) a list of recommendations for the user by article id\n rec_names - (list) a list of recommendations for the user by article title\n \n Description:\n Loops through the users based on closeness to the input user_id\n For each user - finds articles the user hasn't seen before and provides them as recs\n Does this until m recommendations are found\n \n Notes:\n * Choose the users that have the most total article interactions \n before choosing those with fewer article interactions.\n\n * Choose articles with the articles with the most total interactions \n before choosing those with fewer total interactions. \n \n "
def get_user_articles_names_ids(user_id):
'\n INPUT:\n user_id\n\n\n OUTPUT:\n article_ids - (list) a list of the article ids seen by the user\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the doc_full_name column in df_content)\n \n Description:\n Provides a list of the article_ids and article titles that have been seen by a user\n '
article_ids = user_item.loc[user_id][(user_item.loc[user_id] == 1)].index.tolist()
article_names = []
for i in article_ids:
try:
title = df[(df['article_id'] == i)]['title'].unique()[0]
except IndexError:
title = 'None'
article_names.append(title)
article_ids = list(map(str, article_ids))
return (article_ids, article_names)
def find_similar_users():
' \n OUTPUT:\n similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first\n \n Description:\n Computes the similarity of every pair of users based on the dot product\n Returns an ordered\n \n '
user_item_tmp = user_item.copy()
user_item_tmp[(user_item_tmp.isna() == True)] = 0
row = user_item_tmp.loc[user_id]
result_dot = (row @ user_item_tmp.T)
result_dot.drop(labels=[user_id], inplace=True)
most_similar_users = result_dot.sort_values(ascending=False).index.tolist()
return most_similar_users
def get_top_sorted_users(most_similar_users):
'\n INPUT:\n most_similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first \n \n OUTPUT:\n neighbors_df - (pandas dataframe) a dataframe with:\n neighbor_id - is a neighbor user_id\n similarity - measure of the similarity of each user to the provided user_id\n num_interactions - the number of articles viewed by the user - if a u\n \n Other Details - sort the neighbors_df by the similarity and then by number of interactions where \n highest of each is higher in the dataframe\n \n '
df_user_id_grouped = df.groupby('user_id')
df_user_id_grouped['article_id'].count().sort_values(ascending=False)
neighbors_df = pd.DataFrame()
neighbors_df['neighbor_id'] = most_similar_users
user_item_tmp = user_item.copy()
user_item_tmp[(user_item_tmp.isna() == True)] = 0
row = user_item_tmp.loc[user_id]
result_dot = (row @ user_item_tmp.T)
result_dot.drop(labels=[user_id], inplace=True)
similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10]
neighbors_df['similarity'] = similarity
num_interactions = []
for i in neighbors_df['neighbor_id']:
counted_interaction = df_user_id_grouped['article_id'].count().loc[i]
num_interactions.append(counted_interaction)
neighbors_df['num_interactions'] = num_interactions
neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)
return neighbors_df
recs = []
rec_names = []
counter = 0
(article_ids, article_names) = get_user_articles_names_ids(user_id)
seen_ids_set = set(article_ids)
most_similar_users = find_similar_users()[0:10]
neighbors_df = get_top_sorted_users(most_similar_users)
similar_users_list = neighbors_df['neighbor_id']
for sim_user in similar_users_list:
if (counter < m):
(sim_article_ids, sim_article_names) = get_user_articles_names_ids(sim_user)
sim_user_dict = dict(zip(sim_article_ids, sim_article_names))
sim_seen_ids_set = set(sim_article_ids)
unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set)
for i in unseen_ids_set:
if (counter < m):
recs.append(i)
rec_names.append(sim_user_dict[i])
counter += 1
return (recs, rec_names) |
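The neighbor ranking inside user_user_recs above comes from a plain dot product of binary interaction rows, so the similarity between two users is simply the number of articles both have interacted with; ties are then broken by total interaction count. A minimal illustration of that score:
import numpy as np
u = np.array([1, 0, 1, 1])   # user u's row of the user-item matrix
v = np.array([1, 1, 0, 1])   # user v's row
print(int(u @ v))            # 2 shared articles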
def make_content_recs(article_id, df_content, df, m=10):
'\n INPUT:\n article_id = (int) a article id in df_content\n m - (int) the number of recommendations you want for the user\n df_content - (pandas dataframe) df_content as defined at the top of the notebook \n df - (pandas dataframe) df as defined at the top of the notebook \n\n OUTPUT:\n recs - (list) a list of recommendations for the user by article id\n rec_names - (list) a list of recommendations for the user by article title\n '
def tokenize(text):
'\n Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word. \n The funtions also cleans irrelevant stopwords.\n Input:\n 1. text: text message\n Output:\n 1. Clean_tokens : list of tokenized clean words\n '
text = re.sub('[^a-zA-Z0-9]', ' ', text)
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if (token not in stopwords)]
return clean_tokens
(vect, X) = make_Tfidf_array(df_content)
if (article_id in df_content.article_id):
cosine_similarity = linear_kernel(X, X)
df_similarity = pd.DataFrame(cosine_similarity[article_id], columns=['similarity'])
df_similarity_modified = df_similarity.drop(article_id)
recs = df_similarity_modified.similarity.sort_values(ascending=False).index[0:10].tolist()
rec_names = []
for i in recs:
name = df_content[(df_content['article_id'] == i)]['doc_full_name'].values[0]
rec_names.append(name)
else:
tfidf_feature_name = vect.get_feature_names()
booktitle = df[(df['article_id'] == article_id)]['title'].values[0]
booktitle_tokenized = tokenize(booktitle)
X_slice_list = []
for i in booktitle_tokenized:
if (i in tfidf_feature_name):
X_slice_list.append(tfidf_feature_name.index(i))
X_slice_list.sort()
X_sliced = X[:, X_slice_list]
check_df = pd.DataFrame(X_sliced, columns=X_slice_list)
check_df['sum'] = check_df.sum(axis=1)
recs = check_df.sort_values('sum', ascending=False)[0:10].index.tolist()
rec_names = []
for i in recs:
name = df_content[(df_content['article_id'] == i)]['doc_full_name'].values[0]
rec_names.append(name)
return (recs, rec_names) | -225,442,637,891,645,920 | INPUT:
article_id = (int) a article id in df_content
m - (int) the number of recommendations you want for the user
df_content - (pandas dataframe) df_content as defined at the top of the notebook
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title | model/recommendation_functions.py | make_content_recs | dalpengholic/Udacity_Recommendations_with_IBM | python | def make_content_recs(article_id, df_content, df, m=10):
'\n INPUT:\n article_id = (int) a article id in df_content\n m - (int) the number of recommendations you want for the user\n df_content - (pandas dataframe) df_content as defined at the top of the notebook \n df - (pandas dataframe) df as defined at the top of the notebook \n\n OUTPUT:\n recs - (list) a list of recommendations for the user by article id\n rec_names - (list) a list of recommendations for the user by article title\n '
def tokenize(text):
'\n Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word. \n The funtions also cleans irrelevant stopwords.\n Input:\n 1. text: text message\n Output:\n 1. Clean_tokens : list of tokenized clean words\n '
text = re.sub('[^a-zA-Z0-9]', ' ', text)
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if (token not in stopwords)]
return clean_tokens
(vect, X) = make_Tfidf_array(df_content)
if (article_id in df_content.article_id):
cosine_similarity = linear_kernel(X, X)
df_similarity = pd.DataFrame(cosine_similarity[article_id], columns=['similarity'])
df_similarity_modified = df_similarity.drop(article_id)
recs = df_similarity_modified.similarity.sort_values(ascending=False).index[0:10].tolist()
rec_names = []
for i in recs:
name = df_content[(df_content['article_id'] == i)]['doc_full_name'].values[0]
rec_names.append(name)
else:
tfidf_feature_name = vect.get_feature_names()
booktitle = df[(df['article_id'] == article_id)]['title'].values[0]
booktitle_tokenized = tokenize(booktitle)
X_slice_list = []
for i in booktitle_tokenized:
if (i in tfidf_feature_name):
X_slice_list.append(tfidf_feature_name.index(i))
X_slice_list.sort()
X_sliced = X[:, X_slice_list]
check_df = pd.DataFrame(X_sliced, columns=X_slice_list)
check_df['sum'] = check_df.sum(axis=1)
recs = check_df.sort_values('sum', ascending=False)[0:10].index.tolist()
rec_names = []
for i in recs:
name = df_content[(df_content['article_id'] == i)]['doc_full_name'].values[0]
rec_names.append(name)
return (recs, rec_names) |
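The row above builds content recommendations from TF-IDF vectors and cosine similarity. Below is a minimal, self-contained sketch of that idea on toy data; the small frame stands in for df_content and is not the project's own data or its make_Tfidf_array helper.

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

docs = pd.DataFrame({
    'article_id': [0, 1, 2],
    'doc_full_name': ['intro to pandas', 'deep learning basics', 'pandas for data analysis'],
})
X = TfidfVectorizer(stop_words='english').fit_transform(docs['doc_full_name'])
sim = linear_kernel(X, X)                    # cosine similarity (TF-IDF rows are L2-normalised)
best = sim[0].argsort()[::-1][1:3]           # articles most similar to article 0, excluding itself
print(docs.loc[best, 'doc_full_name'].tolist())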
def get_user_articles_names_ids(user_id):
'\n INPUT:\n user_id\n\n\n OUTPUT:\n article_ids - (list) a list of the article ids seen by the user\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the doc_full_name column in df_content)\n \n Description:\n Provides a list of the article_ids and article titles that have been seen by a user\n '
article_ids = user_item.loc[user_id][(user_item.loc[user_id] == 1)].index.tolist()
article_names = []
for i in article_ids:
try:
title = df[(df['article_id'] == i)]['title'].unique()[0]
except IndexError:
title = 'None'
article_names.append(title)
article_ids = list(map(str, article_ids))
return (article_ids, article_names) | -2,381,257,371,788,109,000 | INPUT:
user_id
OUTPUT:
article_ids - (list) a list of the article ids seen by the user
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the doc_full_name column in df_content)
Description:
Provides a list of the article_ids and article titles that have been seen by a user | model/recommendation_functions.py | get_user_articles_names_ids | dalpengholic/Udacity_Recommendations_with_IBM | python | def get_user_articles_names_ids(user_id):
'\n INPUT:\n user_id\n\n\n OUTPUT:\n article_ids - (list) a list of the article ids seen by the user\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the doc_full_name column in df_content)\n \n Description:\n Provides a list of the article_ids and article titles that have been seen by a user\n '
article_ids = user_item.loc[user_id][(user_item.loc[user_id] == 1)].index.tolist()
article_names = []
for i in article_ids:
try:
title = df[(df['article_id'] == i)]['title'].unique()[0]
except IndexError:
title = 'None'
article_names.append(title)
article_ids = list(map(str, article_ids))
return (article_ids, article_names) |
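The lookup above reads one row of the binary user_item matrix and keeps the columns marked 1. A toy illustration of that step, assuming user ids as the index and article ids as the columns:

import pandas as pd

user_item = pd.DataFrame(
    [[1, 0, 1],
     [0, 1, 1]],
    index=[10, 11],                  # user ids
    columns=[100.0, 101.0, 102.0],   # article ids
)
seen_ids = user_item.loc[10][user_item.loc[10] == 1].index.tolist()
print(list(map(str, seen_ids)))      # ['100.0', '102.0']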
def find_similar_users():
' \n OUTPUT:\n similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first\n \n Description:\n Computes the similarity of every pair of users based on the dot product\n Returns an ordered\n \n '
user_item_tmp = user_item.copy()
user_item_tmp[(user_item_tmp.isna() == True)] = 0
row = user_item_tmp.loc[user_id]
result_dot = (row @ user_item_tmp.T)
result_dot.drop(labels=[user_id], inplace=True)
most_similar_users = result_dot.sort_values(ascending=False).index.tolist()
return most_similar_users | -6,149,135,974,720,328,000 | OUTPUT:
similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
Description:
Computes the dot-product similarity between the given user and every other user
Returns an ordered list with the most similar users first
' \n OUTPUT:\n similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first\n \n Description:\n Computes the similarity of every pair of users based on the dot product\n Returns an ordered\n \n '
user_item_tmp = user_item.copy()
user_item_tmp[(user_item_tmp.isna() == True)] = 0
row = user_item_tmp.loc[user_id]
result_dot = (row @ user_item_tmp.T)
result_dot.drop(labels=[user_id], inplace=True)
most_similar_users = result_dot.sort_values(ascending=False).index.tolist()
return most_similar_users |
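find_similar_users above ranks users by the dot product of binary interaction rows. A small worked example of the same computation, with made-up user ids:

import pandas as pd

user_item = pd.DataFrame(
    [[1, 1, 0],
     [1, 1, 1],
     [0, 0, 1]],
    index=[1, 2, 3],
)
user_id = 1
sims = (user_item.loc[user_id] @ user_item.T).drop(user_id)
print(sims.sort_values(ascending=False).index.tolist())   # [2, 3]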
def get_top_sorted_users(most_similar_users):
'\n INPUT:\n most_similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first \n \n OUTPUT:\n neighbors_df - (pandas dataframe) a dataframe with:\n neighbor_id - is a neighbor user_id\n similarity - measure of the similarity of each user to the provided user_id\n num_interactions - the number of articles viewed by the user - if a u\n \n Other Details - sort the neighbors_df by the similarity and then by number of interactions where \n highest of each is higher in the dataframe\n \n '
df_user_id_grouped = df.groupby('user_id')
df_user_id_grouped['article_id'].count().sort_values(ascending=False)
neighbors_df = pd.DataFrame()
neighbors_df['neighbor_id'] = most_similar_users
user_item_tmp = user_item.copy()
user_item_tmp[(user_item_tmp.isna() == True)] = 0
row = user_item_tmp.loc[user_id]
result_dot = (row @ user_item_tmp.T)
result_dot.drop(labels=[user_id], inplace=True)
similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10]
neighbors_df['similarity'] = similarity
num_interactions = []
for i in neighbors_df['neighbor_id']:
counted_interaction = df_user_id_grouped['article_id'].count().loc[i]
num_interactions.append(counted_interaction)
neighbors_df['num_interactions'] = num_interactions
neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)
return neighbors_df | 8,527,079,837,033,794,000 | INPUT:
most_similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
OUTPUT:
neighbors_df - (pandas dataframe) a dataframe with:
neighbor_id - is a neighbor user_id
similarity - measure of the similarity of each user to the provided user_id
num_interactions - the number of articles viewed by the user - if a u
Other Details - sort the neighbors_df by the similarity and then by number of interactions where
highest of each is higher in the dataframe | model/recommendation_functions.py | get_top_sorted_users | dalpengholic/Udacity_Recommendations_with_IBM | python | def get_top_sorted_users(most_similar_users):
'\n INPUT:\n most_similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first \n \n OUTPUT:\n neighbors_df - (pandas dataframe) a dataframe with:\n neighbor_id - is a neighbor user_id\n similarity - measure of the similarity of each user to the provided user_id\n num_interactions - the number of articles viewed by the user - if a u\n \n Other Details - sort the neighbors_df by the similarity and then by number of interactions where \n highest of each is higher in the dataframe\n \n '
df_user_id_grouped = df.groupby('user_id')
df_user_id_grouped['article_id'].count().sort_values(ascending=False)
neighbors_df = pd.DataFrame()
neighbors_df['neighbor_id'] = most_similar_users
user_item_tmp = user_item.copy()
user_item_tmp[(user_item_tmp.isna() == True)] = 0
row = user_item_tmp.loc[user_id]
result_dot = (row @ user_item_tmp.T)
result_dot.drop(labels=[user_id], inplace=True)
similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10]
neighbors_df['similarity'] = similarity
num_interactions = []
for i in neighbors_df['neighbor_id']:
counted_interaction = df_user_id_grouped['article_id'].count().loc[i]
num_interactions.append(counted_interaction)
neighbors_df['num_interactions'] = num_interactions
neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)
return neighbors_df |
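The sorting convention above (similarity first, interaction count as the tie-breaker, both descending) shown on a toy neighbors_df with invented values:

import pandas as pd

neighbors_df = pd.DataFrame({
    'neighbor_id':      [7, 8, 9],
    'similarity':       [17, 17, 12],
    'num_interactions': [40, 95, 60],
}).sort_values(by=['similarity', 'num_interactions'], ascending=False)
print(neighbors_df)   # id 8 first (tie on similarity broken by num_interactions), then 7, then 9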
def tokenize(text):
'\n Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word. \n The funtions also cleans irrelevant stopwords.\n Input:\n 1. text: text message\n Output:\n 1. Clean_tokens : list of tokenized clean words\n '
text = re.sub('[^a-zA-Z0-9]', ' ', text)
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if (token not in stopwords)]
return clean_tokens | -4,629,823,160,291,802,000 | Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word.
The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words | model/recommendation_functions.py | tokenize | dalpengholic/Udacity_Recommendations_with_IBM | python | def tokenize(text):
'\n Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word. \n The funtions also cleans irrelevant stopwords.\n Input:\n 1. text: text message\n Output:\n 1. Clean_tokens : list of tokenized clean words\n '
text = re.sub('[^a-zA-Z0-9]', ' ', text)
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if (token not in stopwords)]
return clean_tokens |
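A standalone sketch of the tokenisation pipeline in the row above: regex clean-up, word_tokenize, verb lemmatisation, lower-casing, and stop-word removal. It assumes the nltk punkt, wordnet and stopwords corpora have already been downloaded.

import re
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer

text = "Using Pandas DataFrames for analyzing customer data!"
text = re.sub('[^a-zA-Z0-9]', ' ', text)
lemmatizer = WordNetLemmatizer()
tokens = [lemmatizer.lemmatize(tok, pos='v').lower().strip() for tok in word_tokenize(text)]
stop = set(nltk.corpus.stopwords.words('english'))
print([tok for tok in tokens if tok not in stop])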
def get_imdb(name):
'Get an imdb (image database) by name.'
if (name not in __sets):
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]() | -3,263,413,934,054,098,000 | Get an imdb (image database) by name. | lib/datasets/factory.py | get_imdb | hinthornw/faster_rcnn_symbols | python | def get_imdb(name):
if (name not in __sets):
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]() |
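get_imdb above is a simple registry/factory lookup. The sketch below rebuilds the pattern with a stand-in __sets dictionary; the 'voc_<year>_<split>' names and the idea of a pascal_voc constructor are assumptions modelled on typical Faster R-CNN factories, not necessarily this repository's exact registry.

__sets = {}
for year in ('2007', '2012'):
    for split in ('train', 'val', 'trainval', 'test'):
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda n=name: 'imdb:' + n)   # stand-in for a pascal_voc(split, year) constructor

def get_imdb(name):
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()

print(get_imdb('voc_2007_trainval'))
print(sorted(__sets.keys())[:2])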
def list_imdbs():
'List all registered imdbs.'
return list(__sets.keys()) | 4,693,669,182,354,276,000 | List all registered imdbs. | lib/datasets/factory.py | list_imdbs | hinthornw/faster_rcnn_symbols | python | def list_imdbs():
return list(__sets.keys()) |
def __init__(self, configuration=None):
'CreateConfigurationResponse - a model defined in huaweicloud sdk'
super(CreateConfigurationResponse, self).__init__()
self._configuration = None
self.discriminator = None
if (configuration is not None):
self.configuration = configuration | -6,258,271,062,558,947,000 | CreateConfigurationResponse - a model defined in huaweicloud sdk | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/create_configuration_response.py | __init__ | JeffreyDin/huaweicloud-sdk-python-v3 | python | def __init__(self, configuration=None):
super(CreateConfigurationResponse, self).__init__()
self._configuration = None
self.discriminator = None
if (configuration is not None):
self.configuration = configuration |
@property
def configuration(self):
'Gets the configuration of this CreateConfigurationResponse.\n\n\n :return: The configuration of this CreateConfigurationResponse.\n :rtype: ConfigurationSummaryForCreate\n '
return self._configuration | -8,497,689,340,689,618,000 | Gets the configuration of this CreateConfigurationResponse.
:return: The configuration of this CreateConfigurationResponse.
:rtype: ConfigurationSummaryForCreate | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/create_configuration_response.py | configuration | JeffreyDin/huaweicloud-sdk-python-v3 | python | @property
def configuration(self):
'Gets the configuration of this CreateConfigurationResponse.\n\n\n :return: The configuration of this CreateConfigurationResponse.\n :rtype: ConfigurationSummaryForCreate\n '
return self._configuration |
@configuration.setter
def configuration(self, configuration):
'Sets the configuration of this CreateConfigurationResponse.\n\n\n :param configuration: The configuration of this CreateConfigurationResponse.\n :type: ConfigurationSummaryForCreate\n '
self._configuration = configuration | -3,457,435,754,205,594,600 | Sets the configuration of this CreateConfigurationResponse.
:param configuration: The configuration of this CreateConfigurationResponse.
:type: ConfigurationSummaryForCreate | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/create_configuration_response.py | configuration | JeffreyDin/huaweicloud-sdk-python-v3 | python | @configuration.setter
def configuration(self, configuration):
'Sets the configuration of this CreateConfigurationResponse.\n\n\n :param configuration: The configuration of this CreateConfigurationResponse.\n :type: ConfigurationSummaryForCreate\n '
self._configuration = configuration |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result | 2,594,216,033,120,720,000 | Returns the model properties as a dict | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/create_configuration_response.py | to_dict | JeffreyDin/huaweicloud-sdk-python-v3 | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/create_configuration_response.py | to_str | JeffreyDin/huaweicloud-sdk-python-v3 | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/create_configuration_response.py | __repr__ | JeffreyDin/huaweicloud-sdk-python-v3 | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, CreateConfigurationResponse)):
return False
return (self.__dict__ == other.__dict__) | 5,950,562,783,730,043,000 | Returns true if both objects are equal | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/create_configuration_response.py | __eq__ | JeffreyDin/huaweicloud-sdk-python-v3 | python | def __eq__(self, other):
if (not isinstance(other, CreateConfigurationResponse)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/create_configuration_response.py | __ne__ | JeffreyDin/huaweicloud-sdk-python-v3 | python | def __ne__(self, other):
return (not (self == other)) |
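The CreateConfigurationResponse rows above follow the usual generated-SDK model pattern (openapi_types, sensitive_list, to_dict, __repr__, __eq__, __ne__). A toy class reproducing that behaviour, purely illustrative and not the real huaweicloudsdkrds model:

import pprint

class ToyConfigurationResponse(object):
    # illustrative stand-in with the same conventions as the generated model above
    openapi_types = {'configuration': 'str'}
    sensitive_list = []

    def __init__(self, configuration=None):
        self._configuration = configuration

    @property
    def configuration(self):
        return self._configuration

    def to_dict(self):
        # sensitive attributes are masked, everything else is copied out
        return {attr: ('****' if attr in self.sensitive_list else getattr(self, attr))
                for attr in self.openapi_types}

    def __repr__(self):
        return pprint.pformat(self.to_dict())

    def __eq__(self, other):
        return isinstance(other, ToyConfigurationResponse) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other

a, b = ToyConfigurationResponse('cfg-1'), ToyConfigurationResponse('cfg-1')
print(a == b, a != b)   # True False
print(a)                # {'configuration': 'cfg-1'}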
def __init__(self, parent=None, pagesize='A3', orientation='landscape', x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None, start=None, end=None, tracklines=0, track_size=0.75, circular=1):
" __init__(self, parent, pagesize='A3', orientation='landscape',\n x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,\n start=None, end=None, tracklines=0, track_size=0.75,\n circular=1)\n\n o parent Diagram object containing the data that the drawer\n draws\n\n o pagesize String describing the ISO size of the image, or a tuple\n of pixels\n\n o orientation String describing the required orientation of the\n final drawing ('landscape' or 'portrait')\n\n o x Float (0->1) describing the relative size of the X\n margins to the page\n\n o y Float (0->1) describing the relative size of the Y\n margins to the page\n\n o xl Float (0->1) describing the relative size of the left X\n margin to the page (overrides x)\n\n o xl Float (0->1) describing the relative size of the left X\n margin to the page (overrides x)\n\n o xr Float (0->1) describing the relative size of the right X\n margin to the page (overrides x)\n\n o yt Float (0->1) describing the relative size of the top Y\n margin to the page (overrides y)\n\n o yb Float (0->1) describing the relative size of the lower Y\n margin to the page (overrides y)\n\n o start Int, the position to begin drawing the diagram at\n\n o end Int, the position to stop drawing the diagram at\n\n o tracklines Boolean flag to show (or not) lines delineating tracks\n on the diagram \n \n o track_size The proportion of the available track height that\n should be taken up in drawing\n\n o circular Boolean flaw to show whether the passed sequence is\n circular or not\n "
AbstractDrawer.__init__(self, parent, pagesize, orientation, x, y, xl, xr, yt, yb, start, end, tracklines)
self.track_size = track_size
if (circular == False):
self.sweep = 0.9
else:
self.sweep = 1 | -5,975,716,011,951,879,000 | __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
o circular Boolean flag to show whether the passed sequence is
circular or not | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | __init__ | LyonsLab/coge | python | def __init__(self, parent=None, pagesize='A3', orientation='landscape', x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None, start=None, end=None, tracklines=0, track_size=0.75, circular=1):
" __init__(self, parent, pagesize='A3', orientation='landscape',\n x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,\n start=None, end=None, tracklines=0, track_size=0.75,\n circular=1)\n\n o parent Diagram object containing the data that the drawer\n draws\n\n o pagesize String describing the ISO size of the image, or a tuple\n of pixels\n\n o orientation String describing the required orientation of the\n final drawing ('landscape' or 'portrait')\n\n o x Float (0->1) describing the relative size of the X\n margins to the page\n\n o y Float (0->1) describing the relative size of the Y\n margins to the page\n\n o xl Float (0->1) describing the relative size of the left X\n margin to the page (overrides x)\n\n o xl Float (0->1) describing the relative size of the left X\n margin to the page (overrides x)\n\n o xr Float (0->1) describing the relative size of the right X\n margin to the page (overrides x)\n\n o yt Float (0->1) describing the relative size of the top Y\n margin to the page (overrides y)\n\n o yb Float (0->1) describing the relative size of the lower Y\n margin to the page (overrides y)\n\n o start Int, the position to begin drawing the diagram at\n\n o end Int, the position to stop drawing the diagram at\n\n o tracklines Boolean flag to show (or not) lines delineating tracks\n on the diagram \n \n o track_size The proportion of the available track height that\n should be taken up in drawing\n\n o circular Boolean flaw to show whether the passed sequence is\n circular or not\n "
AbstractDrawer.__init__(self, parent, pagesize, orientation, x, y, xl, xr, yt, yb, start, end, tracklines)
self.track_size = track_size
if (circular == False):
self.sweep = 0.9
else:
self.sweep = 1 |
def set_track_heights(self):
' set_track_heights(self)\n\n Since tracks may not be of identical heights, the bottom and top\n radius for each track is stored in a dictionary - self.track_radii,\n keyed by track number\n '
top_track = max(self.drawn_tracks)
trackunit_sum = 0
trackunits = {}
heightholder = 0
for track in range(1, (top_track + 1)):
try:
trackheight = self._parent[track].height
except:
trackheight = 1
trackunit_sum += trackheight
trackunits[track] = (heightholder, (heightholder + trackheight))
heightholder += trackheight
trackunit_height = ((0.5 * min(self.pagewidth, self.pageheight)) / trackunit_sum)
self.track_radii = {}
track_crop = ((trackunit_height * (1 - self.track_size)) / 2.0)
for track in trackunits:
top = ((trackunits[track][1] * trackunit_height) - track_crop)
btm = ((trackunits[track][0] * trackunit_height) + track_crop)
ctr = (btm + ((top - btm) / 2.0))
self.track_radii[track] = (btm, ctr, top) | -894,090,286,291,584,300 | set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | set_track_heights | LyonsLab/coge | python | def set_track_heights(self):
' set_track_heights(self)\n\n Since tracks may not be of identical heights, the bottom and top\n radius for each track is stored in a dictionary - self.track_radii,\n keyed by track number\n '
top_track = max(self.drawn_tracks)
trackunit_sum = 0
trackunits = {}
heightholder = 0
for track in range(1, (top_track + 1)):
try:
trackheight = self._parent[track].height
except:
trackheight = 1
trackunit_sum += trackheight
trackunits[track] = (heightholder, (heightholder + trackheight))
heightholder += trackheight
trackunit_height = ((0.5 * min(self.pagewidth, self.pageheight)) / trackunit_sum)
self.track_radii = {}
track_crop = ((trackunit_height * (1 - self.track_size)) / 2.0)
for track in trackunits:
top = ((trackunits[track][1] * trackunit_height) - track_crop)
btm = ((trackunits[track][0] * trackunit_height) + track_crop)
ctr = (btm + ((top - btm) / 2.0))
self.track_radii[track] = (btm, ctr, top) |
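Worked numbers for the layout in set_track_heights above, assuming a 600 pt shorter page dimension, two tracks with relative heights 1 and 2, and track_size=0.75:

page_min = 600.0                       # shorter page dimension, in points (assumed)
track_heights = {1: 1, 2: 2}           # track number -> relative height
track_size = 0.75

unit = 0.5 * page_min / sum(track_heights.values())   # 100.0 pt per height unit
crop = unit * (1 - track_size) / 2.0                  # 12.5 pt trimmed at bottom and top
track_radii, holder = {}, 0
for track, h in sorted(track_heights.items()):
    btm = holder * unit + crop
    top = (holder + h) * unit - crop
    track_radii[track] = (btm, (btm + top) / 2.0, top)
    holder += h
print(track_radii)   # {1: (12.5, 50.0, 87.5), 2: (112.5, 200.0, 287.5)}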
def draw(self):
' draw(self)\n\n Draw a circular diagram of the stored data\n '
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = []
feature_labels = []
greytrack_bgs = []
greytrack_labels = []
scale_axes = []
scale_labels = []
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
(gbgs, glabels) = self.draw_greytrack(track)
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
(features, flabels) = self.draw_track(track)
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
(axes, slabels) = self.draw_scale(track)
scale_axes.append(axes)
scale_labels.append(slabels)
element_groups = [greytrack_bgs, feature_elements, scale_axes, scale_labels, feature_labels, greytrack_labels]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines:
self.draw_test_tracks() | -7,497,354,379,445,910,000 | draw(self)
Draw a circular diagram of the stored data | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw | LyonsLab/coge | python | def draw(self):
' draw(self)\n\n Draw a circular diagram of the stored data\n '
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = []
feature_labels = []
greytrack_bgs = []
greytrack_labels = []
scale_axes = []
scale_labels = []
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
(gbgs, glabels) = self.draw_greytrack(track)
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
(features, flabels) = self.draw_track(track)
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
(axes, slabels) = self.draw_scale(track)
scale_axes.append(axes)
scale_labels.append(slabels)
element_groups = [greytrack_bgs, feature_elements, scale_axes, scale_labels, feature_labels, greytrack_labels]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines:
self.draw_test_tracks() |
def draw_track(self, track):
' draw_track(self, track) -> ([element, element,...], [element, element,...])\n\n o track Track object\n\n Return tuple of (list of track elements, list of track labels) \n '
track_elements = []
track_labels = []
set_methods = {FeatureSet: self.draw_feature_set, GraphSet: self.draw_graph_set}
for set in track.get_sets():
(elements, labels) = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return (track_elements, track_labels) | 4,402,995,958,333,717,500 | draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels) | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_track | LyonsLab/coge | python | def draw_track(self, track):
' draw_track(self, track) -> ([element, element,...], [element, element,...])\n\n o track Track object\n\n Return tuple of (list of track elements, list of track labels) \n '
track_elements = []
track_labels = []
set_methods = {FeatureSet: self.draw_feature_set, GraphSet: self.draw_graph_set}
for set in track.get_sets():
(elements, labels) = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return (track_elements, track_labels) |
def draw_feature_set(self, set):
' draw_feature_set(self, set) -> ([element, element,...], [element, element,...])\n\n o set FeatureSet object\n\n Returns a tuple (list of elements describing features, list of\n labels for elements)\n '
feature_elements = []
label_elements = []
for feature in set.get_features():
if (self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end)):
(features, labels) = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return (feature_elements, label_elements) | 6,090,080,020,066,561,000 | draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements) | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_feature_set | LyonsLab/coge | python | def draw_feature_set(self, set):
' draw_feature_set(self, set) -> ([element, element,...], [element, element,...])\n\n o set FeatureSet object\n\n Returns a tuple (list of elements describing features, list of\n labels for elements)\n '
feature_elements = []
label_elements = []
for feature in set.get_features():
if (self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end)):
(features, labels) = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return (feature_elements, label_elements) |
def draw_feature(self, feature):
' draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])\n\n o feature Feature containing location info\n\n Returns tuple of (list of elements describing single feature, list\n of labels for those elements)\n '
feature_elements = []
label_elements = []
if feature.hide:
return (feature_elements, label_elements)
for (locstart, locend) in feature.locations:
(feature_sigil, label) = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if (label is not None):
label_elements.append(label)
return (feature_elements, label_elements) | 2,990,649,715,615,632,000 | draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements) | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_feature | LyonsLab/coge | python | def draw_feature(self, feature):
' draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])\n\n o feature Feature containing location info\n\n Returns tuple of (list of elements describing single feature, list\n of labels for those elements)\n '
feature_elements = []
label_elements = []
if feature.hide:
return (feature_elements, label_elements)
for (locstart, locend) in feature.locations:
(feature_sigil, label) = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if (label is not None):
label_elements.append(label)
return (feature_elements, label_elements) |
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
' get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)\n\n o feature Feature object\n\n o locstart The start position of the feature\n\n o locend The end position of the feature\n\n Returns a drawable indicator of the feature, and any required label\n for it\n '
(btm, ctr, top) = self.track_radii[self.current_track_level]
(startangle, startcos, startsin) = self.canvas_angle(locstart)
(endangle, endcos, endsin) = self.canvas_angle(locend)
(midangle, midcos, midsin) = self.canvas_angle((float((locend + locstart)) / 2))
draw_methods = {'BOX': self._draw_arc, 'ARROW': self._draw_arc_arrow}
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
if hasattr(feature, 'url'):
kwargs['hrefURL'] = feature.url
kwargs['hrefTitle'] = feature.name
if (feature.color == colors.white):
border = colors.black
else:
border = feature.color
if (feature.strand == 1):
sigil = method(ctr, top, startangle, endangle, feature.color, border, orientation='right', **kwargs)
elif (feature.strand == (- 1)):
sigil = method(btm, ctr, startangle, endangle, feature.color, border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color, border, **kwargs)
if feature.label:
label = String(0, 0, feature.name.strip(), fontName=feature.label_font, fontSize=feature.label_size, fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = (startangle + (0.5 * pi))
(sinval, cosval) = (startsin, startcos)
if (feature.strand != (- 1)):
if (startangle < pi):
(sinval, cosval) = (endsin, endcos)
label_angle = (endangle - (0.5 * pi))
labelgroup.contents[0].textAnchor = 'end'
pos = (self.xcenter + (top * sinval))
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel, (- sinlabel), sinlabel, coslabel, pos, (self.ycenter + (top * cosval)))
else:
if (startangle < pi):
(sinval, cosval) = (endsin, endcos)
label_angle = (endangle - (0.5 * pi))
else:
labelgroup.contents[0].textAnchor = 'end'
pos = (self.xcenter + (btm * sinval))
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel, (- sinlabel), sinlabel, coslabel, pos, (self.ycenter + (btm * cosval)))
else:
labelgroup = None
return (sigil, labelgroup) | -4,611,603,714,295,767,600 | get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | get_feature_sigil | LyonsLab/coge | python | def get_feature_sigil(self, feature, locstart, locend, **kwargs):
' get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)\n\n o feature Feature object\n\n o locstart The start position of the feature\n\n o locend The end position of the feature\n\n Returns a drawable indicator of the feature, and any required label\n for it\n '
(btm, ctr, top) = self.track_radii[self.current_track_level]
(startangle, startcos, startsin) = self.canvas_angle(locstart)
(endangle, endcos, endsin) = self.canvas_angle(locend)
(midangle, midcos, midsin) = self.canvas_angle((float((locend + locstart)) / 2))
draw_methods = {'BOX': self._draw_arc, 'ARROW': self._draw_arc_arrow}
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
if hasattr(feature, 'url'):
kwargs['hrefURL'] = feature.url
kwargs['hrefTitle'] = feature.name
if (feature.color == colors.white):
border = colors.black
else:
border = feature.color
if (feature.strand == 1):
sigil = method(ctr, top, startangle, endangle, feature.color, border, orientation='right', **kwargs)
elif (feature.strand == (- 1)):
sigil = method(btm, ctr, startangle, endangle, feature.color, border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color, border, **kwargs)
if feature.label:
label = String(0, 0, feature.name.strip(), fontName=feature.label_font, fontSize=feature.label_size, fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = (startangle + (0.5 * pi))
(sinval, cosval) = (startsin, startcos)
if (feature.strand != (- 1)):
if (startangle < pi):
(sinval, cosval) = (endsin, endcos)
label_angle = (endangle - (0.5 * pi))
labelgroup.contents[0].textAnchor = 'end'
pos = (self.xcenter + (top * sinval))
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel, (- sinlabel), sinlabel, coslabel, pos, (self.ycenter + (top * cosval)))
else:
if (startangle < pi):
(sinval, cosval) = (endsin, endcos)
label_angle = (endangle - (0.5 * pi))
else:
labelgroup.contents[0].textAnchor = 'end'
pos = (self.xcenter + (btm * sinval))
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel, (- sinlabel), sinlabel, coslabel, pos, (self.ycenter + (btm * cosval)))
else:
labelgroup = None
return (sigil, labelgroup) |
def draw_graph_set(self, set):
' draw_graph_set(self, set) -> ([element, element,...], [element, element,...])\n \n o set GraphSet object\n\n Returns tuple (list of graph elements, list of graph labels)\n '
elements = []
style_methods = {'line': self.draw_line_graph, 'heat': self.draw_heat_graph, 'bar': self.draw_bar_graph}
for graph in set.get_graphs():
elements += style_methods[graph.style](graph)
return (elements, []) | 4,303,515,553,062,250,000 | draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels) | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_graph_set | LyonsLab/coge | python | def draw_graph_set(self, set):
' draw_graph_set(self, set) -> ([element, element,...], [element, element,...])\n \n o set GraphSet object\n\n Returns tuple (list of graph elements, list of graph labels)\n '
elements = []
style_methods = {'line': self.draw_line_graph, 'heat': self.draw_heat_graph, 'bar': self.draw_bar_graph}
for graph in set.get_graphs():
elements += style_methods[graph.style](graph)
return (elements, []) |
def draw_line_graph(self, graph):
' draw_line_graph(self, graph, center) -> [element, element,...]\n\n o graph GraphData object\n\n Returns a line graph as a list of drawable elements\n '
line_elements = []
data_quartiles = graph.quartiles()
(minval, maxval) = (data_quartiles[0], data_quartiles[4])
(btm, ctr, top) = self.track_radii[self.current_track_level]
trackheight = (0.5 * (top - btm))
datarange = (maxval - minval)
if (datarange == 0):
datarange = trackheight
data = graph[self.start:self.end]
if (graph.center is None):
midval = ((maxval + minval) / 2.0)
else:
midval = graph.center
resolution = max((midval - minval), (maxval - midval))
(pos, val) = data[0]
(lastangle, lastcos, lastsin) = self.canvas_angle(pos)
posheight = (((trackheight * (val - midval)) / resolution) + ctr)
lastx = (self.xcenter + (posheight * lastsin))
lasty = (self.ycenter + (posheight * lastcos))
for (pos, val) in data:
(posangle, poscos, possin) = self.canvas_angle(pos)
posheight = (((trackheight * (val - midval)) / resolution) + ctr)
x = (self.xcenter + (posheight * possin))
y = (self.ycenter + (posheight * poscos))
line_elements.append(Line(lastx, lasty, x, y, strokeColor=graph.poscolor, strokeWidth=graph.linewidth))
(lastx, lasty) = (x, y)
return line_elements | -6,926,512,762,448,647,000 | draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_line_graph | LyonsLab/coge | python | def draw_line_graph(self, graph):
' draw_line_graph(self, graph, center) -> [element, element,...]\n\n o graph GraphData object\n\n Returns a line graph as a list of drawable elements\n '
line_elements = []
data_quartiles = graph.quartiles()
(minval, maxval) = (data_quartiles[0], data_quartiles[4])
(btm, ctr, top) = self.track_radii[self.current_track_level]
trackheight = (0.5 * (top - btm))
datarange = (maxval - minval)
if (datarange == 0):
datarange = trackheight
data = graph[self.start:self.end]
if (graph.center is None):
midval = ((maxval + minval) / 2.0)
else:
midval = graph.center
resolution = max((midval - minval), (maxval - midval))
(pos, val) = data[0]
(lastangle, lastcos, lastsin) = self.canvas_angle(pos)
posheight = (((trackheight * (val - midval)) / resolution) + ctr)
lastx = (self.xcenter + (posheight * lastsin))
lasty = (self.ycenter + (posheight * lastcos))
for (pos, val) in data:
(posangle, poscos, possin) = self.canvas_angle(pos)
posheight = (((trackheight * (val - midval)) / resolution) + ctr)
x = (self.xcenter + (posheight * possin))
y = (self.ycenter + (posheight * poscos))
line_elements.append(Line(lastx, lasty, x, y, strokeColor=graph.poscolor, strokeWidth=graph.linewidth))
(lastx, lasty) = (x, y)
return line_elements |
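draw_line_graph above turns a sequence position into a point on the circle via canvas_angle, then offsets from the centre by radius*sin for x and radius*cos for y. canvas_angle is not shown in this excerpt; the sketch below assumes it maps positions linearly onto sweep*2*pi measured from 12 o'clock, which is consistent with how its results are used above.

from math import pi, sin, cos

def canvas_angle(base, start, length, sweep=1.0):
    # assumed linear mapping of sequence position to angle, per the usage above
    angle = sweep * 2 * pi * (base - start) / length
    return angle, cos(angle), sin(angle)

xcenter = ycenter = 300.0
radius = 100.0
angle, c, s = canvas_angle(base=2500, start=0, length=10000)
x = xcenter + radius * s
y = ycenter + radius * c
print(round(x, 1), round(y, 1))   # 400.0 300.0 -> a quarter turn clockwise from 12 o'clock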
def draw_bar_graph(self, graph):
' draw_bar_graph(self, graph) -> [element, element,...]\n\n o graph Graph object\n\n Returns a list of drawable elements for a bar graph of the passed\n Graph object\n '
bar_elements = []
data_quartiles = graph.quartiles()
(minval, maxval) = (data_quartiles[0], data_quartiles[4])
(btm, ctr, top) = self.track_radii[self.current_track_level]
trackheight = (0.5 * (top - btm))
datarange = (maxval - minval)
if (datarange == 0):
datarange = trackheight
data = graph[self.start:self.end]
if (graph.center is None):
midval = ((maxval + minval) / 2.0)
else:
midval = graph.center
newdata = intermediate_points(self.start, self.end, graph[self.start:self.end])
resolution = max((midval - minval), (maxval - midval))
if (resolution == 0):
resolution = trackheight
for (pos0, pos1, val) in newdata:
(pos0angle, pos0cos, pos0sin) = self.canvas_angle(pos0)
(pos1angle, pos1cos, pos1sin) = self.canvas_angle(pos1)
barval = ((trackheight * (val - midval)) / resolution)
if (barval >= 0):
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
bar_elements.append(self._draw_arc(ctr, (ctr + barval), pos0angle, pos1angle, barcolor))
return bar_elements | 3,926,647,248,423,015,400 | draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_bar_graph | LyonsLab/coge | python | def draw_bar_graph(self, graph):
' draw_bar_graph(self, graph) -> [element, element,...]\n\n o graph Graph object\n\n Returns a list of drawable elements for a bar graph of the passed\n Graph object\n '
bar_elements = []
data_quartiles = graph.quartiles()
(minval, maxval) = (data_quartiles[0], data_quartiles[4])
(btm, ctr, top) = self.track_radii[self.current_track_level]
trackheight = (0.5 * (top - btm))
datarange = (maxval - minval)
if (datarange == 0):
datarange = trackheight
data = graph[self.start:self.end]
if (graph.center is None):
midval = ((maxval + minval) / 2.0)
else:
midval = graph.center
newdata = intermediate_points(self.start, self.end, graph[self.start:self.end])
resolution = max((midval - minval), (maxval - midval))
if (resolution == 0):
resolution = trackheight
for (pos0, pos1, val) in newdata:
(pos0angle, pos0cos, pos0sin) = self.canvas_angle(pos0)
(pos1angle, pos1cos, pos1sin) = self.canvas_angle(pos1)
barval = ((trackheight * (val - midval)) / resolution)
if (barval >= 0):
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
bar_elements.append(self._draw_arc(ctr, (ctr + barval), pos0angle, pos1angle, barcolor))
return bar_elements |
def draw_heat_graph(self, graph):
' draw_heat_graph(self, graph) -> [element, element,...]\n\n o graph Graph object\n\n Returns a list of drawable elements for the heat graph\n '
heat_elements = []
data_quartiles = graph.quartiles()
(minval, maxval) = (data_quartiles[0], data_quartiles[4])
midval = ((maxval + minval) / 2.0)
(btm, ctr, top) = self.track_radii[self.current_track_level]
trackheight = (top - btm)
newdata = intermediate_points(self.start, self.end, graph[self.start:self.end])
for (pos0, pos1, val) in newdata:
(pos0angle, pos0cos, pos0sin) = self.canvas_angle(pos0)
(pos1angle, pos1cos, pos1sin) = self.canvas_angle(pos1)
heat = colors.linearlyInterpolatedColor(graph.poscolor, graph.negcolor, maxval, minval, val)
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle, heat, border=heat))
return heat_elements | -4,502,195,446,420,181,500 | draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_heat_graph | LyonsLab/coge | python | def draw_heat_graph(self, graph):
' draw_heat_graph(self, graph) -> [element, element,...]\n\n o graph Graph object\n\n Returns a list of drawable elements for the heat graph\n '
heat_elements = []
data_quartiles = graph.quartiles()
(minval, maxval) = (data_quartiles[0], data_quartiles[4])
midval = ((maxval + minval) / 2.0)
(btm, ctr, top) = self.track_radii[self.current_track_level]
trackheight = (top - btm)
newdata = intermediate_points(self.start, self.end, graph[self.start:self.end])
for (pos0, pos1, val) in newdata:
(pos0angle, pos0cos, pos0sin) = self.canvas_angle(pos0)
(pos1angle, pos1cos, pos1sin) = self.canvas_angle(pos1)
heat = colors.linearlyInterpolatedColor(graph.poscolor, graph.negcolor, maxval, minval, val)
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle, heat, border=heat))
return heat_elements |
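draw_heat_graph above relies on reportlab's colour interpolation. A tiny standalone check of that call (arguments: the two endpoint colours, the value range, then the value):

from reportlab.lib import colors

# value 7.5 on a 0..10 scale -> 75% of the way from the first colour to the second
heat = colors.linearlyInterpolatedColor(colors.red, colors.blue, 0.0, 10.0, 7.5)
print(heat)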