id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses, 1 value)
---|---|---
/wasp-launcher-0.0.2.tar.gz/wasp-launcher-0.0.2/wasp_launcher/static/angular/latest/i18n/angular-locale_ckb.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
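// Returns the number of digits after the decimal separator in n (0 if there is none).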
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
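// Returns {v, f}: v = number of visible fraction digits (capped at 3 unless opt_precision is given),
// f = those fraction digits as an integer (the CLDR plural-rule operands).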
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"\u0628.\u0646",
"\u062f.\u0646"
],
"DAY": [
"\u06cc\u06d5\u06a9\u0634\u06d5\u0645\u0645\u06d5",
"\u062f\u0648\u0648\u0634\u06d5\u0645\u0645\u06d5",
"\u0633\u06ce\u0634\u06d5\u0645\u0645\u06d5",
"\u0686\u0648\u0627\u0631\u0634\u06d5\u0645\u0645\u06d5",
"\u067e\u06ce\u0646\u062c\u0634\u06d5\u0645\u0645\u06d5",
"\u06be\u06d5\u06cc\u0646\u06cc",
"\u0634\u06d5\u0645\u0645\u06d5"
],
"ERANAMES": [
"\u067e\u06ce\u0634 \u0632\u0627\u06cc\u06cc\u0646",
"\u0632\u0627\u06cc\u06cc\u0646\u06cc"
],
"ERAS": [
"\u067e.\u0646",
"\u0632"
],
"FIRSTDAYOFWEEK": 5,
"MONTH": [
"\u06a9\u0627\u0646\u0648\u0648\u0646\u06cc \u062f\u0648\u0648\u06d5\u0645",
"\u0634\u0648\u0628\u0627\u062a",
"\u0626\u0627\u0632\u0627\u0631",
"\u0646\u06cc\u0633\u0627\u0646",
"\u0626\u0627\u06cc\u0627\u0631",
"\u062d\u0648\u0632\u06d5\u06cc\u0631\u0627\u0646",
"\u062a\u06d5\u0645\u0648\u0648\u0632",
"\u0626\u0627\u0628",
"\u0626\u06d5\u06cc\u0644\u0648\u0648\u0644",
"\u062a\u0634\u0631\u06cc\u0646\u06cc \u06cc\u06d5\u06a9\u06d5\u0645",
"\u062a\u0634\u0631\u06cc\u0646\u06cc \u062f\u0648\u0648\u06d5\u0645",
"\u06a9\u0627\u0646\u0648\u0646\u06cc \u06cc\u06d5\u06a9\u06d5\u0645"
],
"SHORTDAY": [
"\u06cc\u06d5\u06a9\u0634\u06d5\u0645\u0645\u06d5",
"\u062f\u0648\u0648\u0634\u06d5\u0645\u0645\u06d5",
"\u0633\u06ce\u0634\u06d5\u0645\u0645\u06d5",
"\u0686\u0648\u0627\u0631\u0634\u06d5\u0645\u0645\u06d5",
"\u067e\u06ce\u0646\u062c\u0634\u06d5\u0645\u0645\u06d5",
"\u06be\u06d5\u06cc\u0646\u06cc",
"\u0634\u06d5\u0645\u0645\u06d5"
],
"SHORTMONTH": [
"\u06a9\u0627\u0646\u0648\u0648\u0646\u06cc \u062f\u0648\u0648\u06d5\u0645",
"\u0634\u0648\u0628\u0627\u062a",
"\u0626\u0627\u0632\u0627\u0631",
"\u0646\u06cc\u0633\u0627\u0646",
"\u0626\u0627\u06cc\u0627\u0631",
"\u062d\u0648\u0632\u06d5\u06cc\u0631\u0627\u0646",
"\u062a\u06d5\u0645\u0648\u0648\u0632",
"\u0626\u0627\u0628",
"\u0626\u06d5\u06cc\u0644\u0648\u0648\u0644",
"\u062a\u0634\u0631\u06cc\u0646\u06cc \u06cc\u06d5\u06a9\u06d5\u0645",
"\u062a\u0634\u0631\u06cc\u0646\u06cc \u062f\u0648\u0648\u06d5\u0645",
"\u06a9\u0627\u0646\u0648\u0646\u06cc \u06cc\u06d5\u06a9\u06d5\u0645"
],
"STANDALONEMONTH": [
"\u06a9\u0627\u0646\u0648\u0648\u0646\u06cc \u062f\u0648\u0648\u06d5\u0645",
"\u0634\u0648\u0628\u0627\u062a",
"\u0626\u0627\u0632\u0627\u0631",
"\u0646\u06cc\u0633\u0627\u0646",
"\u0626\u0627\u06cc\u0627\u0631",
"\u062d\u0648\u0632\u06d5\u06cc\u0631\u0627\u0646",
"\u062a\u06d5\u0645\u0648\u0648\u0632",
"\u0626\u0627\u0628",
"\u0626\u06d5\u06cc\u0644\u0648\u0648\u0644",
"\u062a\u0634\u0631\u06cc\u0646\u06cc \u06cc\u06d5\u06a9\u06d5\u0645",
"\u062a\u0634\u0631\u06cc\u0646\u06cc \u062f\u0648\u0648\u06d5\u0645",
"\u06a9\u0627\u0646\u0648\u0646\u06cc \u06cc\u06d5\u06a9\u06d5\u0645"
],
"WEEKENDRANGE": [
4,
5
],
"fullDate": "y MMMM d, EEEE",
"longDate": "d\u06cc MMMM\u06cc y",
"medium": "y MMM d h:mm:ss a",
"mediumDate": "y MMM d",
"mediumTime": "h:mm:ss a",
"short": "y-MM-dd h:mm a",
"shortDate": "y-MM-dd",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "din",
"DECIMAL_SEP": "\u066b",
"GROUP_SEP": "\u066c",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-\u00a4\u00a0",
"negSuf": "",
"posPre": "\u00a4\u00a0",
"posSuf": ""
}
]
},
"id": "ckb",
"localeID": "ckb",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/ensmallen_graph-0.6.0-cp37-cp37m-manylinux2010_x86_64.whl/ensmallen_graph/datasets/networkrepository/rail2586.py | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def Rail2586(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the rail2586 graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of the rail2586 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 12:16:21.111009
The undirected graph rail2586 has 86456 nodes and 592440 weighted edges,
of which none are self-loops. The graph is quite sparse as it has a density
of 0.00016 and has 8 connected components, where the component with most
nodes has 86314 nodes and the component with the least nodes has 3 nodes.
The graph median node degree is 7, the mean node degree is 13.71, and the
node degree mode is 8. The top 5 most central nodes are 2289 (degree 5403),
1245 (degree 5236), 687 (degree 4784), 2275 (degree 4198) and 121 (degree
3698).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import Rail2586
# Then load the graph
graph = Rail2586()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="Rail2586",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)() | PypiClean |
/cpg-hail-0.2.90.tar.gz/cpg-hail-0.2.90/hail/experimental/lens.py | import abc
import hail as hl
class TableLike(abc.ABC):
@abc.abstractmethod
def annotate(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def drop(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def select(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def explode(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def group_by(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def __getitem__(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def index(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def unlens(self):
raise NotImplementedError
class GroupedTableLike(abc.ABC):
@abc.abstractmethod
def aggregate(self, *args, **kwargs):
raise NotImplementedError
class MatrixRows(TableLike):
def __init__(self, mt):
assert isinstance(mt, hl.MatrixTable)
self.mt = mt
self.key = mt.row_key
def annotate(self, *args, **kwargs):
return MatrixRows(self.mt.annotate_rows(*args, **kwargs))
def drop(self, *args, **kwargs):
return MatrixRows(self.mt.drop(*args, **kwargs))
def select(self, *args, **kwargs):
return MatrixRows(self.mt.select_rows(*args, **kwargs))
def explode(self, *args, **kwargs):
return MatrixRows(self.mt.explode_rows(*args, **kwargs))
def group_by(self, *args, **kwargs):
return GroupedMatrixRows(self.mt.group_rows_by(*args, **kwargs))
def __getitem__(self, *args, **kwargs):
return self.mt.__getitem__(*args, **kwargs)
def index(self, *args, **kwargs):
return self.mt.rows().index(*args, **kwargs)
def unlens(self):
return self.mt
class GroupedMatrixRows(GroupedTableLike):
def __init__(self, mt):
assert isinstance(mt, hl.GroupedMatrixTable)
self.mt = mt
def aggregate(self, *args, **kwargs):
return MatrixRows(self.mt.aggregate_rows(*args, **kwargs).result())
class TableRows(TableLike):
def __init__(self, t):
assert isinstance(t, hl.Table)
self.t = t
self.key = t.key
def annotate(self, *args, **kwargs):
return TableRows(self.t.annotate(*args, **kwargs))
def drop(self, *args, **kwargs):
return TableRows(self.t.drop(*args, **kwargs))
def select(self, *args, **kwargs):
return TableRows(self.t.select(*args, **kwargs))
def explode(self, *args, **kwargs):
return TableRows(self.t.explode(*args, **kwargs))
def group_by(self, *args, **kwargs):
return GroupedTableRows(self.t.group_by(*args, **kwargs))
def __getitem__(self, *args, **kwargs):
return self.t.__getitem__(*args, **kwargs)
def index(self, *args, **kwargs):
return self.t.index(*args, **kwargs)
def unlens(self):
return self.t
class GroupedTableRows(GroupedTableLike):
def __init__(self, t):
assert isinstance(t, hl.GroupedTable)
self.t = t
def aggregate(self, *args, **kwargs):
return TableRows(self.t.aggregate(*args, **kwargs)) | PypiClean |
/nni_daily-1.5.2005180104-py3-none-manylinux1_x86_64.whl/nni_daily-1.5.2005180104.data/data/nni/node_modules/moment/locale/x-pseudo.js |
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, (function (moment) { 'use strict';
//! moment.js locale configuration
var xPseudo = moment.defineLocale('x-pseudo', {
months: 'J~áñúá~rý_F~ébrú~árý_~Márc~h_Áp~ríl_~Máý_~Júñé~_Júl~ý_Áú~gúst~_Sép~témb~ér_Ó~ctób~ér_Ñ~óvém~bér_~Décé~mbér'.split(
'_'
),
monthsShort: 'J~áñ_~Féb_~Már_~Ápr_~Máý_~Júñ_~Júl_~Áúg_~Sép_~Óct_~Ñóv_~Déc'.split(
'_'
),
monthsParseExact: true,
weekdays: 'S~úñdá~ý_Mó~ñdáý~_Túé~sdáý~_Wéd~ñésd~áý_T~húrs~dáý_~Fríd~áý_S~átúr~dáý'.split(
'_'
),
weekdaysShort: 'S~úñ_~Móñ_~Túé_~Wéd_~Thú_~Frí_~Sát'.split('_'),
weekdaysMin: 'S~ú_Mó~_Tú_~Wé_T~h_Fr~_Sá'.split('_'),
weekdaysParseExact: true,
longDateFormat: {
LT: 'HH:mm',
L: 'DD/MM/YYYY',
LL: 'D MMMM YYYY',
LLL: 'D MMMM YYYY HH:mm',
LLLL: 'dddd, D MMMM YYYY HH:mm',
},
calendar: {
sameDay: '[T~ódá~ý át] LT',
nextDay: '[T~ómó~rró~w át] LT',
nextWeek: 'dddd [át] LT',
lastDay: '[Ý~ést~érdá~ý át] LT',
lastWeek: '[L~ást] dddd [át] LT',
sameElse: 'L',
},
relativeTime: {
future: 'í~ñ %s',
past: '%s á~gó',
s: 'á ~féw ~sécó~ñds',
ss: '%d s~écóñ~ds',
m: 'á ~míñ~úté',
mm: '%d m~íñú~tés',
h: 'á~ñ hó~úr',
hh: '%d h~óúrs',
d: 'á ~dáý',
dd: '%d d~áýs',
M: 'á ~móñ~th',
MM: '%d m~óñt~hs',
y: 'á ~ýéár',
yy: '%d ý~éárs',
},
dayOfMonthOrdinalParse: /\d{1,2}(th|st|nd|rd)/,
ordinal: function (number) {
var b = number % 10,
output =
~~((number % 100) / 10) === 1
? 'th'
: b === 1
? 'st'
: b === 2
? 'nd'
: b === 3
? 'rd'
: 'th';
return number + output;
},
week: {
dow: 1, // Monday is the first day of the week.
doy: 4, // The week that contains Jan 4th is the first week of the year.
},
});
return xPseudo;
}))); | PypiClean |
/deeprob_kit-1.1.0-py3-none-any.whl/deeprob/flows/models/maf.py |
from typing import Optional
from deeprob.torch.base import DensityEstimator
from deeprob.utils.random import check_random_state, RandomState
from deeprob.flows.utils import BatchNormLayer1d
from deeprob.flows.layers.autoregressive import AutoregressiveLayer
from deeprob.flows.models.base import NormalizingFlow
class MAF(NormalizingFlow):
def __init__(
self,
in_features: int,
dequantize: bool = False,
logit: Optional[float] = None,
in_base: Optional[DensityEstimator] = None,
n_flows: int = 5,
depth: int = 1,
units: int = 128,
batch_norm: bool = True,
activation: str = 'relu',
sequential: bool = True,
random_state: Optional[RandomState] = None
):
"""
Initialize a Masked Autoregressive Flow (MAF) model.
:param in_features: The number of input features.
:param dequantize: Whether to apply the dequantization transformation.
:param logit: The logit factor to use. Use None to disable the logit transformation.
:param in_base: The input base distribution to use. If None, the standard Normal distribution is used.
:param n_flows: The number of sequential autoregressive layers.
:param depth: The number of hidden layers of flows conditioners.
:param units: The number of hidden units per layer of flows conditioners.
:param batch_norm: Whether to apply batch normalization after each autoregressive layer.
:param activation: The activation function name to use for the flows conditioners hidden layers.
:param sequential: If True, build mask degrees sequentially; otherwise randomly.
:param random_state: The random state used to generate the mask degrees. Used only if sequential is False.
It can be either a seed integer or a np.random.RandomState instance.
:raises ValueError: If a parameter is out of domain.
"""
if n_flows <= 0:
raise ValueError("The number of autoregressive flow layers must be positive")
if depth <= 0:
raise ValueError("The number of hidden layers of conditioners must be positive")
if units <= 0:
raise ValueError("The number of hidden units per layer must be positive")
super().__init__(in_features, dequantize=dequantize, logit=logit, in_base=in_base)
self.n_flows = n_flows
self.depth = depth
self.units = units
self.batch_norm = batch_norm
self.activation = activation
self.sequential = sequential
# Check the random state, if not using sequential masks
if not self.sequential:
random_state = check_random_state(random_state)
# Build the autoregressive layers
reverse = False
for _ in range(self.n_flows):
self.layers.append(
AutoregressiveLayer(
self.in_features, self.depth, self.units, self.activation,
reverse=reverse, sequential=self.sequential, random_state=random_state
)
)
# Append batch normalization after each layer, if specified
if self.batch_norm:
self.layers.append(BatchNormLayer1d(self.in_features))
# Invert the input ordering
reverse = not reverse | PypiClean |
/framedork.py-0.1.0.tar.gz/framedork.py-0.1.0/framedork/src/preprocessors/Response.py | import json
class Response:
def __init__(self, contents: list):
self.contents = contents
if len(self.contents) > 1:
self.method = "html"
else:
self.method = "json"
class ResponseHandler:
base_headers: list = [
('Server', 'Framedork.py'),
('Connection', 'keep-alive')
]
html_headers: str = 'text/html; encoding=utf8'
json_headers: str = 'application/json; encoding=utf8'
RESPONSE_CODES = {
200: "HTTP/1.1 200 OK\r\n",
400: "HTTP/1.1 400 BAD_REQUEST\r\n",
404: "HTTP/1.1 404 NOT_FOUND\r\n",
405: "HTTP/1.1 405 Method not allowed\r\n",
500: "HTTP/1.1 500 Internal server error\r\n"
}
RESPONSE_CODES_WSGI = {
200: "200 OK",
400: "400 BAD REQUEST",
404: "404 NOT FOUND",
405: "405 Method not allowed",
500: "500 Internal server error"
}
def __init__(self, code: int, page: str, content: str, mode: str):
self.code = code
self.page = page
self.content = content
self.mode = mode
def _construct_response(self):
page_raw = self.page
if self.content == "json":
page_raw = json.dumps(self.page[0])
headers_raw = [f"{i[0]}: {i[1]}" for i in self.base_headers]
headers_raw = "\r\n".join(headers_raw) + f"\r\nContent-Length: {len(page_raw)}\r\n"
headers_raw = headers_raw + 'Content-Type: ' + self.html_headers + "\r\n" if self.content == "html" else headers_raw + 'Content-Type: ' + self.json_headers + "\r\n"
return (self.RESPONSE_CODES[self.code], headers_raw, page_raw)
def _construct_wsgi_response(self):
page_raw = self.page
if self.content == "json":
page_raw = json.dumps(self.page)
headers = self.base_headers.copy()
headers.append(("Content-Length", str(len(page_raw))))
headers.append(('Content-Type', self.html_headers) if self.content == "html" else ('Content-Type', self.json_headers))
return (self.RESPONSE_CODES_WSGI[self.code], headers, page_raw)
def __call__(self):
if self.mode == "local":
return self._construct_response()
elif self.mode == "wsgi":
return self._construct_wsgi_response() | PypiClean |
/wizata_dsapi-0.3.35-py3-none-any.whl/wizata_dsapi/mlmodel.py | import json
import uuid
from flask import jsonify
from .api_dto import ApiDto
from enum import Enum
class ModelKeyType(Enum):
DIRECT = "direct"
TEMPLATE = "template"
VARIABLE = "variable"
class MLModelConfig(ApiDto):
"""
A model config defines execution properties for a specific model,
usually describing how a pipeline should train your model and run predictions with it.
:ivar train_script: name of function referencing the script to train the model.
:ivar train_test_split_pct: fraction used to split the data between training and scoring.
:ivar target_feat: target feature name if existing.
:ivar features: filter list of datapoint columns if necessary.
:ivar train_test_split_type: strategy used to split the data between training and scoring ('ignore' by default).
:ivar model_key: key of the model to store (or use story property if dynamic).
:ivar model_key_type: ModelKeyType by default at DIRECT. Use TEMPLATE or VARIABLE to get key from another source.
:ivar output_property: name of dataframe column where to store results (or template property).
"""
def __init__(self,
train_script=None,
train_test_split_pct: float = 1.0,
target_feat: str = None,
features: list = None,
train_test_split_type: str = "ignore",
model_key: str = None,
model_key_type: ModelKeyType = ModelKeyType.DIRECT,
output_property: str = "result",
function: str = "predict"):
self.train_script = train_script
self.train_test_split_pct = train_test_split_pct
self.train_test_split_type = train_test_split_type
self.target_feat = target_feat
self.features = features
self.model_key_type = model_key_type
self.model_key = model_key
self.output_property = output_property
self.function = function
def from_json(self, obj):
if "train_script" in obj.keys() and obj["train_script"] is not None:
self.train_script = obj["train_script"]
if "train_test_split_pct" in obj.keys() and obj["train_test_split_pct"] is not None:
self.train_test_split_pct = float(obj["train_test_split_pct"])
if "train_test_split_type" in obj.keys() and obj["train_test_split_type"] is not None:
self.train_test_split_type = obj["train_test_split_type"]
if "target_feat" in obj.keys() and obj["target_feat"] is not None:
self.target_feat = obj["target_feat"]
if "features" in obj.keys() and obj["features"] is not None:
self.features = obj["features"]
if "store_property" in obj.keys() and obj["store_property"] is not None:
self.model_key = obj["store_property"]
self.model_key_type = ModelKeyType.TEMPLATE
elif "model_key" in obj.keys() and obj["model_key"] is not None:
self.model_key = obj["model_key"]
if "model_key_type" in obj.keys():
self.model_key_type = ModelKeyType(obj["model_key_type"])
if self.model_key is None:
raise KeyError('model_key must be declared in the config.')
if "output_property" in obj.keys() and obj["output_property"] is not None:
self.output_property = obj["output_property"]
if "function" in obj.keys() and obj["function"] is not None:
self.function = obj["function"]
def to_json(self):
obj = {}
if self.train_script is not None:
obj["train_script"] = str(self.train_script)
if self.train_test_split_pct is not None:
obj["train_test_split_pct"] = float(self.train_test_split_pct)
if self.train_test_split_type is not None:
obj["train_test_split_type"] = str(self.train_test_split_type)
if self.target_feat is not None:
obj["target_feat"] = str(self.target_feat)
if self.features is not None:
obj["features"] = self.features
if self.model_key_type is not None:
obj["model_key_type"] = str(self.model_key_type.value)
if self.model_key is not None:
obj["model_key"] = self.model_key
if self.output_property is not None:
obj["output_property"] = self.output_property
if self.function is not None:
obj["function"] = self.function
return obj
class MLModel(ApiDto):
"""
A trained Machine Learning Model stored to be executed on demand.
Can contain also a scaler, both object are stored as pickled file.
:ivar model_id: The UUID of the ML Model.
:ivar key: Logical String ID of the Model
:ivar generatedById: The UUID of the Execution from which the ML Model was created.
:ivar status: 'draft', 'valid' or 'invalid' - When generated model are tested by API, only valid model can be used.
:ivar needExactColumnNumbers: True by default, define if the model requires exact columns numbers to be executed.
:ivar needExactColumnNames: True by default, define if the model requires exact columns names to be executed.
:ivar has_anomalies: False by default, define if a model generate potential anomalies.
:ivar has_target_feat: False by default, define if a model requires a target features to be trained or executed.
:ivar input_columns: list of all columns used to trained the model.
:ivar output_columns: list of all columns generated by the model.
:ivar label_counts: Count of labels generated by the model.
:ivar trained_model: The Trained model
:ivar scaler: The scaler
"""
def __init__(self, model_id=None, generated_by_id=None,
exact_names=True, exact_numbers=True,
key = None):
if model_id is None:
model_id = uuid.uuid4()
self.model_id = model_id
self.key = key
self.generatedById = generated_by_id
self.status = 'draft'
self.input_columns = []
self.output_columns = []
self.needExactColumnNumbers = exact_numbers
self.needExactColumnNames = exact_names
self.has_anomalies = False
self.label_counts = 0
self.has_target_feat = False
self.trained_model = None
self.scaler = None
self.experimentId = None
def api_id(self) -> str:
"""
Id of the ML Model (model_id)
:return: string formatted UUID of the Model ID.
"""
return str(self.model_id).upper()
def endpoint(self) -> str:
"""
Name of the endpoints used to manipulate ML Models.
:return: Endpoint name.
"""
return "MLModels"
def to_json(self):
obj = {"id": str(self.model_id),
"status": str(self.status),
"needExactColumnNames": str(self.needExactColumnNames),
"needExactColumnNumbers": str(self.needExactColumnNumbers),
"hasAnomalies": str(self.has_anomalies),
"hasTargetFeat": str(self.has_target_feat),
"labelCount": str(self.label_counts)
}
if self.key is not None:
obj["key"] = str(self.key)
if self.generatedById is not None:
obj["generatedById"] = str(self.generatedById)
if self.input_columns is not None:
obj["inputColumns"] = json.dumps(list(self.input_columns))
if self.output_columns is not None:
obj["outputColumns"] = json.dumps(list(self.output_columns))
if self.experimentId is not None:
obj["experimentId"] = str(self.experimentId)
return obj
def from_json(self, obj):
"""
Load the ML Model entity from a dictionary representation of the ML Model.
:param obj: Dict version of the ML Model.
"""
if "id" in obj.keys():
self.model_id = obj["id"]
if "key" in obj.keys() and obj["key"] is not None:
self.key = obj["key"]
if "generatedById" in obj.keys() and obj["generatedById"] is not None:
self.generatedById = uuid.UUID(obj["generatedById"])
if "experimentId" in obj.keys() and obj["experimentId"] is not None:
self.experimentId = uuid.UUID(obj["experimentId"])
if "status" in obj.keys():
self.status = str(obj["status"]).lower()
if "inputColumns" in obj.keys():
self.input_columns = json.loads(obj["inputColumns"])
if "outputColumns" in obj.keys():
self.output_columns = json.loads(obj["outputColumns"])
if "labelCount" in obj.keys():
self.label_counts = int(obj["labelCount"])
if "hasAnomalies" in obj.keys():
if isinstance(obj["hasAnomalies"], str) and obj["hasAnomalies"].lower() == "false":
self.has_anomalies = False
else:
self.has_anomalies = bool(obj["hasAnomalies"])
if "hasTargetFeat" in obj.keys():
if isinstance(obj["hasTargetFeat"], str) and obj["hasTargetFeat"].lower() == "false":
self.has_target_feat = False
else:
self.has_target_feat = bool(obj["hasTargetFeat"])
if "needExactColumnNumbers" in obj.keys():
if isinstance(obj["needExactColumnNumbers"], str) and obj["needExactColumnNumbers"].lower() == "false":
self.needExactColumnNumbers = False
else:
self.needExactColumnNumbers = bool(obj["needExactColumnNumbers"])
if "needExactColumnNames" in obj.keys():
if isinstance(obj["needExactColumnNames"], str) and obj["needExactColumnNames"].lower() == "false":
self.needExactColumnNames = False
else:
self.needExactColumnNames = bool(obj["needExactColumnNames"])
def get_sample_payload(self):
"""
Get a JSON formatted sample payload to call the ML Model.
:return: JSON formatted sample payload.
"""
pl_columns = {"timestamp": "[timestamp]"}
for hardwareId in self.input_columns:
pl_columns[hardwareId] = "[" + hardwareId + "]"
pl_json = {
"id": str(self.model_id),
"dataset": pl_columns
}
return pl_json | PypiClean |
/coinstac_pyprofiler-0.1.0.tar.gz/coinstac_pyprofiler-0.1.0/README.md | # coinstac python profiler
Profile your Python code (including code run with coinstac-simulator). This primarily uses the pyinstrument profiler, but can also be extended to include other Python profilers.
## Prerequisites
Python 3.6+
Other packages listed in Requirements.txt
# Usage
NOTE: The code currently implements profiling based on the pyinstrument profiler. Other profilers (such as cProfile) can be included based on requirements.
## First way:
Use the following line above the method definition which needs to be profiled.
```python
from coinstac_pyprofiler import custom_profiler as cprof
@cprof.profile(type="pyinstrument", output_file_prefix=output_file_prefix)
```
Decorator class to profile any method.
Note: 'output_file_prefix' should include its (absolute) directory path
## Second way:
Create object of Profile class in custom_profiler.py and use start() and stop() methods to control profiling.
```python
from coinstac_pyprofiler import custom_profiler as cprof
@cprof.profile(type="pyinstrument", output_file_prefix=output_file_prefix)
profile = cprof.Profile(type='pyinstrument', output_file_prefix=<your_dir_path>/<some_file_prefix>")
profile.start()
<your code to profile>
profile.stop()
profile.persist_log()
```
# Merging multiple profile output files
Merges the json profiler output files generated using pyinstrument profiling and saves merged output.
## Use-case:
For a computation in coinstac-simulator, some computation has many iterations and every iteration of python call generates a separate profile json output file. All such json files can be merged separately for each client/remote using this call.
## Example
An example usage is included in tests/examples/profiler_usage.py which demonstrates the usage of the above mentioned profiling methods and also provides examples to merge multiple profile output files.
Happy profiling!!
| PypiClean |
/dsin100days603v37-6.0.3.tar.gz/dsin100days603v37-6.0.3/notebook/static/components/codemirror/mode/forth/forth.js |
// Author: Aliaksei Chapyzhenka
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
function toWordList(words) {
var ret = [];
words.split(' ').forEach(function(e){
ret.push({name: e});
});
return ret;
}
var coreWordList = toWordList(
'INVERT AND OR XOR\
2* 2/ LSHIFT RSHIFT\
0= = 0< < > U< MIN MAX\
2DROP 2DUP 2OVER 2SWAP ?DUP DEPTH DROP DUP OVER ROT SWAP\
>R R> R@\
+ - 1+ 1- ABS NEGATE\
S>D * M* UM*\
FM/MOD SM/REM UM/MOD */ */MOD / /MOD MOD\
HERE , @ ! CELL+ CELLS C, C@ C! CHARS 2@ 2!\
ALIGN ALIGNED +! ALLOT\
CHAR [CHAR] [ ] BL\
FIND EXECUTE IMMEDIATE COUNT LITERAL STATE\
; DOES> >BODY\
EVALUATE\
SOURCE >IN\
<# # #S #> HOLD SIGN BASE >NUMBER HEX DECIMAL\
FILL MOVE\
. CR EMIT SPACE SPACES TYPE U. .R U.R\
ACCEPT\
TRUE FALSE\
<> U> 0<> 0>\
NIP TUCK ROLL PICK\
2>R 2R@ 2R>\
WITHIN UNUSED MARKER\
I J\
TO\
COMPILE, [COMPILE]\
SAVE-INPUT RESTORE-INPUT\
PAD ERASE\
2LITERAL DNEGATE\
D- D+ D0< D0= D2* D2/ D< D= DMAX DMIN D>S DABS\
M+ M*/ D. D.R 2ROT DU<\
CATCH THROW\
FREE RESIZE ALLOCATE\
CS-PICK CS-ROLL\
GET-CURRENT SET-CURRENT FORTH-WORDLIST GET-ORDER SET-ORDER\
PREVIOUS SEARCH-WORDLIST WORDLIST FIND ALSO ONLY FORTH DEFINITIONS ORDER\
-TRAILING /STRING SEARCH COMPARE CMOVE CMOVE> BLANK SLITERAL');
var immediateWordList = toWordList('IF ELSE THEN BEGIN WHILE REPEAT UNTIL RECURSE [IF] [ELSE] [THEN] ?DO DO LOOP +LOOP UNLOOP LEAVE EXIT AGAIN CASE OF ENDOF ENDCASE');
CodeMirror.defineMode('forth', function() {
function searchWordList (wordList, word) {
var i;
for (i = wordList.length - 1; i >= 0; i--) {
if (wordList[i].name === word.toUpperCase()) {
return wordList[i];
}
}
return undefined;
}
return {
startState: function() {
return {
state: '',
base: 10,
coreWordList: coreWordList,
immediateWordList: immediateWordList,
wordList: []
};
},
token: function (stream, stt) {
var mat;
if (stream.eatSpace()) {
return null;
}
if (stt.state === '') { // interpretation
if (stream.match(/^(\]|:NONAME)(\s|$)/i)) {
stt.state = ' compilation';
return 'builtin compilation';
}
mat = stream.match(/^(\:)\s+(\S+)(\s|$)+/);
if (mat) {
stt.wordList.push({name: mat[2].toUpperCase()});
stt.state = ' compilation';
return 'def' + stt.state;
}
mat = stream.match(/^(VARIABLE|2VARIABLE|CONSTANT|2CONSTANT|CREATE|POSTPONE|VALUE|WORD)\s+(\S+)(\s|$)+/i);
if (mat) {
stt.wordList.push({name: mat[2].toUpperCase()});
return 'def' + stt.state;
}
mat = stream.match(/^(\'|\[\'\])\s+(\S+)(\s|$)+/);
if (mat) {
return 'builtin' + stt.state;
}
} else { // compilation
// ; [
if (stream.match(/^(\;|\[)(\s)/)) {
stt.state = '';
stream.backUp(1);
return 'builtin compilation';
}
if (stream.match(/^(\;|\[)($)/)) {
stt.state = '';
return 'builtin compilation';
}
if (stream.match(/^(POSTPONE)\s+\S+(\s|$)+/)) {
return 'builtin';
}
}
// dynamic wordlist
mat = stream.match(/^(\S+)(\s+|$)/);
if (mat) {
if (searchWordList(stt.wordList, mat[1]) !== undefined) {
return 'variable' + stt.state;
}
// comments
if (mat[1] === '\\') {
stream.skipToEnd();
return 'comment' + stt.state;
}
// core words
if (searchWordList(stt.coreWordList, mat[1]) !== undefined) {
return 'builtin' + stt.state;
}
if (searchWordList(stt.immediateWordList, mat[1]) !== undefined) {
return 'keyword' + stt.state;
}
if (mat[1] === '(') {
stream.eatWhile(function (s) { return s !== ')'; });
stream.eat(')');
return 'comment' + stt.state;
}
// // strings
if (mat[1] === '.(') {
stream.eatWhile(function (s) { return s !== ')'; });
stream.eat(')');
return 'string' + stt.state;
}
if (mat[1] === 'S"' || mat[1] === '."' || mat[1] === 'C"') {
stream.eatWhile(function (s) { return s !== '"'; });
stream.eat('"');
return 'string' + stt.state;
}
// numbers
if (mat[1] - 0xfffffffff) {
return 'number' + stt.state;
}
// if (mat[1].match(/^[-+]?[0-9]+\.[0-9]*/)) {
// return 'number' + stt.state;
// }
return 'atom' + stt.state;
}
}
};
});
CodeMirror.defineMIME("text/x-forth", "forth");
}); | PypiClean |
/yi-mp-1.0.5.tar.gz/yi-mp-1.0.5/emp_boot.py | from emp_utils import rainbow
from emp_utils import print_as_a_list_item
from emp_utils import selection
from emp_utils import _const
from emp_utils import config_path
import os
import machine
BOOT_MODE = _const()
BOOT_MODE.WITH_NOTHING = 0
BOOT_MODE.WITH_WIFI_STARTUP = 1
BOOT_MODE.EASY_DEVELOP = 2
BOOT_MODE.WITH_WIFI_STARTUP_CODE = '''from emp_wifi import Wifi
if __name__ == '__main__':
Wifi.connect()'''
BOOT_MODE.EASY_DEVELOP_CODE = '''from emp_wifi import Wifi
from emp_webrepl import WebREPL
from emp_utils import webrepl_pass
from emp_utils import post_ip
if __name__ == '__main__':
print()
print(' ---------------------------')
print(' - Python YI MicroPython -')
print(' - version 1.0.5 -')
print(' - by YI -')
print(' ---------------------------')
print()
Wifi.connect()
try:
post_ip(Wifi.ifconfig()[0][0])
except ImportError:
pass
WebREPL.start(password=webrepl_pass())
import ide'''
def reboot():
print(rainbow('Reboot', color='red'))
machine.reset()
def set_boot_mode():
print(
print_as_a_list_item(
0, 'Clear mode',
'Note: clears the boot script boot.py, which will prevent the program from starting!'))
print(
print_as_a_list_item(
1, 'WiFi mode',
'This option is suitable for stable programs!'))
print(
print_as_a_list_item(
2, 'Developer mode',
'Connects to the WiFi hotspot at startup and enables WebREPL development mode'
))
mode = selection('Select a mode [0-2]: ', 2)
with open('boot.py', 'w') as f:
if mode == BOOT_MODE.WITH_NOTHING:
boot_code = ''
f.write(boot_code)
print(rainbow('Clear mode set', color='green'))
elif mode == BOOT_MODE.WITH_WIFI_STARTUP:
boot_code = BOOT_MODE.WITH_WIFI_STARTUP_CODE
f.write(boot_code)
print(rainbow('WiFi mode set', color='green'))
elif mode == BOOT_MODE.EASY_DEVELOP:
config_path()
if not 'webrepl.pass' in os.listdir('config'):
with open('config/webrepl.pass', 'w') as c:
c.write('123456')
boot_code = BOOT_MODE.EASY_DEVELOP_CODE
f.write(boot_code)
print(rainbow('Developer mode set', color='green'))
reboot()
def set_web_repl():
pw = input(rainbow('Enter a new WebREPL password:', color='blue'))
config_path()
with open('config/webrepl.pass', 'w') as c:
c.write(pw)
print(rainbow('The WebREPL password has been reset; it takes effect after reboot!!!', color='green')) | PypiClean
/mengyu_distributions-1.1.tar.gz/mengyu_distributions-1.1/mengyu_distributions/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
TODO: Fill out all functions below
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
format(self.mean, self.stdev, self.p, self.n) | PypiClean |
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/policyinsights/get_remediation_at_resource.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetRemediationAtResourceResult',
'AwaitableGetRemediationAtResourceResult',
'get_remediation_at_resource',
'get_remediation_at_resource_output',
]
@pulumi.output_type
class GetRemediationAtResourceResult:
"""
The remediation definition.
"""
def __init__(__self__, correlation_id=None, created_on=None, deployment_status=None, failure_threshold=None, filters=None, id=None, last_updated_on=None, name=None, parallel_deployments=None, policy_assignment_id=None, policy_definition_reference_id=None, provisioning_state=None, resource_count=None, resource_discovery_mode=None, status_message=None, system_data=None, type=None):
if correlation_id and not isinstance(correlation_id, str):
raise TypeError("Expected argument 'correlation_id' to be a str")
pulumi.set(__self__, "correlation_id", correlation_id)
if created_on and not isinstance(created_on, str):
raise TypeError("Expected argument 'created_on' to be a str")
pulumi.set(__self__, "created_on", created_on)
if deployment_status and not isinstance(deployment_status, dict):
raise TypeError("Expected argument 'deployment_status' to be a dict")
pulumi.set(__self__, "deployment_status", deployment_status)
if failure_threshold and not isinstance(failure_threshold, dict):
raise TypeError("Expected argument 'failure_threshold' to be a dict")
pulumi.set(__self__, "failure_threshold", failure_threshold)
if filters and not isinstance(filters, dict):
raise TypeError("Expected argument 'filters' to be a dict")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_updated_on and not isinstance(last_updated_on, str):
raise TypeError("Expected argument 'last_updated_on' to be a str")
pulumi.set(__self__, "last_updated_on", last_updated_on)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if parallel_deployments and not isinstance(parallel_deployments, int):
raise TypeError("Expected argument 'parallel_deployments' to be a int")
pulumi.set(__self__, "parallel_deployments", parallel_deployments)
if policy_assignment_id and not isinstance(policy_assignment_id, str):
raise TypeError("Expected argument 'policy_assignment_id' to be a str")
pulumi.set(__self__, "policy_assignment_id", policy_assignment_id)
if policy_definition_reference_id and not isinstance(policy_definition_reference_id, str):
raise TypeError("Expected argument 'policy_definition_reference_id' to be a str")
pulumi.set(__self__, "policy_definition_reference_id", policy_definition_reference_id)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_count and not isinstance(resource_count, int):
raise TypeError("Expected argument 'resource_count' to be a int")
pulumi.set(__self__, "resource_count", resource_count)
if resource_discovery_mode and not isinstance(resource_discovery_mode, str):
raise TypeError("Expected argument 'resource_discovery_mode' to be a str")
pulumi.set(__self__, "resource_discovery_mode", resource_discovery_mode)
if status_message and not isinstance(status_message, str):
raise TypeError("Expected argument 'status_message' to be a str")
pulumi.set(__self__, "status_message", status_message)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="correlationId")
def correlation_id(self) -> str:
"""
The remediation correlation Id. Can be used to find events related to the remediation in the activity log.
"""
return pulumi.get(self, "correlation_id")
@property
@pulumi.getter(name="createdOn")
def created_on(self) -> str:
"""
The time at which the remediation was created.
"""
return pulumi.get(self, "created_on")
@property
@pulumi.getter(name="deploymentStatus")
def deployment_status(self) -> 'outputs.RemediationDeploymentSummaryResponse':
"""
The deployment status summary for all deployments created by the remediation.
"""
return pulumi.get(self, "deployment_status")
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional['outputs.RemediationPropertiesResponseFailureThreshold']:
"""
The remediation failure threshold settings
"""
return pulumi.get(self, "failure_threshold")
@property
@pulumi.getter
def filters(self) -> Optional['outputs.RemediationFiltersResponse']:
"""
The filters that will be applied to determine which resources to remediate.
"""
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the remediation.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastUpdatedOn")
def last_updated_on(self) -> str:
"""
The time at which the remediation was last updated.
"""
return pulumi.get(self, "last_updated_on")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the remediation.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="parallelDeployments")
def parallel_deployments(self) -> Optional[int]:
"""
Determines how many resources to remediate at any given time. Can be used to increase or reduce the pace of the remediation. If not provided, the default parallel deployments value is used.
"""
return pulumi.get(self, "parallel_deployments")
@property
@pulumi.getter(name="policyAssignmentId")
def policy_assignment_id(self) -> Optional[str]:
"""
The resource ID of the policy assignment that should be remediated.
"""
return pulumi.get(self, "policy_assignment_id")
@property
@pulumi.getter(name="policyDefinitionReferenceId")
def policy_definition_reference_id(self) -> Optional[str]:
"""
The policy definition reference ID of the individual definition that should be remediated. Required when the policy assignment being remediated assigns a policy set definition.
"""
return pulumi.get(self, "policy_definition_reference_id")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The status of the remediation. This refers to the entire remediation task, not individual deployments. Allowed values are Evaluating, Canceled, Cancelling, Failed, Complete, or Succeeded.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceCount")
def resource_count(self) -> Optional[int]:
"""
Determines the max number of resources that can be remediated by the remediation job. If not provided, the default resource count is used.
"""
return pulumi.get(self, "resource_count")
@property
@pulumi.getter(name="resourceDiscoveryMode")
def resource_discovery_mode(self) -> Optional[str]:
"""
The way resources to remediate are discovered. Defaults to ExistingNonCompliant if not specified.
"""
return pulumi.get(self, "resource_discovery_mode")
@property
@pulumi.getter(name="statusMessage")
def status_message(self) -> str:
"""
The remediation status message. Provides additional details regarding the state of the remediation.
"""
return pulumi.get(self, "status_message")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the remediation.
"""
return pulumi.get(self, "type")
class AwaitableGetRemediationAtResourceResult(GetRemediationAtResourceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRemediationAtResourceResult(
correlation_id=self.correlation_id,
created_on=self.created_on,
deployment_status=self.deployment_status,
failure_threshold=self.failure_threshold,
filters=self.filters,
id=self.id,
last_updated_on=self.last_updated_on,
name=self.name,
parallel_deployments=self.parallel_deployments,
policy_assignment_id=self.policy_assignment_id,
policy_definition_reference_id=self.policy_definition_reference_id,
provisioning_state=self.provisioning_state,
resource_count=self.resource_count,
resource_discovery_mode=self.resource_discovery_mode,
status_message=self.status_message,
system_data=self.system_data,
type=self.type)
def get_remediation_at_resource(remediation_name: Optional[str] = None,
resource_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRemediationAtResourceResult:
"""
Gets an existing remediation at resource scope.
Azure REST API version: 2021-10-01.
:param str remediation_name: The name of the remediation.
:param str resource_id: Resource ID.
"""
__args__ = dict()
__args__['remediationName'] = remediation_name
__args__['resourceId'] = resource_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:policyinsights:getRemediationAtResource', __args__, opts=opts, typ=GetRemediationAtResourceResult).value
return AwaitableGetRemediationAtResourceResult(
correlation_id=pulumi.get(__ret__, 'correlation_id'),
created_on=pulumi.get(__ret__, 'created_on'),
deployment_status=pulumi.get(__ret__, 'deployment_status'),
failure_threshold=pulumi.get(__ret__, 'failure_threshold'),
filters=pulumi.get(__ret__, 'filters'),
id=pulumi.get(__ret__, 'id'),
last_updated_on=pulumi.get(__ret__, 'last_updated_on'),
name=pulumi.get(__ret__, 'name'),
parallel_deployments=pulumi.get(__ret__, 'parallel_deployments'),
policy_assignment_id=pulumi.get(__ret__, 'policy_assignment_id'),
policy_definition_reference_id=pulumi.get(__ret__, 'policy_definition_reference_id'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
resource_count=pulumi.get(__ret__, 'resource_count'),
resource_discovery_mode=pulumi.get(__ret__, 'resource_discovery_mode'),
status_message=pulumi.get(__ret__, 'status_message'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_remediation_at_resource)
def get_remediation_at_resource_output(remediation_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRemediationAtResourceResult]:
"""
Gets an existing remediation at resource scope.
Azure REST API version: 2021-10-01.
:param str remediation_name: The name of the remediation.
:param str resource_id: Resource ID.
"""
... | PypiClean |
/v1/model/update_vault_request.py |
import pprint
import re
import six
class UpdateVaultRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'vault_id': 'str',
'body': 'VaultUpdateReq'
}
attribute_map = {
'vault_id': 'vault_id',
'body': 'body'
}
def __init__(self, vault_id=None, body=None):
"""UpdateVaultRequest - a model defined in huaweicloud sdk"""
self._vault_id = None
self._body = None
self.discriminator = None
self.vault_id = vault_id
if body is not None:
self.body = body
@property
def vault_id(self):
"""Gets the vault_id of this UpdateVaultRequest.
:return: The vault_id of this UpdateVaultRequest.
:rtype: str
"""
return self._vault_id
@vault_id.setter
def vault_id(self, vault_id):
"""Sets the vault_id of this UpdateVaultRequest.
:param vault_id: The vault_id of this UpdateVaultRequest.
:type: str
"""
self._vault_id = vault_id
@property
def body(self):
"""Gets the body of this UpdateVaultRequest.
:return: The body of this UpdateVaultRequest.
:rtype: VaultUpdateReq
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this UpdateVaultRequest.
:param body: The body of this UpdateVaultRequest.
:type: VaultUpdateReq
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateVaultRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | PypiClean |
/tensorflow_tflex-1.13.1rc1-cp27-cp27mu-manylinux1_x86_64.whl/tensorflow_tflex-1.13.1rc1.data/purelib/tensorflow/core/framework/op_def_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import attr_value_pb2 as tensorflow_dot_core_dot_framework_dot_attr__value__pb2
from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/op_def.proto',
package='tensorflow',
syntax='proto3',
serialized_options=_b('\n\030org.tensorflow.frameworkB\013OpDefProtosP\001Z=github.com/tensorflow/tensorflow/tensorflow/go/core/framework\370\001\001'),
serialized_pb=_b('\n&tensorflow/core/framework/op_def.proto\x12\ntensorflow\x1a*tensorflow/core/framework/attr_value.proto\x1a%tensorflow/core/framework/types.proto\"\xb8\x05\n\x05OpDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\tinput_arg\x18\x02 \x03(\x0b\x32\x18.tensorflow.OpDef.ArgDef\x12,\n\noutput_arg\x18\x03 \x03(\x0b\x32\x18.tensorflow.OpDef.ArgDef\x12\'\n\x04\x61ttr\x18\x04 \x03(\x0b\x32\x19.tensorflow.OpDef.AttrDef\x12.\n\x0b\x64\x65precation\x18\x08 \x01(\x0b\x32\x19.tensorflow.OpDeprecation\x12\x0f\n\x07summary\x18\x05 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x06 \x01(\t\x12\x16\n\x0eis_commutative\x18\x12 \x01(\x08\x12\x14\n\x0cis_aggregate\x18\x10 \x01(\x08\x12\x13\n\x0bis_stateful\x18\x11 \x01(\x08\x12\"\n\x1a\x61llows_uninitialized_input\x18\x13 \x01(\x08\x1a\x9f\x01\n\x06\x41rgDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\"\n\x04type\x18\x03 \x01(\x0e\x32\x14.tensorflow.DataType\x12\x11\n\ttype_attr\x18\x04 \x01(\t\x12\x13\n\x0bnumber_attr\x18\x05 \x01(\t\x12\x16\n\x0etype_list_attr\x18\x06 \x01(\t\x12\x0e\n\x06is_ref\x18\x10 \x01(\x08\x1a\xbd\x01\n\x07\x41ttrDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12,\n\rdefault_value\x18\x03 \x01(\x0b\x32\x15.tensorflow.AttrValue\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x13\n\x0bhas_minimum\x18\x05 \x01(\x08\x12\x0f\n\x07minimum\x18\x06 \x01(\x03\x12-\n\x0e\x61llowed_values\x18\x07 \x01(\x0b\x32\x15.tensorflow.AttrValue\"5\n\rOpDeprecation\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x13\n\x0b\x65xplanation\x18\x02 \x01(\t\"\'\n\x06OpList\x12\x1d\n\x02op\x18\x01 \x03(\x0b\x32\x11.tensorflow.OpDefBk\n\x18org.tensorflow.frameworkB\x0bOpDefProtosP\x01Z=github.com/tensorflow/tensorflow/tensorflow/go/core/framework\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_attr__value__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,])
_OPDEF_ARGDEF = _descriptor.Descriptor(
name='ArgDef',
full_name='tensorflow.OpDef.ArgDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.OpDef.ArgDef.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='tensorflow.OpDef.ArgDef.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='tensorflow.OpDef.ArgDef.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type_attr', full_name='tensorflow.OpDef.ArgDef.type_attr', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='number_attr', full_name='tensorflow.OpDef.ArgDef.number_attr', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type_list_attr', full_name='tensorflow.OpDef.ArgDef.type_list_attr', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_ref', full_name='tensorflow.OpDef.ArgDef.is_ref', index=6,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=483,
serialized_end=642,
)
_OPDEF_ATTRDEF = _descriptor.Descriptor(
name='AttrDef',
full_name='tensorflow.OpDef.AttrDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.OpDef.AttrDef.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='tensorflow.OpDef.AttrDef.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_value', full_name='tensorflow.OpDef.AttrDef.default_value', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='tensorflow.OpDef.AttrDef.description', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='has_minimum', full_name='tensorflow.OpDef.AttrDef.has_minimum', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='minimum', full_name='tensorflow.OpDef.AttrDef.minimum', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowed_values', full_name='tensorflow.OpDef.AttrDef.allowed_values', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=645,
serialized_end=834,
)
_OPDEF = _descriptor.Descriptor(
name='OpDef',
full_name='tensorflow.OpDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.OpDef.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_arg', full_name='tensorflow.OpDef.input_arg', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_arg', full_name='tensorflow.OpDef.output_arg', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='attr', full_name='tensorflow.OpDef.attr', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deprecation', full_name='tensorflow.OpDef.deprecation', index=4,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='summary', full_name='tensorflow.OpDef.summary', index=5,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='tensorflow.OpDef.description', index=6,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_commutative', full_name='tensorflow.OpDef.is_commutative', index=7,
number=18, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_aggregate', full_name='tensorflow.OpDef.is_aggregate', index=8,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_stateful', full_name='tensorflow.OpDef.is_stateful', index=9,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allows_uninitialized_input', full_name='tensorflow.OpDef.allows_uninitialized_input', index=10,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_OPDEF_ARGDEF, _OPDEF_ATTRDEF, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=834,
)
_OPDEPRECATION = _descriptor.Descriptor(
name='OpDeprecation',
full_name='tensorflow.OpDeprecation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='tensorflow.OpDeprecation.version', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='explanation', full_name='tensorflow.OpDeprecation.explanation', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=836,
serialized_end=889,
)
_OPLIST = _descriptor.Descriptor(
name='OpList',
full_name='tensorflow.OpList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='op', full_name='tensorflow.OpList.op', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=891,
serialized_end=930,
)
_OPDEF_ARGDEF.fields_by_name['type'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_OPDEF_ARGDEF.containing_type = _OPDEF
_OPDEF_ATTRDEF.fields_by_name['default_value'].message_type = tensorflow_dot_core_dot_framework_dot_attr__value__pb2._ATTRVALUE
_OPDEF_ATTRDEF.fields_by_name['allowed_values'].message_type = tensorflow_dot_core_dot_framework_dot_attr__value__pb2._ATTRVALUE
_OPDEF_ATTRDEF.containing_type = _OPDEF
_OPDEF.fields_by_name['input_arg'].message_type = _OPDEF_ARGDEF
_OPDEF.fields_by_name['output_arg'].message_type = _OPDEF_ARGDEF
_OPDEF.fields_by_name['attr'].message_type = _OPDEF_ATTRDEF
_OPDEF.fields_by_name['deprecation'].message_type = _OPDEPRECATION
_OPLIST.fields_by_name['op'].message_type = _OPDEF
DESCRIPTOR.message_types_by_name['OpDef'] = _OPDEF
DESCRIPTOR.message_types_by_name['OpDeprecation'] = _OPDEPRECATION
DESCRIPTOR.message_types_by_name['OpList'] = _OPLIST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
OpDef = _reflection.GeneratedProtocolMessageType('OpDef', (_message.Message,), dict(
ArgDef = _reflection.GeneratedProtocolMessageType('ArgDef', (_message.Message,), dict(
DESCRIPTOR = _OPDEF_ARGDEF,
__module__ = 'tensorflow.core.framework.op_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpDef.ArgDef)
))
,
AttrDef = _reflection.GeneratedProtocolMessageType('AttrDef', (_message.Message,), dict(
DESCRIPTOR = _OPDEF_ATTRDEF,
__module__ = 'tensorflow.core.framework.op_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpDef.AttrDef)
))
,
DESCRIPTOR = _OPDEF,
__module__ = 'tensorflow.core.framework.op_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpDef)
))
_sym_db.RegisterMessage(OpDef)
_sym_db.RegisterMessage(OpDef.ArgDef)
_sym_db.RegisterMessage(OpDef.AttrDef)
OpDeprecation = _reflection.GeneratedProtocolMessageType('OpDeprecation', (_message.Message,), dict(
DESCRIPTOR = _OPDEPRECATION,
__module__ = 'tensorflow.core.framework.op_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpDeprecation)
))
_sym_db.RegisterMessage(OpDeprecation)
OpList = _reflection.GeneratedProtocolMessageType('OpList', (_message.Message,), dict(
DESCRIPTOR = _OPLIST,
__module__ = 'tensorflow.core.framework.op_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpList)
))
_sym_db.RegisterMessage(OpList)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
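# Illustrative usage sketch added for clarity (not part of the generated file).
# It assumes this module is importable as part of the tensorflow.core.framework
# package so that its *_pb2 dependencies resolve; the op and attr names below
# are made up for the example.
if __name__ == "__main__":
    op = OpDef(name="ExampleOp", summary="Illustrative op definition.")
    arg = op.input_arg.add()          # repeated ArgDef field
    arg.name = "x"
    arg.type_attr = "T"
    attr = op.attr.add()              # repeated AttrDef field
    attr.name = "T"
    attr.type = "type"
    data = op.SerializeToString()     # round-trip through the wire format
    assert OpDef.FromString(data).name == "ExampleOp"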
/robotframework-tidy-4.5.0.tar.gz/robotframework-tidy-4.5.0/robotidy/transformers/SmartSortKeywords.py
from robot.api.parsing import EmptyLine
from robot.parsing.model.blocks import Keyword
from robotidy.disablers import skip_section_if_disabled
from robotidy.transformers import Transformer
class SmartSortKeywords(Transformer):
"""
Sort keywords in ``*** Keywords ***`` section.
    By default, sorting is case-insensitive, but keywords with a leading underscore go to the bottom. Other underscores
    are treated as spaces.
Empty lines (or lack of them) between keywords are preserved.
    The following code:
```robotframework
*** Keywords ***
_my secrete keyword
Kw2
My Keyword
Kw1
my_another_cool_keyword
my another keyword
Kw3
```
Will be transformed to:
```robotframework
*** Keywords ***
my_another_cool_keyword
my another keyword
Kw3
My Keyword
Kw1
_my secrete keyword
Kw2
```
    The default behaviour can be changed using the following parameters: ``case_insensitive = True``,
    ``ignore_leading_underscore = False`` and ``ignore_other_underscore = True``.
"""
ENABLED = False
def __init__(self, case_insensitive=True, ignore_leading_underscore=False, ignore_other_underscore=True):
super().__init__()
self.ci = case_insensitive
self.ilu = ignore_leading_underscore
self.iou = ignore_other_underscore
@skip_section_if_disabled
def visit_KeywordSection(self, node): # noqa
before, after = self.leave_only_keywords(node)
empty_lines = self.pop_empty_lines(node)
node.body.sort(key=self.sort_function)
self.append_empty_lines(node, empty_lines)
node.body = before + node.body + after
return node
@staticmethod
def pop_empty_lines(node):
all_empty = []
for kw in node.body:
kw_empty = []
while kw.body and isinstance(kw.body[-1], EmptyLine):
kw_empty.insert(0, kw.body.pop())
all_empty.append(kw_empty)
return all_empty
@staticmethod
def leave_only_keywords(node):
before = []
after = []
while node.body and not isinstance(node.body[0], Keyword):
before.append(node.body.pop(0))
while node.body and not isinstance(node.body[-1], Keyword):
after.append(node.body.pop(-1))
return before, after
def sort_function(self, kw):
name = kw.name
if self.ci:
name = name.casefold().upper() # to make sure that letters go before underscore
if self.ilu:
name = name.lstrip("_")
if self.iou:
index = len(name) - len(name.lstrip("_"))
name = name[:index] + name[index:].replace("_", " ")
return name
@staticmethod
def append_empty_lines(node, empty_lines):
for kw, lines in zip(node.body, empty_lines):
            kw.body.extend(lines)
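# Illustrative sketch added for clarity (not part of the original transformer):
# reimplements the sort_function logic above with the default settings to show
# the keyword order described in the class docstring.
if __name__ == "__main__":
    def _sort_key(name, ci=True, ilu=False, iou=True):
        if ci:
            name = name.casefold().upper()  # letters sort before underscore
        if ilu:
            name = name.lstrip("_")
        if iou:
            index = len(name) - len(name.lstrip("_"))
            name = name[:index] + name[index:].replace("_", " ")
        return name
    names = ["_my secrete keyword", "My Keyword", "my_another_cool_keyword", "my another keyword"]
    print(sorted(names, key=_sort_key))
    # ['my_another_cool_keyword', 'my another keyword', 'My Keyword', '_my secrete keyword']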
/pepper_polish-0.1.5-cp36-cp36m-macosx_10_6_intel.whl/pepper_hp/modules/python/MergeVCFsWithSimplify.py
import collections
from copy import deepcopy
import itertools
from threading import Lock
from datetime import datetime
from distutils.version import LooseVersion
import intervaltree
import pysam
import sys
import os
import re
from pepper_hp.build import PEPPER_HP
from pepper_hp.modules.python.ImageGenerationUI import UserInterfaceSupport
"""Reading and writing of Variant Call Format files."""
"""
REUSE NOTICE:
USING FROM: https://raw.githubusercontent.com/nanoporetech/medaka/master/medaka/vcf.py
LICENSE NOTICE: https://github.com/nanoporetech/medaka/blob/master/LICENSE.md
"""
def self_return(x):
"""Return the input."""
return x
# Source: Table1 in 'The Variant Call Format Specification VCFv4.3', Table 1
# Tuples below are (number, type), where number can be:
# A: The field has one value per alternate allele
# R: The field has one value for each possible allele,
# including the reference
# G: The field has one value for each possible genotype
# .(dot): The number of possible values varies, is unknown or unbounded
reserved_info_fields = {
'AA': (1, str), 'AC': ('A', int), 'AD': ('R', int), 'ADF': ('R', int),
'ADR': ('R', int), 'AF': ('A', float), 'AN': (1, int), 'BQ': (1, float),
'CIGAR': ('A', str), 'DB': (0, self_return), 'DP': (1, int),
'END': (1, int), 'H2': (0, self_return), 'H3': (0, self_return),
'MQ': (1, self_return), 'MQ0': (1, int), 'NS': (1, int),
'SB': ('.', self_return), 'SOMATIC': (0, self_return),
'VALIDATED': (0, self_return), '1000G': (0, self_return)}
own_info_fields = {'SCORES': ('R', float)}
all_info_fields = reserved_info_fields.copy()
all_info_fields.update(own_info_fields)
def parse_tags_to_string(tags):
"""Create string representation of a dictionary of tags.
:param tags: dictionary containing "tag" meta data of a variant.
:returns: the string representation of the tags.
"""
str_tags = []
for key, value in sorted(tags.items()):
# If key is of type 'Flag', print only key, else 'key=value'
if value is True:
str_tags.append(key)
else:
if isinstance(value, (tuple, list)):
value = ','.join((str(x) for x in value))
str_tags.append('{}={}'.format(key, value))
return ';'.join(str_tags)
def parse_string_to_tags(string, splitter=','):
"""Create a dictionary of "tag" meta data from a string representation.
:param string: string containing tags.
:param splitter: delimiter of array-valued items.
:returns: dictionary of tags.
"""
tags = {}
for field in string.split(';'):
try:
tag, value = field.split('=')
if tag in all_info_fields.keys():
_type = all_info_fields[tag][1]
value = [_type(x) for x in value.split(splitter)]
if len(value) == 1:
value = value[0]
except ValueError:
tag = field
value = True
tags[tag] = value
return tags
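# Illustrative helper added for clarity (not in the original module): shows how
# the two tag helpers above round-trip a VCF INFO string.
def _tag_round_trip_example():
    tags = parse_string_to_tags('DP=14;AF=0.5;SOMATIC')
    # tags == {'DP': 14, 'AF': 0.5, 'SOMATIC': True}
    return parse_tags_to_string(tags)  # 'AF=0.5;DP=14;SOMATIC'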
class MetaInfo(object):
"""Representation of a variant file meta data."""
__valid_groups__ = ('INFO', 'FILTER', 'FORMAT')
__valid_group_sort__ = {v: k for k, v in enumerate(__valid_groups__)}
__valid_non_int_nums__ = {'A', 'R', 'G', '.'}
__valid_types__ = {'Integer', 'Float', 'Flag', 'Character', 'String'}
def __init__(self, group, ident, number, typ, descr):
"""Initialize meta info storage for VCF header.
:param group: str, one of {'INFO', 'FILTER', 'FORMAT'}
:param ident: str, short name as it occurs in a VCF data line.
:param number: int or one of {'A', 'R', 'G', '.'}.
:param type: one of {'Integer', 'Float', 'Flag', 'Character', 'String'}
:param descr: str, free form description.
"""
if group not in self.__valid_groups__:
raise ValueError(
'Group {} is not one of {}'.format(
group, self.__valid_groups__))
if not isinstance(number, int) \
and not (isinstance(number, str) and number.isdigit()) \
and number not in self.__valid_non_int_nums__:
raise ValueError(
'Number {} is not an int, digit str or one of {}'.format(
number, self.__valid_non_int_nums__))
if typ not in self.__valid_types__:
raise ValueError('typ {} is not one of {}'.format(
typ, self.__valid_types__))
self.group = group
self.ident = ident
self.number = number
self.typ = typ
self.descr = descr
def __repr__(self):
"""Create representation of meta data item in VCF format."""
return '{}=<ID={},Number={},Type={},Description="{}">'.format(
self.group, self.ident, self.number, self.typ, self.descr)
def __str__(self):
"""Return meta data as string."""
return self.__repr__()
class Variant(object):
"""Representation of a genomic variant."""
# TODO: ref/alt could be a symbolic allele "<ID>".
# TODO: alt could contain breakends.
# TODO: Handle genomic fields.
def __init__(
self, chrom, pos, ref,
alt='.', ident='.', qual='.', filt='.', info='.',
genotype_data=None):
"""Initialize a variant.
:param chrom: reference sequence (chromosome).
:param pos: position in reference chrom.
:param ref: reference allele
:param alt: alternative alleles.
        :param ident: variant identification.
        :param qual: variant quality.
        :param filt: filter status.
:param info: variant info, a dictionary or VCF compatible string.
:param genotype_data: dictionary specifying genotype information.
"""
self.chrom = chrom
self.pos = int(pos)
self.ref = ref.upper()
# self.alt should be a list/tuple of alternatives
self.alt = alt.split(',') if isinstance(alt, str) else alt
self.ident = str(ident)
self.qual = float(qual) if qual != '.' else qual
self.filt = filt.split(';') if ';' in filt else filt
if isinstance(info, dict):
self.info = info
else:
self.info = parse_string_to_tags(info)
if genotype_data is not None:
self.genotype_data = self._sort_genotype_data(genotype_data)
else:
self.genotype_data = collections.OrderedDict()
def __eq__(self, other):
"""Equality comparison of two variants."""
for field in (
'chrom', 'pos', 'ident', 'ref', 'alt',
'qual', 'filt', 'info', 'genotype_data'):
if getattr(self, field) != getattr(other, field):
return False
return True
def __ne__(self, other):
"""Inequality comparison of two variants."""
return not self.__eq__(other)
@staticmethod
def _sort_genotype_data(gd):
"""Sort genotype data."""
# GT must be first if present
sorted_keys = ['GT'] if 'GT' in gd else []
# others follow in alphabetical order
sorted_keys.extend(k for k in sorted(gd) if k != 'GT')
# order dict returned to retain order
return collections.OrderedDict((k, gd[k]) for k in sorted_keys)
@property
def genotype_keys(self):
"""Return genotype format field for writing to`.vcf` file."""
return ':'.join(self.genotype_data)
@property
def genotype_values(self):
"""Return the genotype data values for writing to `.vcf` file."""
return ':'.join(str(v) for v in self.genotype_data.values())
@property
def info_string(self):
"""Return info field for writing to `.vcf` file."""
return parse_tags_to_string(self.info)
@property
def gt(self):
"""Return the genotype (or None) for each sample."""
try:
gt = self.genotype_data['GT']
except(KeyError):
return None
else:
gt = gt.replace('|', '/').split('/')
return tuple(int(x) for x in gt)
@property
def phased(self):
"""Specify whether variant is phased."""
try:
gt = self.genotype_data['GT']
except(KeyError):
return None
else:
phased = True if '|' in gt else False
return phased
@property
def alleles(self):
"""Return alleles present in genotype."""
all_alleles = [self.ref] + self.alt
if self.gt is None:
return None
else:
return tuple([all_alleles[i] for i in self.gt])
@classmethod
def from_text(cls, line):
"""Create a `Variant` from a `.vcf` formatted line.
:param line: string representing variant.
"""
chrom, pos, ident, ref, alt, qual, filt, info, \
genotype_keys, genotype_values, *others = line.split('\t')
pos = int(pos)
pos -= 1 # VCF is 1-based, python 0-based
gt = cls._sort_genotype_data(
dict(zip(genotype_keys.split(':'),
genotype_values.split(':'))))
valid = all(x in ['A', 'C', 'G', 'T'] for x in list(ref))
instance = cls(
chrom, pos, ref,
alt=alt, ident=ident, qual=qual, filt=filt, info=info,
genotype_data=gt)
return instance, valid
def add_tag(self, tag, value=None):
"""Add a tag (with value).
:param tag: tag name.
:param value: tag value.
"""
self.info[tag] = value
# Remove default value if more than one exists
if len(self.info.keys()) > 0:
self.info.pop('.', None)
def get_tag(self, tag):
"""Get the value of a tag by name.
:param tag: tag name.
"""
return self.info[tag]
def __repr__(self):
"""Return the representation of the `Variant`."""
attributes = {}
for field in (
'chrom', 'pos', 'ref', 'alt', 'ident',
'qual', 'filt', 'info_string'):
attributes[field] = getattr(self, field)
attributes['genotype_data'] = ';'.join(
'{}={}'.format(*d) for d in self.genotype_data.items())
return (
"Variant('{chrom}', {pos}, '{ref}', alt={alt}, ident={ident}, "
"qual={qual}, filt={filt}, info='{info_string}', "
"genotype_data='{genotype_data}')".format(**attributes))
def deep_copy(self):
"""Return the (deep)copy of the `Variant`."""
return deepcopy(self)
def to_dict(self):
"""Return a dictionary representation."""
d = dict(alt=','.join(self.alt))
for attr in ['chrom', 'pos', 'qual', 'ident', 'filt', 'ref']:
d[attr] = getattr(self, attr)
d.update(self.info)
d.update(self.genotype_data)
return d
def trim(self):
"""Return new trimmed Variant with minimal ref and alt sequence."""
def get_trimmed_start_ref_alt(seqs):
def trim_start(seqs):
min_len = min([len(s) for s in seqs])
trim_start = 0
for bases in zip(*seqs):
bases = list(bases)
bases_same = len(set(bases)) == 1
if not bases_same or trim_start == min_len - 1:
break
if bases_same:
trim_start += 1
return trim_start, [s[trim_start:] for s in seqs]
# trim ends
rev_seqs = [s[::-1] for s in seqs]
_, trimmed_rev_seqs = trim_start(rev_seqs)
seqs = [s[::-1] for s in trimmed_rev_seqs]
trim_start, seqs = trim_start(seqs)
return trim_start, seqs
trimmed = self.deep_copy()
seqs = [trimmed.ref] + trimmed.alt
trim_start, (ref, *alt) = get_trimmed_start_ref_alt(seqs)
trimmed.pos += trim_start
trimmed.ref = ref
trimmed.alt = alt
return trimmed
def split_haplotypes(self):
"""Split multiploid variants into list of non-ref haploid variants.
:returns: (haplotype number, `vcf.Variant` or None for ref allele)
"""
if 'GT' not in self.genotype_data:
return tuple()
vs = []
genotype_data = self.genotype_data.copy()
genotype_data['GT'] = '1/1'
for hap_n, n in enumerate(self.gt, 1):
if n == 0:
v = None
else:
v = Variant(
self.chrom, self.pos, self.ref, self.alt[n - 1],
qual=self.qual, info=self.info.copy(),
genotype_data=genotype_data)
vs.append((hap_n, v))
return tuple(vs)
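# Illustrative helper added for clarity (not in the original module): trim()
# strips bases shared by ref and alt, keeping the minimal representation.
def _trim_example():
    v = Variant('chr1', 9, 'TCC', alt='TCG')
    return v.trim()  # Variant at pos 11 with ref 'C', alt ['G']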
def loose_version_sort(it, key=None):
def version_sorter(x):
return LooseVersion(x) if key is None else LooseVersion(key(x))
it = list(it)
try:
result = sorted(it, key=version_sorter)
except Exception:
result = sorted(it, key=key)
return result
class VCFWriter(object):
"""Writing of `Variants` to file."""
version_options = {'4.3', '4.1'}
def __init__(self, filename, mode='w',
header=(
'CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL',
'FILTER', 'INFO', 'FORMAT', 'SAMPLE'),
contigs=None,
meta_info=None,
version='4.1'
):
"""Initialize a VCF writer.
        Some tools cannot read VCFv4.3, preferring VCFv4.1 - so this class
        writes VCFv4.1 files by default. VCFv4.3 has a few extra reserved
        fields ('AD', 'ADF', and 'ADR'), but there is no harm in including
        those fields in files written as VCFv4.1 - they are simply not
        recognised and used as reserved fields.
:param filename: output file.
:param header: list of header fields.
:param contigs: contig names.
:param meta_info: meta information to store in header.
:param version: version to write to file.
"""
self.filename = filename
self.mode = mode
self.header = header
if version not in self.version_options:
raise ValueError('version must be one of {}'.format(
self.version_options))
self.version = version
self.meta = ['fileformat=VCFv{}'.format(self.version)]
if meta_info is not None:
# try to sort so we get INFO, FILTER, FORMAT in that order
try:
meta_info.sort(
key=lambda x: MetaInfo.__valid_group_sort__[x.group])
except Exception:
# we probably have a pre-formed meta str we assume are in order
pass
meta_info = [str(m) for m in meta_info]
# remove version if this is present in meta_info
meta_info = [m for m in meta_info if 'fileformat=VCFv' not in m]
self.meta.extend(meta_info)
if contigs is not None:
self.meta.extend(['contig=<ID={},length={}>'.format(c, ln) for c, ln in contigs])
def __enter__(self):
"""Open and prepare file as a managed context."""
self.handle = open(self.filename, self.mode, encoding='utf-8')
self.handle.write('\n'.join('##' + line for line in self.meta) + '\n')
self.handle.write('#' + '\t'.join(self.header) + '\n')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Close the file when context is left."""
self.handle.close()
def write_variants(self, variants, sort=True):
"""Write variants to file, optionally sorting before writing."""
if sort:
variants = loose_version_sort(variants, key=lambda v: '{}-{}'.format(v.chrom, v.pos))
for variant in variants:
self.write_variant(variant)
def write_variant(self, variant):
"""Write a single variant to file.
:param variant: the `Variant` to write.
"""
variant = variant.deep_copy()
# Some fields can be multiple
for attribute in ('alt', 'filt'):
value = getattr(variant, attribute)
if isinstance(value, (tuple, list)):
setattr(variant, attribute, ','.join(str(x) for x in value))
# Convert info dictionary to string
variant.info = variant.info_string
fields = ('chrom', 'pos', 'ident', 'ref', 'alt', 'qual', 'filt', 'info', 'genotype_keys', 'genotype_values')
elements = [getattr(variant, field.lower()) for field in fields]
if len(elements[self.header.index('REF')]) > 50:
return
# VCF POS field is 1-based
elements[self.header.index('POS')] += 1
line = '\t'.join([str(x) for x in elements])
self.handle.write('{}\n'.format(line))
class VCFReader(object):
"""Basic VCF parser."""
def __init__(self, filename, cache=True):
"""Initialize a VCF parser.
:param filename: .vcf file.
:param cache: if True, all parsed variants are stored in memory for
faster subsequent access.
"""
self.filename = filename
self.cache = cache
self.chroms = list() # keep record of chroms in order they were read
self._indexed = False
self._tree = None
self._parse_lock = Lock()
# Read both metadata and header
self.meta = []
self.header = None
with open(filename, encoding='utf-8') as handle:
for line in handle:
line = line.replace('\n', '')
if line.startswith('##'):
self.meta.append(line[2:])
elif line.startswith('#'):
line = line[1:]
self.header = line.split('\t')
break
def _parse(self):
# just parsing the file to yield records
last_pos = [None, None]
with open(self.filename, encoding='utf-8') as handle:
for index, line in enumerate(handle):
line = line.replace('\n', '')
# Already read meta and header in self.__init__
if line.startswith('#'):
continue
try:
variant, valid = Variant.from_text(line)
except Exception as e:
raise IOError(
'Exception while reading variant #{}.\n'
'Line: {}'.format(index, line)) from e
if not valid:
continue
if variant.chrom != last_pos[0]:
last_pos = [variant.chrom, None]
elif last_pos[1] is not None and last_pos[1] > variant.pos:
raise IOError('.vcf is unsorted at index #{}.'.format(index))
if variant.chrom not in self.chroms:
self.chroms.append(variant.chrom)
yield variant
last_pos[1] = variant.pos
def index(self):
"""Index the input file for faster fetches."""
# calling this method implies caching
self.cache = True
if self._indexed or not self.cache:
return
if self._parse_lock.acquire(blocking=False):
try:
# clear out an incomplete parse, actually this doesn't matter
# since the values in the tree are set-like.
self._tree = collections.defaultdict(intervaltree.IntervalTree)
for variant in self._parse():
self._tree[variant.chrom][
variant.pos:variant.pos + len(variant.ref)] = variant
except Exception:
raise
else:
# record we've done a complete parse
self._indexed = True
finally:
self._parse_lock.release()
else:
# wait for lock to be released, then return
self._parse_lock.acquire(blocking=True)
if not self._indexed:
raise IOError("Waited for parsing, but parsing did not occur.")
def fetch(self, ref_name=None, start=None, end=None, strict=True):
"""Yield all variants spanned by a region.
:param ref_name: reference name (CHROM field of .vcf).
:param start: inclusive start co-ordinate (0-based).
:param end: exclusive end co-ordinate (0-based).
        :param strict: if False, variants overlapping the region but not
            contained entirely within the region are also yielded.
:yields: `Variant` instances.
"""
if start is None:
start = float('-inf')
if end is None:
end = float('inf')
def _tree_search(tree, start, end, strict):
search = tree.overlap if strict else tree.envelop
return search(start, end)
if not self.cache:
# if not using a cache, just keep re-reading the file
for variant in self._parse():
if not all([
ref_name is None or variant.chrom == ref_name,
start is None or variant.pos > start,
end is None or variant.pos + len(variant.ref) < end]):
continue
yield variant
else:
self.index()
# spec says .vcf is sorted, lets follow. Keep ordering of
# chromosomes as they are in the file, and sort positions.
if ref_name is not None:
results = sorted(_tree_search(
self._tree[ref_name], start, end, strict))
else:
results = itertools.chain(*(
sorted(_tree_search(
self._tree[chrom], start, end, strict=True))
for chrom in self.chroms
))
for interval in results:
yield interval.data
def _get_hap(v, trees):
for hap, tree in enumerate(trees, 1):
at_pos = tree.at(v.pos)
for vp in at_pos:
if vp.data is v:
return hap
def _merge_variants(
interval, trees, ref_seq, detailed_info=False, discard_phase=False):
"""Merge variants in an interval into a `Variant` object.
.. note::
        It is assumed that variants in each haplotype have a single alt (an
        exception will be raised if this is not the case) and that if two
        overlapping variants have the same alt, the GT is 1/1, else if the alts
        are different, the GT is 1/2 (or the phased equivalents if
        discard_phase is False).
:param interval: `intervaltree.Interval` with .data containing list of `Variant` objs to be merged
:param trees: iterable of `intervaltree.IntervalTree` objs containing the
`Variant` objs of each haplotype (used to determine which
haplotype variants in `interval` belong to).
:param ref_seq: str, reference sequence
:param detailed_info: bool, whether to add more detail to Variant info.
:param discard_phase: bool, if False, preserve phase, else return unphased
variants.
:returns: `Variant` obj
"""
if interval.end > len(ref_seq):
raise ValueError(
'A variant occurs after the end of the reference sequence.')
ref = ref_seq[interval.begin: interval.end]
alts_dict = collections.OrderedDict()
info = {}
mixed_vars = collections.defaultdict(list)
for v in interval.data:
mixed_vars[str(_get_hap(v, trees))].append(v)
qual = 0.0
for hap, hap_vars in sorted(mixed_vars.items()):
alt = list(ref)
for v in hap_vars:
if len(v.alt) > 1:
raise ValueError('Only single-allele variants from two vcfs can be merged')
start_i = v.pos - interval.begin
end_i = start_i + len(v.ref)
if v.ref != ref[start_i:end_i]:
msg = 'Variant ref {} does not match ref {} at {}:{}'
raise ValueError(msg.format(v.ref, ref[start_i:end_i], v.chrom, v.pos))
# also check ref is correct within unsliced ref seq
assert ref_seq[v.pos:v.pos + len(v.ref)] == v.ref
alt[start_i:end_i] = [''] * len(v.ref)
alt[start_i] = v.alt[0]
# calculate mean GQ for each haplotype, and take mean of these for
# overall qual.
# Use mean otherwise we might need very different thresholds for
# short vs long variants and homozygous vs heterozygous variants.
info['q{}'.format(hap)] = sum((
float(v.qual) for v in hap_vars)) / len(hap_vars)
info['pos{}'.format(hap)] = ','.join(str(v.pos + 1) for v in hap_vars)
if detailed_info:
# + 1 as VCF is 1-based, v.pos is 0 based
info['ref{}'.format(hap)] = ','.join((v.ref for v in hap_vars))
info['alt{}'.format(hap)] = ','.join((v.alt[0] for v in hap_vars))
qual += info['q{}'.format(hap)] / len(mixed_vars)
alts_dict[hap] = ''.join(alt)
haps = list(alts_dict.keys())
alts = list(alts_dict.values())
filtered_alts = list()
filtered_haps = list()
for i, alt in enumerate(alts):
if alt != ref:
filtered_alts.append(alt)
filtered_haps.append(haps[i])
alts = filtered_alts
haps = filtered_haps
gt_sep = '/' if discard_phase else '|'
if len(alts) == 2:
if alts[0] == alts[1]: # homozygous 1/1
gt = gt_sep.join(len(haps) * '1')
alts = alts[:1]
else: # heterozygous 1/2
gt = gt_sep.join(map(str, haps))
else: # heterozygous 0/1
assert len(haps) == 1
gts = [0, 1] # appropriate if hap with variant is 2
if not discard_phase:
if int(haps[0]) == 1:
gts = [1, 0]
gt = gt_sep.join(map(str, gts))
genotype_data = {'GT': gt, 'GQ': qual}
return Variant(
v.chrom, interval.begin, ref, alt=alts,
filt='PASS', info=info, qual=qual,
genotype_data=genotype_data).trim()
def natural_key(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
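# Example (added note): natural_key('chr10') == ['chr', 10, ''], so 'chr2'
# sorts before 'chr10' when this function is used as a sort key.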
class Haploid2DiploidConverter(object):
"""Conversion of multiple haploid `.vcf` files to a single `.vcf`."""
def __init__(self, vcf1, vcf2, ref_fasta, only_overlapping=True, discard_phase=False, detailed_info=False):
"""Initialize variant merging.
Merge variants from two haploid VCFs into a diploid vcf. Variants in
one file which overlap with variants in the other will have their alts
padded.
.. warning::
Variants in a single vcf file should not overlap with each other.
:param vcf1, vcf2: paths to haploid vcf files.
:param ref_fasta: path to reference.fasta file.
:param only_overlapping: bool, merge only overlapping variants (not
adjacent ones).
:param discard_phase: bool, if False, preserve phase, else output
unphased variants.
"""
self.only_overlapping = only_overlapping
self.discard_phase = discard_phase
self.detailed_info = detailed_info
self.vcfs = [VCFReader(vcf) for vcf in (vcf1, vcf2)]
for vcf in self.vcfs:
vcf.index() # create tree
self.fasta = pysam.FastaFile(ref_fasta)
all_contigs = list(set(itertools.chain(*[v.chroms for v in self.vcfs])))
all_contigs = sorted(all_contigs, key=natural_key)
fasta_handler = PEPPER_HP.FASTA_handler(ref_fasta)
sqs = fasta_handler.get_chromosome_names()
self.chroms = []
for sq in sqs:
if sq not in all_contigs:
continue
sq_id = sq
ln = fasta_handler.get_chromosome_sequence_length(sq)
self.chroms.append((sq_id, ln))
def variants(self):
"""Yield diploid variants.
:yields `Variant` objs
"""
for chrom, ln in loose_version_sort(self.chroms):
sys.stderr.write("[" + str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) + "]" + " INFO: MERGING VARIANTS IN CONTIG: " + str(chrom) + "\n")
sys.stderr.flush()
merged = []
trees = [vcf._tree[chrom] for vcf in self.vcfs]
# assign haplotype so that otherwise identical variants in both
# trees are not treated as identical (we need to be able to
# distinguish between 0/1 and 1/1)
for h, tree in enumerate(trees):
for i in tree.all_intervals:
i.data.info['mhap'] = h
comb = intervaltree.IntervalTree(
trees[0].all_intervals.union(trees[1].all_intervals))
# if strict, merge only overlapping intervals (not adjacent ones)
comb.merge_overlaps(
strict=self.only_overlapping,
data_initializer=list(),
data_reducer=lambda x, y: x + [y])
ref_seq = self.fasta.fetch(chrom).upper()
for interval in comb.all_intervals:
merged.append(_merge_variants(
interval, trees, ref_seq,
detailed_info=self.detailed_info,
discard_phase=self.discard_phase))
yield from sorted(merged, key=lambda x: x.pos)
@property
def meta_info(self):
"""Return the meta information for the combined `.vcf` file."""
m = []
for h in 1, 2:
m.append(MetaInfo(
'INFO', 'pos{}'.format(h), '.', 'Integer',
'POS of incorporated variants from haplotype {}'.format(h)))
m.append(MetaInfo(
'INFO', 'q{}'.format(h), 1, 'Float',
'Combined qual score for haplotype {}'.format(h)))
if self.detailed_info:
for h in 1, 2:
m.append(MetaInfo(
'INFO', 'ref{}'.format(h), '2', 'String',
'ref alleles of incorporated variants '
                    'from haplotype {}'.format(h)))
m.append(MetaInfo(
'INFO', 'alt{}'.format(h), '2', 'String',
'alt alleles of incorporated variants '
                    'from haplotype {}'.format(h)))
# where field has one value for each possible genotype, the
# 'Number' value should be ‘G’.
m.append(MetaInfo(
'FORMAT', 'GT', 'G', 'String', 'Genotype'))
# if this is not a float, vcf benchmarking tools may fail
m.append(MetaInfo(
'FORMAT', 'GQ', 'G', 'Float', 'Genotype quality score'))
return m
def simplify_variants(variant):
    """Split a multi-base variant into simpler per-position variants.
    Variants whose reference or shortest alternate allele is a single base are
    returned unchanged (as a one-element list); otherwise the variant is
    decomposed into per-base substitutions plus a final record covering the
    remaining bases.
    """
    ref_seq = variant.ref
alleles = variant.alt
if min([len(allele) for allele in alleles]) == 1 or len(ref_seq) == 1:
return [variant]
window_move = min(len(ref_seq), min([len(allele) for allele in alleles]))
ref_start = variant.pos
simplified_variants = []
for pos in range(ref_start, ref_start + window_move - 1):
indx = pos - ref_start
ref_base = ref_seq[indx]
alts = []
gt_tag = []
gt_count = 1
for allele in alleles:
alt = allele[indx]
if alt != ref_base and alt not in alts:
alts.append(alt)
gt_tag.append(str(gt_count))
gt_count += 1
elif alt != ref_base and alt in alts:
gt_tag.append(str(gt_count-1))
else:
gt_tag.append("0")
if len(alts) == 0:
continue
if len(gt_tag) == 1:
gt_tag.append("0")
GT = '|'.join(gt_tag)
genotype_data = collections.OrderedDict()
genotype_data['GT'] = GT
genotype_data['GQ'] = variant.genotype_data['GQ']
v1 = Variant(chrom=variant.chrom,
pos=pos,
ref=ref_base,
alt=','.join(alts),
ident=variant.ident,
qual=variant.qual,
info=variant.info,
genotype_data=genotype_data)
simplified_variants.append(v1)
ref_out = ref_seq[window_move-1:]
alts = []
gt_tag = []
gt_count = 1
for allele in alleles:
alt = allele[window_move-1:]
if alt != ref_out and alt not in alts:
alts.append(alt)
gt_tag.append(str(gt_count))
gt_count += 1
elif alt != ref_out and alt in alts:
gt_tag.append(str(gt_count-1))
else:
gt_tag.append("0")
if len(alts) > 0:
if len(gt_tag) == 1:
gt_tag.append("0")
GT = '|'.join(gt_tag)
genotype_data = collections.OrderedDict()
genotype_data['GT'] = GT
genotype_data['GQ'] = variant.genotype_data['GQ']
v1 = Variant(chrom=variant.chrom,
pos=ref_start+window_move-1,
ref=ref_out,
alt=','.join(alts),
ident=variant.ident,
qual=variant.qual,
info=variant.info,
genotype_data=genotype_data)
simplified_variants.append(v1)
return simplified_variants
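# Illustrative helper added for clarity (not in the original module): a 3 bp
# substitution ACG->ATG reduces to a single substitution at the middle base.
def _simplify_variants_example():
    v = Variant("chr20", 100, "ACG", alt="ATG", qual=30,
                genotype_data={"GT": "1|0", "GQ": 30})
    return simplify_variants(v)  # one Variant: pos=101, ref='C', alt=['T']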
def haploid2diploid(vcf1, vcf2, ref_fasta, output_dir, adjacent=False, discard_phase=False):
"""Entry point for merging two haploid vcfs into a diploid vcf."""
output_dir = UserInterfaceSupport.handle_output_directory(os.path.abspath(output_dir))
converter = Haploid2DiploidConverter(vcf1, vcf2, ref_fasta)
vcfout = output_dir + "PEPPER_HP_CANDIDATES_MERGED.vcf"
with VCFWriter(vcfout, 'w', version='4.1', contigs=converter.chroms, meta_info=converter.meta_info) as vcf_writer:
for v in converter.variants():
simplified_variants = simplify_variants(v)
for variant in simplified_variants:
                vcf_writer.write_variant(variant)
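# Hedged usage sketch added for illustration (not in the original module); the
# file paths below are placeholders. The merged calls are written to
# PEPPER_HP_CANDIDATES_MERGED.vcf inside the output directory.
if __name__ == "__main__":
    haploid2diploid(
        "candidates_hap1.vcf",
        "candidates_hap2.vcf",
        "reference.fasta",
        "merged_output",
    )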
/pulse2percept-0.7.1.tar.gz/pulse2percept-0.7.1/examples/implants/plot_implants.py
import matplotlib.pyplot as plt
from pulse2percept.implants import *
from pulse2percept.models import AxonMapModel
fig, ax = plt.subplots(ncols=2, figsize=(10, 6))
# For illustrative purposes, also show the map of fiber
# bundles in the optic fiber layer:
model = AxonMapModel()
model.plot(ax=ax[0])
# Argus I is typically implanted at a 30-45deg angle:
ArgusI(rot=-30).plot(ax=ax[0], annotate=True)
ax[0].set_title('Argus I')
model.plot(ax=ax[1])
# Argus II is typically implanted at a 30-45deg angle:
ArgusII(rot=-30).plot(ax=ax[1], annotate=False)
ax[1].set_title('Argus II')
###############################################################################
# PRIMA Bionic Vision System (Pixium Vision SA)
# ----------------------------------------------
#
# :py:class:`~pulse2percept.implants.PRIMA` is a subretinal device developed
# at Stanford University and commercialized by Pixium Vision.
#
# There are several versions of the PRIMA device.
# The device used in clinical trial `NCT03392324`_ consists of 378 85um-wide
# pixels separated by 15um trenches (i.e., 100um pixel pitch), arranged in a
# 2-mm wide hexagonal pattern, and is available in pulse2percept simply as
# :py:class:`~pulse2percept.implants.PRIMA` [Palanker2020]_.
#
# :py:class:`~pulse2percept.implants.PRIMA75` is a newer version of the device,
# consisting of 142 70um-wide pixels separated by 5um trenches (i.e., 75um
# pixel pitch), arranged in a 1-mm wide hexagonal pattern [Lorach2015]_.
#
# .. _NCT03392324: https://www.clinicaltrials.gov/ct2/show/NCT03392324
fig, ax = plt.subplots(ncols=2, figsize=(10, 6))
PRIMA().plot(ax=ax[0])
ax[0].set_title('PRIMA-100')
PRIMA75().plot(ax=ax[1])
ax[1].set_title('PRIMA-75')
###############################################################################
# In addition, the developers are working on miniaturizing the device. At least
# two other prototypes are currently in development:
#
# :py:class:`~pulse2percept.implants.PRIMA55` consists of 50um-wide pixels
# separated by 5um trenches (i.e., 55um pixel pitch), whereas
# :py:class:`~pulse2percept.implants.PRIMA40` consists of 35um-wide pixels
# separated by 5um trenches (i.e., 40um pixel pitch).
#
# The exact geometric arrangement of these two prototypes have not been
# published yet. The devices available in pulse2percept assume that the arrays
# fit on a circular 1mm-diameter substrate, which yields 273 electrodes for
# PRIMA-55 and 532 electrodes for PRIMA-40.
# These prototypes will be updated once more information about them is
# available.
fig, ax = plt.subplots(ncols=2, figsize=(10, 6))
PRIMA55().plot(ax=ax[0])
ax[0].set_title('PRIMA-55')
PRIMA40().plot(ax=ax[1])
ax[1].set_title('PRIMA-40')
###############################################################################
# BVT Bionic Eye System (Bionic Vision Technologies)
# --------------------------------------------------
#
# :py:class:`~pulse2percept.implants.BVT24` is a 24-channel suprachoroidal
# retinal prosthesis [Layton2014]_, which was developed by the Bionic Vision
# Australia Consortium and commercialized by Bionic Vision Technologies (BVT).
#
# Note that the array actually consists of a total of 35 electrodes:
#
# - 33 platinum stimulating electrodes:
#
# - 30 electrodes with 600um diameter (Electrodes 1-20 except
# 9, 17, 19; and Electrodes 21a-m),
#
# - 3 electrodes with 400um diameter (Electrodes 9, 17, 19)
#
# - 2 return electrodes with 2000um diameter (Electrodes 22, 23)
#
# However, Electrodes 21a-m are typically ganged together to provide an
# external ring for common ground. Not counting the two large return electrodes
# leaves 24 stimulating electrodes.
fig, ax = plt.subplots(figsize=(10, 6))
BVT24().plot(ax=ax, annotate=True)
ax.set_title('BVT-24')
###############################################################################
# Alpha-IMS and Alpha-AMS Retinal Implant System (Retina Implant AG)
# ------------------------------------------------------------------
#
# :py:class:`~pulse2percept.implants.AlphaIMS` and
# :py:class:`~pulse2percept.implants.AlphaAMS` are subretinal implants
# developed at the University of Tuebingen, Germany and commercialized by
# Retina Implant AG.
#
# Alpha-IMS consists of 1500 50um-wide square pixels, arranged on a 39x39
# rectangular grid with 72um pixel pitch [Stingl2013]_.
#
# Alpha-AMS is the second generation device, consisting 1600 30um-wide round
# pixels, arranged on a 40x40 rectangular grid with 70um pixel pitch
# [Stingl2017]_.
fig, ax = plt.subplots(ncols=2, figsize=(10, 6))
AlphaIMS().plot(ax=ax[0])
ax[0].set_title('Alpha-IMS')
AlphaAMS().plot(ax=ax[1])
ax[1].set_title('Alpha-AMS')
/OpenDSS_SciVis-1.1.1-py3-none-any.whl/opendss_scivis/read_element_data_old.py
import os
import pandas as pd
def read_element_data(name):
'''
Read data of an element in power grid that contains a time series of monitored power
system quantities from a Comma Separated Value (CSV) file output by OpenDSS.
Input:
name : name of CSV file with or without suffix, e.g. SimpleDemo.csv or SimpleDemo.
Output:
objectData : data contained in CSV file as a two-dimensional data structure with labeled axes
The data format of objectData is a DataFrame as provided by the pandas.read_csv function. An
example of the format for the contents of an OpenDSS CVS file is
hour t(sec) Frequency ... PShaft dSpeed (Deg/sec) dTheta (Deg)
0 0 0.001 60.0000 ... 2.000000e+09 -0.000093 -9.000000e-10
1 0 0.002 60.0000 ... 2.000000e+09 -0.000093 -2.430000e-09
2 0 0.003 60.0000 ... 2.000000e+09 -0.000093 -4.040000e-09
3 0 0.004 60.0000 ... 2.000000e+09 -0.000093 -5.660000e-09
4 0 0.005 60.0000 ... 2.000000e+09 -0.000093 -7.280000e-09
... ... ... ... ... ... ... ...
4979 0 4.980 60.4106 ... 2.000000e+09 -575.187000 2.579670e+00
4980 0 4.981 60.4090 ... 2.000000e+09 -576.661000 2.569620e+00
4981 0 4.982 60.4074 ... 2.000000e+09 -578.108000 2.559550e+00
4982 0 4.983 60.4058 ... 2.000000e+09 -579.527000 2.549440e+00
4983 0 4.984 60.4041 ... 2.000000e+09 -580.918000 2.539320e+00
[4984 rows x 8 columns]
Author: Peter A. Rochford
Xator Corporation
www.xatorcorp.com
Created on Apr 22, 2022
'''
    # Check for a CSV file suffix
file_name, file_extension = os.path.splitext(name)
if file_extension == "":
filename = name + '.csv'
elif name.endswith('.csv'):
filename = name
else:
raise Exception("Invalid file type: " + name)
# Check if file exists and is readable
if not os.path.isfile(filename):
raise Exception("File does not exist: " + filename)
elif not os.access(filename, os.R_OK):
raise Exception("File is not readable: " + filename)
# Load object from CSV file
objectData = pd.read_csv(filename)
    return objectData
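# Hedged usage sketch added for illustration (not in the original module); the
# file name is a placeholder for an OpenDSS monitor CSV export.
if __name__ == "__main__":
    df = read_element_data("SimpleDemo")  # same as read_element_data("SimpleDemo.csv")
    print(df.head())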
/spyc_iot-0.0.8-py3-none-any.whl/spyc_iot/bme280.py
# Updated 2018 and 2020
# This module is based on the below cited resources, which are all
# based on the documentation as provided in the Bosch Data Sheet and
# the sample implementation provided therein.
#
# Final Document: BST-BME280-DS002-15
#
# Authors: Paul Cunnane 2016, Peter Dahlebrg 2016
#
# This module borrows from the Adafruit BME280 Python library. Original
# Copyright notices are reproduced below.
#
# Those libraries were written for the Raspberry Pi. This modification is
# intended for the MicroPython and esp8266 boards.
#
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Based on the BMP280 driver with BME280 changes provided by
# David J Taylor, Edinburgh (www.satsignal.eu)
#
# Based on Adafruit_I2C.py created by Kevin Townsend.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import time
from ustruct import unpack, unpack_from
from array import array
from micropython import const  # const() is used for the register constants below
# BME280 default address.
BME280_I2CADDR = 0x76
# Operating Modes
BME280_OSAMPLE_1 = 1
BME280_OSAMPLE_2 = 2
BME280_OSAMPLE_4 = 3
BME280_OSAMPLE_8 = 4
BME280_OSAMPLE_16 = 5
BME280_REGISTER_CONTROL_HUM = 0xF2
BME280_REGISTER_STATUS = 0xF3
BME280_REGISTER_CONTROL = 0xF4
MODE_SLEEP = const(0)
MODE_FORCED = const(1)
MODE_NORMAL = const(3)
BME280_TIMEOUT = const(100) # about 1 second timeout
class BME280:
def __init__(
self, mode=BME280_OSAMPLE_8, address=BME280_I2CADDR, i2c=None, **kwargs
):
# Check that mode is valid.
if type(mode) is tuple and len(mode) == 3:
self._mode_hum, self._mode_temp, self._mode_press = mode
elif type(mode) == int:
self._mode_hum, self._mode_temp, self._mode_press = mode, mode, mode
else:
raise ValueError(
"Wrong type for the mode parameter, must be int or a 3 element tuple"
)
for mode in (self._mode_hum, self._mode_temp, self._mode_press):
if mode not in [
BME280_OSAMPLE_1,
BME280_OSAMPLE_2,
BME280_OSAMPLE_4,
BME280_OSAMPLE_8,
BME280_OSAMPLE_16,
]:
raise ValueError(
"Unexpected mode value {0}. Set mode to one of "
"BME280_ULTRALOWPOWER, BME280_STANDARD, BME280_HIGHRES, or "
"BME280_ULTRAHIGHRES".format(mode)
)
self.address = address
if i2c is None:
raise ValueError("An I2C object is required.")
self.i2c = i2c
self.__sealevel = 101325
# load calibration data
dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26)
dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7)
(
self.dig_T1,
self.dig_T2,
self.dig_T3,
self.dig_P1,
self.dig_P2,
self.dig_P3,
self.dig_P4,
self.dig_P5,
self.dig_P6,
self.dig_P7,
self.dig_P8,
self.dig_P9,
_,
self.dig_H1,
) = unpack("<HhhHhhhhhhhhBB", dig_88_a1)
self.dig_H2, self.dig_H3, self.dig_H4, self.dig_H5, self.dig_H6 = unpack(
"<hBbhb", dig_e1_e7
)
        # unfold H4, H5, taking care of a potential sign
self.dig_H4 = (self.dig_H4 * 16) + (self.dig_H5 & 0xF)
self.dig_H5 //= 16
# temporary data holders which stay allocated
self._l1_barray = bytearray(1)
self._l8_barray = bytearray(8)
self._l3_resultarray = array("i", [0, 0, 0])
self._l1_barray[0] = self._mode_temp << 5 | self._mode_press << 2 | MODE_SLEEP
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL, self._l1_barray)
self.t_fine = 0
def read_raw_data(self, result):
"""Reads the raw (uncompensated) data from the sensor.
Args:
result: array of length 3 or alike where the result will be
stored, in temperature, pressure, humidity order
Returns:
None
"""
self._l1_barray[0] = self._mode_hum
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL_HUM, self._l1_barray)
self._l1_barray[0] = self._mode_temp << 5 | self._mode_press << 2 | MODE_FORCED
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL, self._l1_barray)
# Wait for conversion to complete
for _ in range(BME280_TIMEOUT):
if self.i2c.readfrom_mem(self.address, BME280_REGISTER_STATUS, 1)[0] & 0x08:
time.sleep_ms(10) # still busy
else:
break # Sensor ready
else:
raise RuntimeError("Sensor BME280 not ready")
# burst readout from 0xF7 to 0xFE, recommended by datasheet
self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray)
readout = self._l8_barray
# pressure(0xF7): ((msb << 16) | (lsb << 8) | xlsb) >> 4
raw_press = ((readout[0] << 16) | (readout[1] << 8) | readout[2]) >> 4
# temperature(0xFA): ((msb << 16) | (lsb << 8) | xlsb) >> 4
raw_temp = ((readout[3] << 16) | (readout[4] << 8) | readout[5]) >> 4
# humidity(0xFD): (msb << 8) | lsb
raw_hum = (readout[6] << 8) | readout[7]
result[0] = raw_temp
result[1] = raw_press
result[2] = raw_hum
def read_compensated_data(self, result=None):
"""Reads the data from the sensor and returns the compensated data.
Args:
result: array of length 3 or alike where the result will be
stored, in temperature, pressure, humidity order. You may use
this to read out the sensor without allocating heap memory
Returns:
array with temperature, pressure, humidity. Will be the one
from the result parameter if not None
"""
self.read_raw_data(self._l3_resultarray)
raw_temp, raw_press, raw_hum = self._l3_resultarray
# temperature
var1 = (raw_temp / 16384.0 - self.dig_T1 / 1024.0) * self.dig_T2
var2 = raw_temp / 131072.0 - self.dig_T1 / 8192.0
var2 = var2 * var2 * self.dig_T3
self.t_fine = int(var1 + var2)
temp = (var1 + var2) / 5120.0
temp = max(-40, min(85, temp))
# pressure
var1 = (self.t_fine / 2.0) - 64000.0
var2 = var1 * var1 * self.dig_P6 / 32768.0 + var1 * self.dig_P5 * 2.0
var2 = (var2 / 4.0) + (self.dig_P4 * 65536.0)
var1 = (self.dig_P3 * var1 * var1 / 524288.0 + self.dig_P2 * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self.dig_P1
if var1 == 0.0:
pressure = 30000 # avoid exception caused by division by zero
else:
p = ((1048576.0 - raw_press) - (var2 / 4096.0)) * 6250.0 / var1
var1 = self.dig_P9 * p * p / 2147483648.0
var2 = p * self.dig_P8 / 32768.0
pressure = p + (var1 + var2 + self.dig_P7) / 16.0
pressure = max(30000, min(110000, pressure))
# humidity
h = self.t_fine - 76800.0
h = (raw_hum - (self.dig_H4 * 64.0 + self.dig_H5 / 16384.0 * h)) * (
self.dig_H2
/ 65536.0
* (
1.0
+ self.dig_H6 / 67108864.0 * h * (1.0 + self.dig_H3 / 67108864.0 * h)
)
)
humidity = h * (1.0 - self.dig_H1 * h / 524288.0)
# humidity = max(0, min(100, humidity))
if result:
result[0] = temp
result[1] = pressure
result[2] = humidity
return result
return array("f", (temp, pressure, humidity))
@property
def sealevel(self):
return self.__sealevel
@sealevel.setter
def sealevel(self, value):
if 30000 < value < 120000: # just ensure some reasonable value
self.__sealevel = value
@property
def altitude(self):
"""
Altitude in m.
"""
from math import pow
try:
p = 44330 * (
1.0 - pow(self.read_compensated_data()[1] / self.__sealevel, 0.1903)
)
except:
p = 0.0
return p
@property
def dew_point(self):
"""
        Compute the dew point temperature for the currently measured
        temperature and humidity pair.
"""
from math import log
t, p, h = self.read_compensated_data()
h = (log(h, 10) - 2) / 0.4343 + (17.62 * t) / (243.12 + t)
return 243.12 * h / (17.62 - h)
@property
def values(self):
"""human readable values"""
t, p, h = self.read_compensated_data()
return ("{:.2f}C".format(t), "{:.2f}hPa".format(p / 100), "{:.2f}%".format(h)) | PypiClean |
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/users/item/calendar_view/item/instances/item/exception_occurrences/item/decline/decline_post_request_body.py
from __future__ import annotations
from kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from ..........models import time_slot
class DeclinePostRequestBody(AdditionalDataHolder, Parsable):
def __init__(self,) -> None:
"""
Instantiates a new declinePostRequestBody and sets the default values.
"""
# Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
self._additional_data: Dict[str, Any] = {}
# The Comment property
self._comment: Optional[str] = None
# The ProposedNewTime property
self._proposed_new_time: Optional[time_slot.TimeSlot] = None
# The SendResponse property
self._send_response: Optional[bool] = None
@property
def additional_data(self,) -> Dict[str, Any]:
"""
Gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Returns: Dict[str, Any]
"""
return self._additional_data
@additional_data.setter
def additional_data(self,value: Dict[str, Any]) -> None:
"""
Sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Args:
value: Value to set for the AdditionalData property.
"""
self._additional_data = value
@property
def comment(self,) -> Optional[str]:
"""
Gets the comment property value. The Comment property
Returns: Optional[str]
"""
return self._comment
@comment.setter
def comment(self,value: Optional[str] = None) -> None:
"""
Sets the comment property value. The Comment property
Args:
value: Value to set for the Comment property.
"""
self._comment = value
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> DeclinePostRequestBody:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: DeclinePostRequestBody
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return DeclinePostRequestBody()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from ..........models import time_slot
fields: Dict[str, Callable[[Any], None]] = {
"Comment": lambda n : setattr(self, 'comment', n.get_str_value()),
"ProposedNewTime": lambda n : setattr(self, 'proposed_new_time', n.get_object_value(time_slot.TimeSlot)),
"SendResponse": lambda n : setattr(self, 'send_response', n.get_bool_value()),
}
return fields
@property
def proposed_new_time(self,) -> Optional[time_slot.TimeSlot]:
"""
Gets the proposedNewTime property value. The ProposedNewTime property
Returns: Optional[time_slot.TimeSlot]
"""
return self._proposed_new_time
@proposed_new_time.setter
def proposed_new_time(self,value: Optional[time_slot.TimeSlot] = None) -> None:
"""
Sets the proposedNewTime property value. The ProposedNewTime property
Args:
value: Value to set for the proposed_new_time property.
"""
self._proposed_new_time = value
@property
def send_response(self,) -> Optional[bool]:
"""
Gets the sendResponse property value. The SendResponse property
Returns: Optional[bool]
"""
return self._send_response
@send_response.setter
def send_response(self,value: Optional[bool] = None) -> None:
"""
Sets the sendResponse property value. The SendResponse property
Args:
value: Value to set for the send_response property.
"""
self._send_response = value
def serialize(self,writer: SerializationWriter) -> None:
"""
        Serializes information about the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
writer.write_str_value("Comment", self.comment)
writer.write_object_value("ProposedNewTime", self.proposed_new_time)
writer.write_bool_value("SendResponse", self.send_response)
writer.write_additional_data_value(self.additional_data) | PypiClean |
/calcasa-api-1.2.1.tar.gz/calcasa-api-1.2.1/calcasa/api/api/rapporten_api.py | import re # noqa: F401
import sys # noqa: F401
from calcasa.api.api_client import ApiClient, Endpoint as _Endpoint
from calcasa.api.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from calcasa.api.model.not_found_problem_details import NotFoundProblemDetails
from calcasa.api.model.permissions_denied_problem_details import PermissionsDeniedProblemDetails
from calcasa.api.model.problem_details import ProblemDetails
class RapportenApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.get_rapport_endpoint = _Endpoint(
settings={
'response_type': (file_type,),
'auth': [
'oauth',
'oauth'
],
'endpoint_path': '/api/v1/rapporten/{id}',
'operation_id': 'get_rapport',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/problem+json',
'application/pdf'
],
'content_type': [],
},
api_client=api_client
)
def get_rapport(
self,
id,
**kwargs
):
"""Rapport op basis van waardering Id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rapport(id, async_req=True)
>>> result = thread.get()
Args:
            id (str): The Id of a valuation.
Keyword Args:
            _return_http_data_only (bool): response data without HTTP status
                code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['id'] = \
id
return self.get_rapport_endpoint.call_with_http_info(**kwargs) | PypiClean |
/NVDA-addonTemplate-0.5.2.zip/NVDA-addonTemplate-0.5.2/NVDAAddonTemplate/data/{{cookiecutter.project_slug}}/scons-local-2.5.0/SCons/Tool/packaging/rpm.py |
__revision__ = "src/engine/SCons/Tool/packaging/rpm.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import os
import SCons.Builder
import SCons.Tool.rpmutils
from SCons.Environment import OverrideEnvironment
from SCons.Tool.packaging import stripinstallbuilder, src_targz
from SCons.Errors import UserError
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
PACKAGEVERSION, DESCRIPTION, SUMMARY, X_RPM_GROUP, LICENSE,
**kw):
# initialize the rpm tool
SCons.Tool.Tool('rpm').generate(env)
bld = env['BUILDERS']['Rpm']
# Generate a UserError whenever the target name has been set explicitly,
# since rpm does not allow for controlling it. This is detected by
# checking if the target has been set to the default by the Package()
# Environment function.
if str(target[0])!="%s-%s"%(NAME, VERSION):
raise UserError( "Setting target is not supported for rpm." )
else:
# This should be overridable from the construction environment,
# which it is by using ARCHITECTURE=.
buildarchitecture = SCons.Tool.rpmutils.defaultMachine()
if 'ARCHITECTURE' in kw:
buildarchitecture = kw['ARCHITECTURE']
fmt = '%s-%s-%s.%s.rpm'
srcrpm = fmt % (NAME, VERSION, PACKAGEVERSION, 'src')
binrpm = fmt % (NAME, VERSION, PACKAGEVERSION, buildarchitecture)
target = [ srcrpm, binrpm ]
# get the correct arguments into the kw hash
loc=locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# if no "SOURCE_URL" tag is given add a default one.
if 'SOURCE_URL' not in kw:
kw['SOURCE_URL']=(str(target[0])+".tar.gz").replace('.rpm', '')
# mangle the source and target list for the rpmbuild
env = OverrideEnvironment(env, kw)
target, source = stripinstallbuilder(target, source, env)
target, source = addspecfile(target, source, env)
target, source = collectintargz(target, source, env)
# now call the rpm builder to actually build the packet.
return bld(env, target, source, **kw)
def collectintargz(target, source, env):
""" Puts all source files into a tar.gz file. """
# the rpm tool depends on a source package, until this is changed
# this hack needs to be here that tries to pack all sources in.
sources = env.FindSourceFiles()
# filter out the target we are building the source list for.
sources = [s for s in sources if s not in target]
# find the .spec file for rpm and add it since it is not necessarily found
# by the FindSourceFiles function.
sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
# as the source contains the url of the source package this rpm package
# is built from, we extract the target name
tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
try:
tarball = env['SOURCE_URL'].split('/')[-1]
    except KeyError as e:
raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
tarball = src_targz.package(env, source=sources, target=tarball,
PACKAGEROOT=env['PACKAGEROOT'], )
return (target, tarball)
def addspecfile(target, source, env):
specfile = "%s-%s" % (env['NAME'], env['VERSION'])
bld = SCons.Builder.Builder(action = build_specfile,
suffix = '.spec',
target_factory = SCons.Node.FS.File)
source.extend(bld(env, specfile, source))
return (target,source)
def build_specfile(target, source, env):
""" Builds a RPM specfile from a dictionary with string metadata and
by analyzing a tree of nodes.
"""
file = open(target[0].get_abspath(), 'w')
try:
file.write( build_specfile_header(env) )
file.write( build_specfile_sections(env) )
file.write( build_specfile_filesection(env, source) )
file.close()
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
    except KeyError as e:
raise SCons.Errors.UserError( '"%s" package field for RPM is missing.' % e.args[0] )
#
# mandatory and optional package tag section
#
def build_specfile_sections(spec):
""" Builds the sections of a rpm specfile.
"""
str = ""
mandatory_sections = {
'DESCRIPTION' : '\n%%description\n%s\n\n', }
str = str + SimpleTagCompiler(mandatory_sections).compile( spec )
optional_sections = {
'DESCRIPTION_' : '%%description -l %s\n%s\n\n',
'CHANGELOG' : '%%changelog\n%s\n\n',
'X_RPM_PREINSTALL' : '%%pre\n%s\n\n',
'X_RPM_POSTINSTALL' : '%%post\n%s\n\n',
'X_RPM_PREUNINSTALL' : '%%preun\n%s\n\n',
'X_RPM_POSTUNINSTALL' : '%%postun\n%s\n\n',
'X_RPM_VERIFY' : '%%verify\n%s\n\n',
# These are for internal use but could possibly be overridden
'X_RPM_PREP' : '%%prep\n%s\n\n',
'X_RPM_BUILD' : '%%build\n%s\n\n',
'X_RPM_INSTALL' : '%%install\n%s\n\n',
'X_RPM_CLEAN' : '%%clean\n%s\n\n',
}
# Default prep, build, install and clean rules
# TODO: optimize those build steps, to not compile the project a second time
if 'X_RPM_PREP' not in spec:
spec['X_RPM_PREP'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"' + '\n%setup -q'
if 'X_RPM_BUILD' not in spec:
spec['X_RPM_BUILD'] = '[ ! -e "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && mkdir "$RPM_BUILD_ROOT"'
if 'X_RPM_INSTALL' not in spec:
spec['X_RPM_INSTALL'] = 'scons --install-sandbox="$RPM_BUILD_ROOT" "$RPM_BUILD_ROOT"'
if 'X_RPM_CLEAN' not in spec:
spec['X_RPM_CLEAN'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"'
str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )
return str
def build_specfile_header(spec):
""" Builds all sections but the %file of a rpm specfile
"""
str = ""
# first the mandatory sections
mandatory_header_fields = {
'NAME' : '%%define name %s\nName: %%{name}\n',
'VERSION' : '%%define version %s\nVersion: %%{version}\n',
'PACKAGEVERSION' : '%%define release %s\nRelease: %%{release}\n',
'X_RPM_GROUP' : 'Group: %s\n',
'SUMMARY' : 'Summary: %s\n',
'LICENSE' : 'License: %s\n', }
str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )
# now the optional tags
optional_header_fields = {
'VENDOR' : 'Vendor: %s\n',
'X_RPM_URL' : 'Url: %s\n',
'SOURCE_URL' : 'Source: %s\n',
'SUMMARY_' : 'Summary(%s): %s\n',
'X_RPM_DISTRIBUTION' : 'Distribution: %s\n',
'X_RPM_ICON' : 'Icon: %s\n',
'X_RPM_PACKAGER' : 'Packager: %s\n',
'X_RPM_GROUP_' : 'Group(%s): %s\n',
'X_RPM_REQUIRES' : 'Requires: %s\n',
'X_RPM_PROVIDES' : 'Provides: %s\n',
'X_RPM_CONFLICTS' : 'Conflicts: %s\n',
'X_RPM_BUILDREQUIRES' : 'BuildRequires: %s\n',
'X_RPM_SERIAL' : 'Serial: %s\n',
'X_RPM_EPOCH' : 'Epoch: %s\n',
'X_RPM_AUTOREQPROV' : 'AutoReqProv: %s\n',
'X_RPM_EXCLUDEARCH' : 'ExcludeArch: %s\n',
'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\n',
'X_RPM_PREFIX' : 'Prefix: %s\n',
'X_RPM_CONFLICTS' : 'Conflicts: %s\n',
# internal use
'X_RPM_BUILDROOT' : 'BuildRoot: %s\n', }
# fill in default values:
    # Adding a BuildRequires renders the .rpm unbuildable under systems which
    # are not managed by rpm, since the database to resolve this dependency is
# missing (take Gentoo as an example)
# if not s.has_key('x_rpm_BuildRequires'):
# s['x_rpm_BuildRequires'] = 'scons'
if 'X_RPM_BUILDROOT' not in spec:
spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'
str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )
return str
#
# mandatory and optional file tags
#
def build_specfile_filesection(spec, files):
""" builds the %file section of the specfile
"""
str = '%files\n'
if 'X_RPM_DEFATTR' not in spec:
spec['X_RPM_DEFATTR'] = '(-,root,root)'
str = str + '%%defattr %s\n' % spec['X_RPM_DEFATTR']
supported_tags = {
'PACKAGING_CONFIG' : '%%config %s',
'PACKAGING_CONFIG_NOREPLACE' : '%%config(noreplace) %s',
'PACKAGING_DOC' : '%%doc %s',
'PACKAGING_UNIX_ATTR' : '%%attr %s',
'PACKAGING_LANG_' : '%%lang(%s) %s',
'PACKAGING_X_RPM_VERIFY' : '%%verify %s',
'PACKAGING_X_RPM_DIR' : '%%dir %s',
'PACKAGING_X_RPM_DOCDIR' : '%%docdir %s',
'PACKAGING_X_RPM_GHOST' : '%%ghost %s', }
for file in files:
# build the tagset
tags = {}
for k in supported_tags.keys():
try:
v = file.GetTag(k)
if v:
tags[k] = v
except AttributeError:
pass
# compile the tagset
str = str + SimpleTagCompiler(supported_tags, mandatory=0).compile( tags )
str = str + ' '
str = str + file.GetTag('PACKAGING_INSTALL_LOCATION')
str = str + '\n\n'
return str
class SimpleTagCompiler(object):
""" This class is a simple string substition utility:
the replacement specfication is stored in the tagset dictionary, something
like:
{ "abc" : "cdef %s ",
"abc_" : "cdef %s %s" }
the compile function gets a value dictionary, which may look like:
{ "abc" : "ghij",
"abc_gh" : "ij" }
The resulting string will be:
"cdef ghij cdef gh ij"
"""
def __init__(self, tagset, mandatory=1):
self.tagset = tagset
self.mandatory = mandatory
def compile(self, values):
""" Compiles the tagset and returns a str containing the result
"""
def is_international(tag):
return tag.endswith('_')
def get_country_code(tag):
return tag[-2:]
def strip_country_code(tag):
return tag[:-2]
replacements = list(self.tagset.items())
str = ""
domestic = [t for t in replacements if not is_international(t[0])]
for key, replacement in domestic:
try:
str = str + replacement % values[key]
            except KeyError as e:
if self.mandatory:
raise e
international = [t for t in replacements if is_international(t[0])]
for key, replacement in international:
try:
x = [t for t in values.items() if strip_country_code(t[0]) == key]
int_values_for_key = [(get_country_code(t[0]),t[1]) for t in x]
for v in int_values_for_key:
str = str + replacement % v
            except KeyError as e:
if self.mandatory:
raise e
return str
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/compas_cra-0.3.0.tar.gz/compas_cra-0.3.0/src/compas_cra/viewers/cra_view.py |
from math import sqrt
import numpy as np
from compas.datastructures import Mesh
from compas.geometry import Polyline, Point, Line, Polygon
from compas.geometry import Rotation, Translation
from compas.geometry import is_coplanar
from compas.utilities import hex_to_rgb
try:
from compas_view2 import app
from compas_view2.collections import Collection
from compas_view2.shapes import Arrow
except ImportError:
pass
def hextorgb(hex):
return tuple(i / 255 for i in hex_to_rgb(hex))
def draw_blocks(assembly, viewer, edge=True, tol=0.0):
supports = []
blocks = []
supportedges = []
blockedges = []
for node in assembly.graph.nodes():
block = assembly.graph.node_attribute(node, "block")
if assembly.graph.node_attribute(node, "is_support"):
supports.append(block)
else:
blocks.append(block)
if not edge:
continue
for edge in block.edges():
if tol != 0.0:
fkeys = block.edge_faces(*edge)
ps = [
block.face_center(fkeys[0]),
block.face_center(fkeys[1]),
*block.edge_coordinates(*edge),
]
if is_coplanar(ps, tol=tol):
continue
if assembly.graph.node_attribute(node, "is_support"):
supportedges.append(Line(*block.edge_coordinates(*edge)))
else:
blockedges.append(Line(*block.edge_coordinates(*edge)))
if len(blocks) != 0:
viewer.add(
Collection(blocks),
show_faces=True,
show_edges=False,
opacity=0.6,
facecolor=(0.9, 0.9, 0.9),
)
if len(supports) != 0:
viewer.add(
Collection(supports),
show_faces=True,
show_edges=False,
opacity=0.5,
facecolor=hextorgb("#f79d84"),
)
if len(blockedges) != 0:
viewer.add(Collection(blockedges), linewidth=1.5)
if len(supportedges) != 0:
viewer.add(Collection(supportedges), linecolor=hextorgb("#f79d84"), linewidth=4)
def draw_interfaces(assembly, viewer):
interfaces = []
faces = []
for edge in assembly.graph.edges():
interface = assembly.graph.edge_attribute(edge, "interface")
if interface is not None:
corners = np.array(interface.points)
faces.append(Polyline(np.vstack((corners, corners[0]))))
if assembly.graph.node_attribute(
edge[0], "is_support"
) or assembly.graph.node_attribute(edge[1], "is_support"):
continue
polygon = Polygon(interface.points)
interfaces.append(Mesh.from_polygons([polygon]))
if assembly.graph.edge_attribute(edge, "interfaces") is None:
continue
for subinterface in assembly.graph.edge_attribute(edge, "interfaces"):
corners = np.array(subinterface.points)
faces.append(Polyline(np.vstack((corners, corners[0]))))
polygon = Polygon(subinterface.points)
interfaces.append(Mesh.from_polygons([polygon]))
if len(interfaces) != 0:
viewer.add(
Collection(interfaces),
show_edges=False,
show_vertices=False,
facecolor=(0.8, 0.8, 0.8),
)
if len(faces) != 0:
viewer.add(
Collection(faces),
linecolor=hextorgb("#fac05e"),
linewidth=10,
pointsize=10,
show_points=True,
pointcolor=(0, 0, 0),
)
def draw_forces(assembly, viewer, scale=1.0, resultant=True, nodal=False):
locs = []
res_np = []
res_nn = []
fnp = []
fnn = []
ft = []
for edge in assembly.graph.edges():
interface = assembly.graph.edge_attribute(edge, "interface")
if interface is None:
break
forces = interface.forces
if forces is None:
continue
corners = np.array(interface.points)
frame = interface.frame
w, u, v = frame.zaxis, frame.xaxis, frame.yaxis
if nodal:
for i, corner in enumerate(corners):
pt = Point(*corner)
force = forces[i]["c_np"] - forces[i]["c_nn"]
p1 = pt + w * force * 0.5 * scale
p2 = pt - w * force * 0.5 * scale
if force >= 0:
fnn.append(Line(p1, p2))
else:
fnp.append(Line(p1, p2))
ft_uv = (u * forces[i]["c_u"] + v * forces[i]["c_v"]) * 0.5 * scale
p1 = pt + ft_uv
p2 = pt - ft_uv
ft.append(Line(p1, p2))
if resultant:
sum_n = sum(force["c_np"] - force["c_nn"] for force in forces)
sum_u = sum(force["c_u"] for force in forces)
sum_v = sum(force["c_v"] for force in forces)
if sum_n == 0:
continue
resultant_pos = np.average(
np.array(corners),
axis=0,
weights=[force["c_np"] - force["c_nn"] for force in forces],
)
locs.append(Point(*resultant_pos))
# resultant
resultant_f = (w * sum_n + u * sum_u + v * sum_v) * 0.5 * scale
p1 = resultant_pos + resultant_f
p2 = resultant_pos - resultant_f
if sum_n >= 0:
res_np.append(Line(p1, p2))
else:
res_nn.append(Line(p1, p2))
if len(locs) != 0:
viewer.add(Collection(locs), size=12, color=hextorgb("#386641"))
if len(res_np) != 0:
viewer.add(Collection(res_np), linewidth=8, linecolor=(0, 0.3, 0))
if len(res_nn) != 0:
viewer.add(Collection(res_nn), linewidth=8, linecolor=(0.8, 0, 0))
if len(fnn) != 0:
viewer.add(Collection(fnn), linewidth=5, linecolor=hextorgb("#00468b"))
if len(fnp) != 0:
viewer.add(Collection(fnp), linewidth=5, linecolor=(1, 0, 0))
if len(ft) != 0:
viewer.add(Collection(ft), linewidth=5, linecolor=(1.0, 0.5, 0.0))
def draw_forcesline(assembly, viewer, scale=1.0, resultant=True, nodal=False):
locs = []
res_np = []
res_nn = []
fnp = []
fnn = []
ft = []
# total_reaction = 0
for edge in assembly.graph.edges():
for interface in assembly.graph.edge_attribute(edge, "interfaces"):
forces = interface.forces
if forces is None:
continue
corners = np.array(interface.points)
frame = interface.frame
w, u, v = frame.zaxis, frame.xaxis, frame.yaxis
if nodal:
for i, corner in enumerate(corners):
pt = Point(*corner)
force = forces[i]["c_np"] - forces[i]["c_nn"]
p1 = pt + w * force * 0.5 * scale
p2 = pt - w * force * 0.5 * scale
if force >= 0:
fnn.append(Line(p1, p2))
else:
fnp.append(Line(p1, p2))
ft_uv = (u * forces[i]["c_u"] + v * forces[i]["c_v"]) * 0.5 * scale
p1 = pt + ft_uv
p2 = pt - ft_uv
ft.append(Line(p1, p2))
if resultant:
is_tension = False
for force in forces:
if force["c_np"] - force["c_nn"] <= -1e-5:
is_tension = True
sum_n = sum(force["c_np"] - force["c_nn"] for force in forces)
sum_u = sum(force["c_u"] for force in forces)
sum_v = sum(force["c_v"] for force in forces)
if sum_n == 0:
continue
resultant_pos = np.average(
np.array(corners),
axis=0,
weights=[force["c_np"] - force["c_nn"] for force in forces],
)
locs.append(Point(*resultant_pos))
# resultant
resultant_f = (w * sum_n + u * sum_u + v * sum_v) * 0.5 * scale
# print((w * sum_n + u * sum_u + v * sum_v).length * 100000, "edge: ", edge)
# if assembly.graph.node_attribute(edge[0], 'is_support') or assembly.graph.node_attribute(edge[1], 'is_support'):
# print((w * sum_n + u * sum_u + v * sum_v).z)
# total_reaction += abs((w * sum_n + u * sum_u + v * sum_v).z * 100000)
p1 = resultant_pos + resultant_f
p2 = resultant_pos - resultant_f
if not is_tension:
res_np.append(Line(p1, p2))
else:
res_nn.append(Line(p1, p2))
if len(locs) != 0:
viewer.add(Collection(locs), size=12, color=hextorgb("#386641"))
if len(res_np) != 0:
viewer.add(Collection(res_np), linewidth=8, linecolor=(0, 0.3, 0))
if len(res_nn) != 0:
viewer.add(Collection(res_nn), linewidth=8, linecolor=(0.8, 0, 0))
if len(fnn) != 0:
viewer.add(Collection(fnn), linewidth=5, linecolor=hextorgb("#00468b"))
if len(fnp) != 0:
viewer.add(Collection(fnp), linewidth=5, linecolor=(1, 0, 0))
if len(ft) != 0:
viewer.add(Collection(ft), linewidth=5, linecolor=(1.0, 0.5, 0.0))
# print("total reaction: ", total_reaction)
def draw_forcesdirect(assembly, viewer, scale=1.0, resultant=True, nodal=False):
locs = []
res_np = []
res_nn = []
fnp = []
fnn = []
ft = []
for edge in assembly.graph.edges():
thres = 1e-6
if assembly.graph.node_attribute(
edge[0], "is_support"
) and not assembly.graph.node_attribute(edge[1], "is_support"):
flip = False
else:
flip = True
if assembly.graph.edge_attribute(edge, "interfaces") is None:
continue
for interface in assembly.graph.edge_attribute(edge, "interfaces"):
forces = interface.forces
if forces is None:
continue
corners = np.array(interface.points)
frame = interface.frame
w, u, v = frame.zaxis, frame.xaxis, frame.yaxis
if nodal:
for i, corner in enumerate(corners):
pt = Point(*corner)
force = forces[i]["c_np"] - forces[i]["c_nn"]
if (w * force * scale).length == 0:
continue
if flip:
f = Arrow(
pt,
w * force * scale * -1,
head_portion=0.2,
head_width=0.07,
body_width=0.02,
)
else:
f = Arrow(
pt,
w * force * scale,
head_portion=0.2,
head_width=0.07,
body_width=0.02,
)
if force >= 0:
fnp.append(f)
else:
fnn.append(f)
ft_uv = (u * forces[i]["c_u"] + v * forces[i]["c_v"]) * scale
if ft_uv.length == 0:
continue
if flip:
f = Arrow(
pt,
ft_uv * -1,
head_portion=0.2,
head_width=0.07,
body_width=0.02,
)
else:
f = Arrow(
pt,
ft_uv,
head_portion=0.2,
head_width=0.07,
body_width=0.02,
)
ft.append(f)
if resultant:
is_tension = False
for force in forces:
if force["c_np"] - force["c_nn"] <= -1e-5:
is_tension = True
sum_n = sum(force["c_np"] - force["c_nn"] for force in forces)
sum_u = sum(force["c_u"] for force in forces)
sum_v = sum(force["c_v"] for force in forces)
if abs(sum_n) <= thres:
resultant_pos = np.average(
np.array(corners),
axis=0,
weights=[
sqrt(force["c_u"] ** 2 + force["c_v"] ** 2)
for force in forces
],
)
friction = True
else:
resultant_pos = np.average(
np.array(corners),
axis=0,
weights=[force["c_np"] - force["c_nn"] for force in forces],
)
friction = False
resultant_f = (w * sum_n + u * sum_u + v * sum_v) * scale
if resultant_f.length >= thres:
locs.append(Point(*resultant_pos))
if flip:
f = Arrow(
resultant_pos,
resultant_f * -1,
head_portion=0.2,
head_width=0.07,
body_width=0.02,
)
else:
f = Arrow(
resultant_pos,
resultant_f,
head_portion=0.2,
head_width=0.07,
body_width=0.02,
)
if friction:
viewer.add(f, facecolor=(1.0, 0.5, 0.0), show_edges=False)
if not is_tension:
res_np.append(f)
else:
res_nn.append(f)
if len(locs) != 0:
viewer.add(Collection(locs), size=12, color=hextorgb("#386641"))
if len(res_np) != 0:
viewer.add(Collection(res_np), facecolor=hextorgb("#386641"), show_edges=False)
if len(res_nn) != 0:
viewer.add(Collection(res_nn), facecolor=(0.8, 0, 0), show_edges=False)
if len(fnp) != 0:
viewer.add(
Collection(fnp),
facecolor=hextorgb("#00468b"),
show_edges=False,
opacity=0.5,
)
if len(fnn) != 0:
viewer.add(Collection(fnn), facecolor=(1, 0, 0), show_edges=False, opacity=0.5)
if len(ft) != 0:
viewer.add(
Collection(ft), facecolor=(1.0, 0.5, 0.0), show_edges=False, opacity=0.5
)
def draw_displacements(assembly, viewer, dispscale=1.0, tol=0.0):
blocks = []
nodes = []
for node in assembly.graph.nodes():
if assembly.graph.node_attribute(node, "is_support"):
continue
block = assembly.graph.node_attribute(node, "block")
displacement = assembly.graph.node_attribute(node, "displacement")
if displacement is None:
continue
displacement = np.array(displacement) * dispscale
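        # displacement[0:3] holds the translational part and displacement[3:6] the
        # rotational part, expressed as an axis-angle vector about the block centre.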
vec = (
np.array([1, 0, 0]) * displacement[3]
+ np.array([0, 1, 0]) * displacement[4]
+ np.array([0, 0, 1]) * displacement[5]
).tolist()
R = Rotation.from_axis_angle_vector(vec, point=block.center())
T = Translation.from_vector(displacement[0:3])
new_block = block.transformed(R).transformed(T)
nodes.append(Point(*new_block.center()))
for edge in block.edges():
if tol != 0.0:
fkeys = block.edge_faces(*edge)
ps = [
block.face_center(fkeys[0]),
block.face_center(fkeys[1]),
*block.edge_coordinates(*edge),
]
if is_coplanar(ps, tol=tol):
continue
blocks.append(Line(*new_block.edge_coordinates(*edge)))
if len(blocks) != 0:
viewer.add(Collection(blocks), linewidth=1, linecolor=(0.7, 0.7, 0.7))
if len(nodes) != 0:
viewer.add(Collection(nodes), color=(0.7, 0.7, 0.7))
def draw_weights(assembly, viewer, scale=1.0, density=1.0):
weights = []
blocks = []
supports = []
# total_weights = 0
for node in assembly.graph.nodes():
block = assembly.graph.node_attribute(node, "block")
if assembly.graph.node_attribute(node, "is_support"):
supports.append(Point(*block.center()))
continue
d = block.attributes["density"] if "density" in block.attributes else density
weights.append(
Arrow(
block.center(),
[0, 0, -block.volume() * d * scale],
head_portion=0.2,
head_width=0.07,
body_width=0.02,
)
)
# print("self-weight", -block.volume() * density)
# total_weights += block.volume() * 2500 * 9.8
blocks.append(Point(*block.center()))
# print("total self-weight: ", total_weights)
if len(supports) != 0:
viewer.add(Collection(supports), size=20, color=hextorgb("#ee6352"))
if len(blocks) != 0:
viewer.add(Collection(blocks), size=30, color=hextorgb("#3284a0"))
if len(weights) != 0:
viewer.add(Collection(weights), facecolor=hextorgb("#59cd90"), show_edges=False)
def cra_view(
assembly,
scale=1.0,
density=1.0,
dispscale=1.0,
tol=1e-5,
grid=False,
resultant=True,
nodal=False,
edge=True,
blocks=True,
interfaces=True,
forces=True,
forcesdirect=True,
forcesline=False,
weights=True,
displacements=True,
):
"""CRA Viewer, creating new viewer.
Parameters
----------
assembly : :class:`~compas_assembly.datastructures.Assembly`
The rigid block assembly.
scale : float, optional
Force scale.
density : float, optional
Density of the block material.
dispscale : float, optional
virtual displacement scale.
tol : float, optional
Tolerance value to consider faces to be planar.
grid : bool, optional
Show view grid.
resultant : bool, optional
Plot resultant forces.
nodal : bool, optional
Plot nodal forces.
edge : bool, optional
Plot block edges.
blocks : bool, optional
Plot block.
interfaces : bool, optional
Plot interfaces.
forces : bool, optional
Plot forces.
forcesdirect : bool, optional
Plot forces as vectors.
forcesline : bool, optional
Plot forces as lines.
weights : bool, optional
Plot block self weight as vectors.
displacements : bool, optional
Plot virtual displacements.
Returns
-------
None
"""
viewer = app.App(width=1600, height=1000, viewmode="shaded", show_grid=grid)
if blocks:
draw_blocks(assembly, viewer, edge, tol)
if interfaces:
draw_interfaces(assembly, viewer)
if forces:
draw_forces(assembly, viewer, scale, resultant, nodal)
if forcesdirect:
draw_forcesdirect(assembly, viewer, scale, resultant, nodal)
if forcesline:
draw_forcesline(assembly, viewer, scale, resultant, nodal)
if weights:
draw_weights(assembly, viewer, scale, density)
if displacements:
draw_displacements(assembly, viewer, dispscale, tol)
viewer.run()
def cra_view_ex(
viewer,
assembly,
scale=1.0,
density=1.0,
dispscale=1.0,
tol=1e-5,
resultant=True,
nodal=False,
edge=True,
blocks=True,
interfaces=True,
forces=True,
forcesdirect=True,
forcesline=False,
weights=True,
displacements=True,
):
"""CRA Viewer using existing view.
Parameters
----------
viewer : compas_view2.app.App
External viewer object.
assembly : :class:`~compas_assembly.datastructures.Assembly`
The rigid block assembly.
scale : float, optional
Force scale.
density : float, optional
Density of the block material.
dispscale : float, optional
virtual displacement scale.
tol : float, optional
Tolerance value to consider faces to be planar.
resultant : bool, optional
Plot resultant forces.
nodal : bool, optional
Plot nodal forces.
edge : bool, optional
Plot block edges.
blocks : bool, optional
Plot block.
interfaces : bool, optional
Plot interfaces.
forces : bool, optional
Plot forces.
forcesdirect : bool, optional
Plot forces as vectors.
forcesline : bool, optional
Plot forces as lines.
weights : bool, optional
Plot block self weight as vectors.
displacements : bool, optional
Plot virtual displacements.
Returns
-------
None
"""
if blocks:
draw_blocks(assembly, viewer, edge, tol)
if interfaces:
draw_interfaces(assembly, viewer)
if forces:
draw_forces(assembly, viewer, scale, resultant, nodal)
if forcesdirect:
draw_forcesdirect(assembly, viewer, scale, resultant, nodal)
if forcesline:
draw_forcesline(assembly, viewer, scale, resultant, nodal)
if weights:
draw_weights(assembly, viewer, scale, density)
if displacements:
draw_displacements(assembly, viewer, dispscale, tol)
if __name__ == "__main__":
pass | PypiClean |
/custom-awscli-1.27.51.tar.gz/custom-awscli-1.27.51/awscli/examples/rds/create-db-instance.rst | **To create a DB instance**
The following ``create-db-instance`` example uses the required options to launch a new DB instance. ::
aws rds create-db-instance \
--db-instance-identifier test-mysql-instance \
--db-instance-class db.t3.micro \
--engine mysql \
--master-username admin \
--master-user-password secret99 \
--allocated-storage 20
Output::
{
"DBInstance": {
"DBInstanceIdentifier": "test-mysql-instance",
"DBInstanceClass": "db.t3.micro",
"Engine": "mysql",
"DBInstanceStatus": "creating",
"MasterUsername": "admin",
"AllocatedStorage": 20,
"PreferredBackupWindow": "12:55-13:25",
"BackupRetentionPeriod": 1,
"DBSecurityGroups": [],
"VpcSecurityGroups": [
{
"VpcSecurityGroupId": "sg-12345abc",
"Status": "active"
}
],
"DBParameterGroups": [
{
"DBParameterGroupName": "default.mysql5.7",
"ParameterApplyStatus": "in-sync"
}
],
"DBSubnetGroup": {
"DBSubnetGroupName": "default",
"DBSubnetGroupDescription": "default",
"VpcId": "vpc-2ff2ff2f",
"SubnetGroupStatus": "Complete",
"Subnets": [
{
"SubnetIdentifier": "subnet-########",
"SubnetAvailabilityZone": {
"Name": "us-west-2c"
},
"SubnetStatus": "Active"
},
{
"SubnetIdentifier": "subnet-########",
"SubnetAvailabilityZone": {
"Name": "us-west-2d"
},
"SubnetStatus": "Active"
},
{
"SubnetIdentifier": "subnet-########",
"SubnetAvailabilityZone": {
"Name": "us-west-2a"
},
"SubnetStatus": "Active"
},
{
"SubnetIdentifier": "subnet-########",
"SubnetAvailabilityZone": {
"Name": "us-west-2b"
},
"SubnetStatus": "Active"
}
]
},
"PreferredMaintenanceWindow": "sun:08:07-sun:08:37",
"PendingModifiedValues": {
"MasterUserPassword": "****"
},
"MultiAZ": false,
"EngineVersion": "5.7.22",
"AutoMinorVersionUpgrade": true,
"ReadReplicaDBInstanceIdentifiers": [],
"LicenseModel": "general-public-license",
"OptionGroupMemberships": [
{
"OptionGroupName": "default:mysql-5-7",
"Status": "in-sync"
}
],
"PubliclyAccessible": true,
"StorageType": "gp2",
"DbInstancePort": 0,
"StorageEncrypted": false,
"DbiResourceId": "db-5555EXAMPLE44444444EXAMPLE",
"CACertificateIdentifier": "rds-ca-2019",
"DomainMemberships": [],
"CopyTagsToSnapshot": false,
"MonitoringInterval": 0,
"DBInstanceArn": "arn:aws:rds:us-west-2:123456789012:db:test-mysql-instance",
"IAMDatabaseAuthenticationEnabled": false,
"PerformanceInsightsEnabled": false,
"DeletionProtection": false,
"AssociatedRoles": []
}
}
For more information, see `Creating an Amazon RDS DB Instance <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CreateDBInstance.html>`__ in the *Amazon RDS User Guide*.
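As an optional follow-up (not part of the original example), you can wait until the new DB instance becomes available by using the corresponding waiter. ::
    aws rds wait db-instance-available \
        --db-instance-identifier test-mysql-instance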
| PypiClean |
/docums-git-authors-plugin-1.0.tar.gz/docums-git-authors-plugin-1.0/docums_git_authors_plugin/util.py | from datetime import datetime, timezone, timedelta
from pathlib import Path
def commit_datetime(author_time: str, author_tz: str):
"""
Convert a commit's timestamp to an aware datetime object.
Args:
author_time: Unix timestamp string
author_tz: string in the format +hhmm
Returns:
datetime.datetime object with tzinfo
"""
# timezone info looks like +hhmm or -hhmm
tz_hours = int(author_tz[:3])
    tz_minutes = int(author_tz[0] + author_tz[3:])
    return datetime.fromtimestamp(
        int(author_time), timezone(timedelta(hours=tz_hours, minutes=tz_minutes))
)
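# Illustrative example (values chosen for this comment, not taken from the source):
# commit_datetime("1609459200", "+0100") -> 2021-01-01 01:00:00+01:00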
def commit_datetime_string(dt: datetime):
"""
Return a string representation for a commit's timestamp.
Args:
dt: datetime object with tzinfo
Returns:
string representation (should be localized)
"""
return dt.strftime("%c %z")
def page_authors_summary(page, config: dict):
"""
A summary of the authors' contributions on a page level
Args:
page (Page): Page class
config (dict): plugin's config dict
Returns:
str: HTML text with authors
"""
authors = page.get_authors()
authors_summary = []
for author in authors:
contrib = (
" (%s)" % author.contribution(page.path(), str)
if page.repo().config("show_contribution") and len(page.get_authors()) > 1
else ""
)
authors_summary.append(
"<a href='mailto:%s'>%s</a>%s" % (author.email(), author.name(), contrib)
)
authors_summary = ", ".join(authors_summary)
return "<span class='git-page-authors git-authors'>%s</span>" % authors_summary
def site_authors_summary(authors, config: dict):
"""
A summary list of the authors' contributions on repo level.
Iterates over all authors and produces an HTML <ul> list with
their names and overall contribution details (lines/percentage).
TODO:
- The output should be configurable or at least localizable
(suggestions:
- load a template with named fields for the values
(user may provide alternative template)
- provide plugin configuration options for the various labels
)
Args:
authors: sorted list of Author objects
config: plugin's config dict
Returns:
Unordered HTML list as a string.
"""
show_contribution = config["show_contribution"]
show_line_count = config["show_line_count"]
result = """
<span class='git-authors'>
<ul>
"""
for author in authors:
contribution = (
" (%s)" % author.contribution(None, str) if show_contribution else ""
)
lines = ": %s lines" % author.lines() if show_line_count else ""
result += """
<li><a href='mailto:{author_email}'>{author_name}</a>{lines}{contribution}</li>
""".format(
author_email=author.email(),
author_name=author.name(),
lines=lines,
contribution=contribution,
)
result += """
      </ul>
    </span>
"""
return result
def page_authors(authors, path):
"""List of dicts with info on page authors
# TODO: rename to something more representative like 'authors_to_dict()'
Args:
authors (list): list with Author classes
path (str): path to page
"""
if type(path) == str:
path = Path(path)
return [
{
"name": author.name(),
"email": author.email(),
"last_datetime": author.datetime(path, str),
"lines": author.lines(path),
"lines_all_pages": author.lines(),
"contribution": author.contribution(path, str),
"contribution_all_pages": author.contribution(None, str),
}
for author in authors
] | PypiClean |
/ngs-smap-4.6.5.tar.gz/ngs-smap-4.6.5/INSTALL.md | # Installation
## Prerequisites
This manual presumes that you have access to the following:
* A running linux distribution with python3 installed
* Administrator privileges (sudo rights)
* A working internet connection
## Downloading the software
The latest release of the software can be obtained from https://gitlab.com/truttink/smap/-/releases. If you are familiar with Git: we make sure that the latest release matches the contents of the master branch (https://gitlab.com/truttink/smap). However, if you would like to use the latest and greatest developments, these are available in the 'dev' branch (https://gitlab.com/truttink/smap/tree/dev). The software can thus be downloaded in one of three ways:
* Downloading the release: using the browser, or using `wget`.
* Downloading the master branch using the command line (git): `git clone https://gitlab.com/truttink/smap.git`
* Getting the latest developments: `git clone https://gitlab.com/truttink/smap.git; git checkout dev`
## Installing dependencies
The scripts included in this software depend on a couple of python packages, together with the bedtools software. Installing bedtools requires administrator privileges, while installing the python packages can be done in virtual environments.
### bedtools
This software ships [BEDtools](https://github.com/arq5x/bedtools2), which is covered by an MIT license.
### Python packages.
As noted above, the package dependencies from python can be installed in virtual environments, allowing these dependencies to be installed without administrator privileges and for a single user only. According to the [python docs](https://docs.python.org/3/tutorial/venv.html), a virtual environment is a self-contained directory tree that contains a Python installation for a particular version of Python, plus a number of additional packages. Creating a virtual environment for python3 is pretty straightforward:
```{bash}
python3 -m venv <environment_folder_name>
```
The above command will create a folder `<environment_folder_name>` (hidden if the name starts with a dot, such as `.venv`) which contains the new virtual environment. This local environment has the same structure as the global python environment. For example, a python executable can be found in `<environment_folder_name>/bin/`. However, it is not necessary to adjust scripts to point to the python executable in this folder. Instead, python virtual environments can be activated to perform this adjustment automatically.
A virtual environment can be activated using
```{bash}
source <environment_folder_name>/bin/activate
```
When activated, the `<environment_folder_name>/bin/` folder will be added to the linux PATH. As a consequence, for every python-related operation that the user performs, the activated virtual environment is used. This includes installing and removing software, running python, etc. Environments can also be activated from scripts, making it possible to install software into virtual environments and remove that virtual environment when the script finishes.
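As an illustration (a minimal sketch, not part of the official instructions; the folder name `.tmp_venv` is only an example), a script could create a temporary environment, use it and clean it up afterwards:
```{bash}
#!/usr/bin/env bash
set -e
python3 -m venv .tmp_venv          # create a temporary environment
source .tmp_venv/bin/activate      # activate it for the rest of the script
pip install --upgrade pip          # avoid issues with an outdated pip
# ... install packages and run the analysis here ...
deactivate                         # leave the environment
rm -r .tmp_venv                    # and remove it again
```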
For installing python software, `pip` is used. By default pip will install packages from the Python Package Index, https://pypi.org. If packages are installed into a virtual environment, no sudo rights are required. For your convenience, all the python dependencies have been listed in a [requirements file](https://gitlab.com/truttink/smap/-/blob/master/meta.yaml). This list of requirements can be passed to `pip`, which will automatically install the dependencies.
By default, virtual environments can ship outdated pip versions. It is necessary to update pip before you continue, otherwise you might get an error that cython is not installed.
``` {bash}
pip install --upgrade pip
pip install ngs-smap/
```
After you have finished your analysis in the virtual environment, you can leave it by running
```{bash}
deactivate
```
## Example installation
### Using pip
```{bash}
python3 -m venv .venv
source .venv/bin/activate
pip install --upgrade pip
pip install ngs-smap
```
If you also want to install SMAP haplotype-window and SMAP design:
```bash
python3 -m venv .venv
source .venv/bin/activate
pip install --upgrade pip
pip install ngs-smap
pip install smap-haplotype-window
pip install primer3-py biopython
## add commands to wget the utility python scripts from the repo's. ##
```
### Using Git
```{bash}
git clone https://gitlab.com/truttink/smap.git
cd smap
git checkout master
python3 -m venv .venv
source .venv/bin/activate
pip install --upgrade pip
pip install .
```
Or
```bash
git clone https://gitlab.com/truttink/smap.git ; cd smap ; git checkout master ; python3 -m venv .venv ; source .venv/bin/activate ; pip install --upgrade pip ; pip install .
```
If you also want to install SMAP haplotype-window:
```bash
git clone https://gitlab.com/truttink/smap.git
cd smap
git checkout master
python3 -m venv .venv
source .venv/bin/activate
pip install --upgrade pip
pip install .
cd ..
git clone https://gitlab.com/ilvo/smap-haplotype-window
cd smap-haplotype-window
git checkout master
pip install .
```
or
```bash
git clone https://gitlab.com/truttink/smap.git ; cd smap ; git checkout master ; python3 -m venv .venv ; source .venv/bin/activate ; pip install --upgrade pip ; pip install . ; cd .. ; git clone https://gitlab.com/ilvo/smap-haplotype-window ; cd smap-haplotype-window ; git checkout master ; pip install .
```
If you also want to install SMAP haplotype-window and SMAP design:
```bash
git clone https://gitlab.com/truttink/smap.git
cd smap
git checkout master
python3 -m venv .venv
source .venv/bin/activate
pip install --upgrade pip
pip install .
cd ..
git clone https://gitlab.com/ilvo/smap-haplotype-window
cd smap-haplotype-window
git checkout master
pip install .
cd ..
git clone https://gitlab.com/ilvo/smap-design.git
cd smap-design
pip install primer3-py biopython
# The required packages pandas and matplotlib are already included in the main SMAP package installation above. If SMAP design is installed by itself, then also run:
pip install pandas matplotlib
## add commands to wget the utility python scripts from the repo's. ##
```
or
```bash
git clone https://gitlab.com/truttink/smap.git ; cd smap ; git checkout master ; python3 -m venv .venv ; source .venv/bin/activate ; pip install --upgrade pip ; pip install . ; cd .. ; git clone https://gitlab.com/ilvo/smap-haplotype-window ; cd smap-haplotype-window ; git checkout master ; pip install . ; cd .. ; git clone https://gitlab.com/ilvo/smap-design.git ; cd smap-design ; pip install primer3-py biopython ; pip install pandas matplotlib
```
### Using Docker
A docker container is available on dockerhub.
To pull the docker image and run SMAP using Docker, use:
```bash
docker run ilvo/smap --help
```
It is currently not possible to install SMAP design and SMAP haplotype-window using docker.
# Removing smap
Uninstalling smap is a matter of removing the virtual environment and uninstalling `bedtools`. For example:
```{bash}
rm -r .venv
```
| PypiClean |
/NarrowBand-0.2.1.tar.gz/NarrowBand-0.2.1/README.md | # NarrowBand Lib for Python 3
This is an easy-to-use library for Narrowband IoT applications.
It communicates via UART with your target device.
It simplifies setup and use: you don't have to send AT commands manually.
For beginners, use the Narrowband class with its pre-defined methods.
If you want more freedom, feel free to check out the NarrowbandCore class.
## Requirements:
- Python >= 3
- PySerial >= 3.4
## Example:
If you use a supported board, the lib will auto-detect it:
```python
import narrowband
nb = narrowband.Narrowband()
nb.attach()
while 1:
time.sleep(1)
```
For any other board, define the device and its values (port, baudrate and timeout):
```python
import narrowband
nb = narrowband.Narrowband("COM1", 9600, 1)
nb.attach()
while 1:
time.sleep(1)
```
## Testing
Tested & verified Narrowband chips:
* Quectel BC68
* Quectel BC95-B8
## License
Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License | PypiClean |
/stable_baselines-2.10.2-py3-none-any.whl/stable_baselines/sac/policies.py | import tensorflow as tf
import numpy as np
from gym.spaces import Box
from stable_baselines.common.policies import BasePolicy, nature_cnn, register_policy
from stable_baselines.common.tf_layers import mlp
EPS = 1e-6 # Avoid NaN (prevents division by zero or log of zero)
# CAP the standard deviation of the actor
LOG_STD_MAX = 2
LOG_STD_MIN = -20
def gaussian_likelihood(input_, mu_, log_std):
"""
    Helper to compute the log likelihood of a Gaussian.
Here we assume this is a Diagonal Gaussian.
:param input_: (tf.Tensor)
:param mu_: (tf.Tensor)
:param log_std: (tf.Tensor)
:return: (tf.Tensor)
"""
pre_sum = -0.5 * (((input_ - mu_) / (tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(2 * np.pi))
return tf.reduce_sum(pre_sum, axis=1)
def gaussian_entropy(log_std):
"""
Compute the entropy for a diagonal Gaussian distribution.
:param log_std: (tf.Tensor) Log of the standard deviation
:return: (tf.Tensor)
"""
return tf.reduce_sum(log_std + 0.5 * np.log(2.0 * np.pi * np.e), axis=-1)
def clip_but_pass_gradient(input_, lower=-1., upper=1.):
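    # Straight-through clipping: the forward pass clamps input_ to [lower, upper],
    # while wrapping the correction terms in tf.stop_gradient lets gradients flow
    # through as if no clipping had been applied.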
clip_up = tf.cast(input_ > upper, tf.float32)
clip_low = tf.cast(input_ < lower, tf.float32)
return input_ + tf.stop_gradient((upper - input_) * clip_up + (lower - input_) * clip_low)
def apply_squashing_func(mu_, pi_, logp_pi):
"""
Squash the output of the Gaussian distribution
and account for that in the log probability
The squashed mean is also returned for using
deterministic actions.
:param mu_: (tf.Tensor) Mean of the gaussian
:param pi_: (tf.Tensor) Output of the policy before squashing
:param logp_pi: (tf.Tensor) Log probability before squashing
:return: ([tf.Tensor])
"""
# Squash the output
deterministic_policy = tf.tanh(mu_)
policy = tf.tanh(pi_)
# OpenAI Variation:
# To avoid evil machine precision error, strictly clip 1-pi**2 to [0,1] range.
# logp_pi -= tf.reduce_sum(tf.log(clip_but_pass_gradient(1 - policy ** 2, lower=0, upper=1) + EPS), axis=1)
# Squash correction (from original implementation)
logp_pi -= tf.reduce_sum(tf.log(1 - policy ** 2 + EPS), axis=1)
return deterministic_policy, policy, logp_pi
class SACPolicy(BasePolicy):
"""
Policy object that implements a SAC-like actor critic
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param reuse: (bool) If the policy is reusable or not
:param scale: (bool) whether or not to scale the input
"""
def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, scale=False):
super(SACPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=reuse, scale=scale)
assert isinstance(ac_space, Box), "Error: the action space must be of type gym.spaces.Box"
self.qf1 = None
self.qf2 = None
self.value_fn = None
self.policy = None
self.deterministic_policy = None
self.act_mu = None
self.std = None
def make_actor(self, obs=None, reuse=False, scope="pi"):
"""
Creates an actor object
:param obs: (TensorFlow Tensor) The observation placeholder (can be None for default placeholder)
:param reuse: (bool) whether or not to reuse parameters
:param scope: (str) the scope name of the actor
:return: (TensorFlow Tensor) the output tensor
"""
raise NotImplementedError
def make_critics(self, obs=None, action=None, reuse=False,
scope="values_fn", create_vf=True, create_qf=True):
"""
Creates the two Q-Values approximator along with the Value function
:param obs: (TensorFlow Tensor) The observation placeholder (can be None for default placeholder)
:param action: (TensorFlow Tensor) The action placeholder
:param reuse: (bool) whether or not to reuse parameters
:param scope: (str) the scope name
:param create_vf: (bool) Whether to create Value fn or not
:param create_qf: (bool) Whether to create Q-Values fn or not
:return: ([tf.Tensor]) Mean, action and log probability
"""
raise NotImplementedError
def step(self, obs, state=None, mask=None, deterministic=False):
"""
Returns the policy for a single step
:param obs: ([float] or [int]) The current observation of the environment
:param state: ([float]) The last states (used in recurrent policies)
:param mask: ([float]) The last masks (used in recurrent policies)
:param deterministic: (bool) Whether or not to return deterministic actions.
:return: ([float]) actions
"""
raise NotImplementedError
def proba_step(self, obs, state=None, mask=None):
"""
Returns the action probability params (mean, std) for a single step
:param obs: ([float] or [int]) The current observation of the environment
:param state: ([float]) The last states (used in recurrent policies)
:param mask: ([float]) The last masks (used in recurrent policies)
:return: ([float], [float])
"""
raise NotImplementedError
class FeedForwardPolicy(SACPolicy):
"""
Policy object that implements a DDPG-like actor critic, using a feed forward neural network.
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param reuse: (bool) If the policy is reusable or not
:param layers: ([int]) The size of the Neural network for the policy (if None, default to [64, 64])
:param cnn_extractor: (function (TensorFlow Tensor, ``**kwargs``): (TensorFlow Tensor)) the CNN feature extraction
:param feature_extraction: (str) The feature extraction type ("cnn" or "mlp")
:param layer_norm: (bool) enable layer normalisation
:param reg_weight: (float) Regularization loss weight for the policy parameters
:param act_fun: (tf.func) the activation function to use in the neural network.
:param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, layers=None,
cnn_extractor=nature_cnn, feature_extraction="cnn", reg_weight=0.0,
layer_norm=False, act_fun=tf.nn.relu, **kwargs):
super(FeedForwardPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch,
reuse=reuse, scale=(feature_extraction == "cnn"))
self._kwargs_check(feature_extraction, kwargs)
self.layer_norm = layer_norm
self.feature_extraction = feature_extraction
self.cnn_kwargs = kwargs
self.cnn_extractor = cnn_extractor
self.reuse = reuse
if layers is None:
layers = [64, 64]
self.layers = layers
self.reg_loss = None
self.reg_weight = reg_weight
self.entropy = None
assert len(layers) >= 1, "Error: must have at least one hidden layer for the policy."
self.activ_fn = act_fun
def make_actor(self, obs=None, reuse=False, scope="pi"):
if obs is None:
obs = self.processed_obs
with tf.variable_scope(scope, reuse=reuse):
if self.feature_extraction == "cnn":
pi_h = self.cnn_extractor(obs, **self.cnn_kwargs)
else:
pi_h = tf.layers.flatten(obs)
pi_h = mlp(pi_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)
self.act_mu = mu_ = tf.layers.dense(pi_h, self.ac_space.shape[0], activation=None)
# Important difference with SAC and other algo such as PPO:
# the std depends on the state, so we cannot use stable_baselines.common.distribution
log_std = tf.layers.dense(pi_h, self.ac_space.shape[0], activation=None)
# Regularize policy output (not used for now)
# reg_loss = self.reg_weight * 0.5 * tf.reduce_mean(log_std ** 2)
# reg_loss += self.reg_weight * 0.5 * tf.reduce_mean(mu ** 2)
# self.reg_loss = reg_loss
# OpenAI Variation to cap the standard deviation
# activation = tf.tanh # for log_std
# log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1)
# Original Implementation
log_std = tf.clip_by_value(log_std, LOG_STD_MIN, LOG_STD_MAX)
self.std = std = tf.exp(log_std)
# Reparameterization trick
pi_ = mu_ + tf.random_normal(tf.shape(mu_)) * std
logp_pi = gaussian_likelihood(pi_, mu_, log_std)
self.entropy = gaussian_entropy(log_std)
# MISSING: reg params for log and mu
# Apply squashing and account for it in the probability
deterministic_policy, policy, logp_pi = apply_squashing_func(mu_, pi_, logp_pi)
self.policy = policy
self.deterministic_policy = deterministic_policy
return deterministic_policy, policy, logp_pi
def make_critics(self, obs=None, action=None, reuse=False, scope="values_fn",
create_vf=True, create_qf=True):
if obs is None:
obs = self.processed_obs
with tf.variable_scope(scope, reuse=reuse):
if self.feature_extraction == "cnn":
critics_h = self.cnn_extractor(obs, **self.cnn_kwargs)
else:
critics_h = tf.layers.flatten(obs)
if create_vf:
# Value function
with tf.variable_scope('vf', reuse=reuse):
vf_h = mlp(critics_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)
value_fn = tf.layers.dense(vf_h, 1, name="vf")
self.value_fn = value_fn
if create_qf:
# Concatenate preprocessed state and action
qf_h = tf.concat([critics_h, action], axis=-1)
# Double Q values to reduce overestimation
with tf.variable_scope('qf1', reuse=reuse):
qf1_h = mlp(qf_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)
qf1 = tf.layers.dense(qf1_h, 1, name="qf1")
with tf.variable_scope('qf2', reuse=reuse):
qf2_h = mlp(qf_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)
qf2 = tf.layers.dense(qf2_h, 1, name="qf2")
self.qf1 = qf1
self.qf2 = qf2
return self.qf1, self.qf2, self.value_fn
def step(self, obs, state=None, mask=None, deterministic=False):
if deterministic:
return self.sess.run(self.deterministic_policy, {self.obs_ph: obs})
return self.sess.run(self.policy, {self.obs_ph: obs})
def proba_step(self, obs, state=None, mask=None):
return self.sess.run([self.act_mu, self.std], {self.obs_ph: obs})
class CnnPolicy(FeedForwardPolicy):
"""
Policy object that implements actor critic, using a CNN (the nature CNN)
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param reuse: (bool) If the policy is reusable or not
:param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):
super(CnnPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
feature_extraction="cnn", **_kwargs)
class LnCnnPolicy(FeedForwardPolicy):
"""
Policy object that implements actor critic, using a CNN (the nature CNN), with layer normalisation
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param reuse: (bool) If the policy is reusable or not
:param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):
super(LnCnnPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
feature_extraction="cnn", layer_norm=True, **_kwargs)
class MlpPolicy(FeedForwardPolicy):
"""
Policy object that implements actor critic, using a MLP (2 layers of 64)
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batches to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param _kwargs: (dict) Extra keyword arguments for the MLP feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):
super(MlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
feature_extraction="mlp", **_kwargs)
class LnMlpPolicy(FeedForwardPolicy):
"""
    Policy object that implements actor critic, using an MLP (2 layers of 64), with layer normalisation
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batches to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param _kwargs: (dict) Extra keyword arguments for the MLP feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):
super(LnMlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
feature_extraction="mlp", layer_norm=True, **_kwargs)
register_policy("CnnPolicy", CnnPolicy)
register_policy("LnCnnPolicy", LnCnnPolicy)
register_policy("MlpPolicy", MlpPolicy)
register_policy("LnMlpPolicy", LnMlpPolicy) | PypiClean |
/django-codenerix-4.0.24.tar.gz/django-codenerix-4.0.24/codenerix/static/codenerix/lib/angular/i18n/angular-locale_es-co.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"a. m.",
"p. m."
],
"DAY": [
"domingo",
"lunes",
"martes",
"mi\u00e9rcoles",
"jueves",
"viernes",
"s\u00e1bado"
],
"ERANAMES": [
"antes de Cristo",
"despu\u00e9s de Cristo"
],
"ERAS": [
"a. C.",
"d. C."
],
"FIRSTDAYOFWEEK": 6,
"MONTH": [
"enero",
"febrero",
"marzo",
"abril",
"mayo",
"junio",
"julio",
"agosto",
"septiembre",
"octubre",
"noviembre",
"diciembre"
],
"SHORTDAY": [
"dom.",
"lun.",
"mar.",
"mi\u00e9.",
"jue.",
"vie.",
"s\u00e1b."
],
"SHORTMONTH": [
"ene.",
"feb.",
"mar.",
"abr.",
"may.",
"jun.",
"jul.",
"ago.",
"sept.",
"oct.",
"nov.",
"dic."
],
"STANDALONEMONTH": [
"Enero",
"Febrero",
"Marzo",
"Abril",
"Mayo",
"Junio",
"Julio",
"Agosto",
"Septiembre",
"Octubre",
"Noviembre",
"Diciembre"
],
"WEEKENDRANGE": [
5,
6
],
"fullDate": "EEEE, d 'de' MMMM 'de' y",
"longDate": "d 'de' MMMM 'de' y",
"medium": "d/MM/y h:mm:ss a",
"mediumDate": "d/MM/y",
"mediumTime": "h:mm:ss a",
"short": "d/MM/yy h:mm a",
"shortDate": "d/MM/yy",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "$",
"DECIMAL_SEP": ",",
"GROUP_SEP": ".",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-\u00a4",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "es-co",
"localeID": "es_CO",
"pluralCat": function(n, opt_precision) { if (n == 1) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/fsds_100719-0.7.22.tar.gz/fsds_100719-0.7.22/README.rst | ===========
fsds_100719
===========
.. image:: https://img.shields.io/pypi/v/fsds_100719.svg
:target: https://pypi.python.org/pypi/fsds_100719
.. image:: https://img.shields.io/travis/jirvingphd/fsds_100719.svg
:target: https://travis-ci.org/jirvingphd/fsds_100719
.. image:: https://readthedocs.org/projects/fsds-100719/badge/?version=latest
:target: https://fsds-100719.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
Tools for Flatiron 100719 cohorts
* Free software: GNU General Public License v3
* Documentation: https://fsds-100719.readthedocs.io.
Features
--------
* TODO
Credits
-------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
| PypiClean |
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flashblade/FB_2_9/models/file_system_response.py | import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_9 import models
class FileSystemResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[FileSystem]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.FileSystem]
):
"""
Keyword args:
items (list[FileSystem]): A list of file system objects.
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `FileSystemResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FileSystemResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FileSystemResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | PypiClean |
/KindleComicConverter_headless-5.5.2-py3-none-any.whl/kindlecomicconverter/image.py |
import os
from PIL import Image, ImageOps, ImageStat, ImageChops, ImageFilter
from .shared import md5Checksum
class ProfileData:
def __init__(self):
pass
Palette4 = [
0x00, 0x00, 0x00,
0x55, 0x55, 0x55,
0xaa, 0xaa, 0xaa,
0xff, 0xff, 0xff
]
Palette15 = [
0x00, 0x00, 0x00,
0x11, 0x11, 0x11,
0x22, 0x22, 0x22,
0x33, 0x33, 0x33,
0x44, 0x44, 0x44,
0x55, 0x55, 0x55,
0x66, 0x66, 0x66,
0x77, 0x77, 0x77,
0x88, 0x88, 0x88,
0x99, 0x99, 0x99,
0xaa, 0xaa, 0xaa,
0xbb, 0xbb, 0xbb,
0xcc, 0xcc, 0xcc,
0xdd, 0xdd, 0xdd,
0xff, 0xff, 0xff,
]
Palette16 = [
0x00, 0x00, 0x00,
0x11, 0x11, 0x11,
0x22, 0x22, 0x22,
0x33, 0x33, 0x33,
0x44, 0x44, 0x44,
0x55, 0x55, 0x55,
0x66, 0x66, 0x66,
0x77, 0x77, 0x77,
0x88, 0x88, 0x88,
0x99, 0x99, 0x99,
0xaa, 0xaa, 0xaa,
0xbb, 0xbb, 0xbb,
0xcc, 0xcc, 0xcc,
0xdd, 0xdd, 0xdd,
0xee, 0xee, 0xee,
0xff, 0xff, 0xff,
]
PalleteNull = [
]
Profiles = {
'K1': ("Kindle 1", (600, 670), Palette4, 1.8),
'K2': ("Kindle 2", (600, 670), Palette15, 1.8),
'K34': ("Kindle Keyboard/Touch", (600, 800), Palette16, 1.8),
'K578': ("Kindle", (600, 800), Palette16, 1.8),
'KDX': ("Kindle DX/DXG", (824, 1000), Palette16, 1.8),
'KPW': ("Kindle Paperwhite 1/2", (758, 1024), Palette16, 1.8),
'KV': ("Kindle Paperwhite 3/4/Voyage/Oasis", (1072, 1448), Palette16, 1.8),
'KO': ("Kindle Oasis 2/3", (1264, 1680), Palette16, 1.8),
'KoMT': ("Kobo Mini/Touch", (600, 800), Palette16, 1.8),
'KoG': ("Kobo Glo", (768, 1024), Palette16, 1.8),
'KoGHD': ("Kobo Glo HD", (1072, 1448), Palette16, 1.8),
'KoA': ("Kobo Aura", (758, 1024), Palette16, 1.8),
'KoAHD': ("Kobo Aura HD", (1080, 1440), Palette16, 1.8),
'KoAH2O': ("Kobo Aura H2O", (1080, 1430), Palette16, 1.8),
'KoAO': ("Kobo Aura ONE", (1404, 1872), Palette16, 1.8),
'KoF': ("Kobo Forma", (1440, 1920), Palette16, 1.8),
'OTHER': ("Other", (0, 0), Palette16, 1.8),
}
class ComicPageParser:
def __init__(self, source, options):
Image.MAX_IMAGE_PIXELS = int(2048 * 2048 * 2048 // 4 // 3)
self.opt = options
self.source = source
self.size = self.opt.profileData[1]
self.payload = []
self.image = Image.open(os.path.join(source[0], source[1])).convert('RGB')
self.color = self.colorCheck()
self.fill = self.fillCheck()
self.splitCheck()
def getImageHistogram(self, image):
histogram = image.histogram()
if histogram[0] == 0:
return -1
elif histogram[255] == 0:
return 1
else:
return 0
def splitCheck(self):
width, height = self.image.size
dstwidth, dstheight = self.size
if (width > height) != (dstwidth > dstheight) and width <= dstheight and height <= dstwidth \
and not self.opt.webtoon and self.opt.splitter == 1:
self.payload.append(['R', self.source, self.image.rotate(90, Image.BICUBIC, True), self.color, self.fill])
elif (width > height) != (dstwidth > dstheight) and not self.opt.webtoon:
if self.opt.splitter != 1:
if width > height:
leftbox = (0, 0, int(width / 2), height)
rightbox = (int(width / 2), 0, width, height)
else:
leftbox = (0, 0, width, int(height / 2))
rightbox = (0, int(height / 2), width, height)
if self.opt.righttoleft:
pageone = self.image.crop(rightbox)
pagetwo = self.image.crop(leftbox)
else:
pageone = self.image.crop(leftbox)
pagetwo = self.image.crop(rightbox)
self.payload.append(['S1', self.source, pageone, self.color, self.fill])
self.payload.append(['S2', self.source, pagetwo, self.color, self.fill])
if self.opt.splitter > 0:
self.payload.append(['R', self.source, self.image.rotate(90, Image.BICUBIC, True),
self.color, self.fill])
else:
self.payload.append(['N', self.source, self.image, self.color, self.fill])
def colorCheck(self):
if self.opt.webtoon:
return True
else:
img = self.image.copy()
bands = img.getbands()
if bands == ('R', 'G', 'B') or bands == ('R', 'G', 'B', 'A'):
thumb = img.resize((40, 40))
SSE, bias = 0, [0, 0, 0]
bias = ImageStat.Stat(thumb).mean[:3]
bias = [b - sum(bias) / 3 for b in bias]
for pixel in thumb.getdata():
mu = sum(pixel) / 3
SSE += sum((pixel[i] - mu - bias[i]) * (pixel[i] - mu - bias[i]) for i in [0, 1, 2])
MSE = float(SSE) / (40 * 40)
if MSE > 22:
return True
else:
return False
else:
return False
def fillCheck(self):
if self.opt.bordersColor:
return self.opt.bordersColor
else:
bw = self.image.convert('L').point(lambda x: 0 if x < 128 else 255, '1')
imageBoxA = bw.getbbox()
imageBoxB = ImageChops.invert(bw).getbbox()
if imageBoxA is None or imageBoxB is None:
surfaceB, surfaceW = 0, 0
diff = 0
else:
surfaceB = (imageBoxA[2] - imageBoxA[0]) * (imageBoxA[3] - imageBoxA[1])
surfaceW = (imageBoxB[2] - imageBoxB[0]) * (imageBoxB[3] - imageBoxB[1])
diff = ((max(surfaceB, surfaceW) - min(surfaceB, surfaceW)) / min(surfaceB, surfaceW)) * 100
if diff > 0.5:
if surfaceW < surfaceB:
return 'white'
elif surfaceW > surfaceB:
return 'black'
else:
fill = 0
startY = 0
while startY < bw.size[1]:
if startY + 5 > bw.size[1]:
startY = bw.size[1] - 5
fill += self.getImageHistogram(bw.crop((0, startY, bw.size[0], startY + 5)))
startY += 5
startX = 0
while startX < bw.size[0]:
if startX + 5 > bw.size[0]:
startX = bw.size[0] - 5
fill += self.getImageHistogram(bw.crop((startX, 0, startX + 5, bw.size[1])))
startX += 5
if fill > 0:
return 'black'
else:
return 'white'
class ComicPage:
def __init__(self, options, mode, path, image, color, fill):
self.opt = options
_, self.size, self.palette, self.gamma = self.opt.profileData
if self.opt.hq:
self.size = (int(self.size[0] * 1.5), int(self.size[1] * 1.5))
self.image = image
self.color = color
self.fill = fill
self.rotated = False
self.orgPath = os.path.join(path[0], path[1])
if 'N' in mode:
self.targetPath = os.path.join(path[0], os.path.splitext(path[1])[0]) + '-KCC'
elif 'R' in mode:
self.targetPath = os.path.join(path[0], os.path.splitext(path[1])[0]) + '-KCC-A'
self.rotated = True
elif 'S1' in mode:
self.targetPath = os.path.join(path[0], os.path.splitext(path[1])[0]) + '-KCC-B'
elif 'S2' in mode:
self.targetPath = os.path.join(path[0], os.path.splitext(path[1])[0]) + '-KCC-C'
def saveToDir(self):
try:
flags = []
if not self.opt.forcecolor and not self.opt.forcepng:
self.image = self.image.convert('L')
if self.rotated:
flags.append('Rotated')
if self.fill != 'white':
flags.append('BlackBackground')
if self.opt.forcepng:
self.targetPath += '.png'
self.image.save(self.targetPath, 'PNG', optimize=1)
else:
self.targetPath += '.jpg'
self.image.save(self.targetPath, 'JPEG', optimize=1, quality=85)
return [md5Checksum(self.targetPath), flags, self.orgPath]
except IOError as err:
raise RuntimeError('Cannot save image. ' + str(err))
def autocontrastImage(self):
gamma = self.opt.gamma
if gamma < 0.1:
gamma = self.gamma
if self.gamma != 1.0 and self.color:
gamma = 1.0
if gamma == 1.0:
self.image = ImageOps.autocontrast(self.image)
else:
self.image = ImageOps.autocontrast(Image.eval(self.image, lambda a: 255 * (a / 255.) ** gamma))
def quantizeImage(self):
colors = len(self.palette) // 3
if colors < 256:
self.palette += self.palette[:3] * (256 - colors)
palImg = Image.new('P', (1, 1))
palImg.putpalette(self.palette)
self.image = self.image.convert('L')
self.image = self.image.convert('RGB')
        # Quantize is deprecated, but the new function calls it internally anyway.
self.image = self.image.quantize(palette=palImg)
def resizeImage(self):
if self.image.size[0] <= self.size[0] and self.image.size[1] <= self.size[1]:
method = Image.BICUBIC
else:
method = Image.LANCZOS
if self.opt.stretch or (self.opt.kfx and ('-KCC-B' in self.targetPath or '-KCC-C' in self.targetPath)):
self.image = self.image.resize(self.size, method)
elif self.image.size[0] <= self.size[0] and self.image.size[1] <= self.size[1] and not self.opt.upscale:
if self.opt.format == 'CBZ' or self.opt.kfx:
borderw = int((self.size[0] - self.image.size[0]) / 2)
borderh = int((self.size[1] - self.image.size[1]) / 2)
self.image = ImageOps.expand(self.image, border=(borderw, borderh), fill=self.fill)
if self.image.size[0] != self.size[0] or self.image.size[1] != self.size[1]:
self.image = ImageOps.fit(self.image, self.size, method=Image.BICUBIC, centering=(0.5, 0.5))
else:
if self.opt.format == 'CBZ' or self.opt.kfx:
ratioDev = float(self.size[0]) / float(self.size[1])
if (float(self.image.size[0]) / float(self.image.size[1])) < ratioDev:
diff = int(self.image.size[1] * ratioDev) - self.image.size[0]
self.image = ImageOps.expand(self.image, border=(int(diff / 2), 0), fill=self.fill)
elif (float(self.image.size[0]) / float(self.image.size[1])) > ratioDev:
diff = int(self.image.size[0] / ratioDev) - self.image.size[1]
self.image = ImageOps.expand(self.image, border=(0, int(diff / 2)), fill=self.fill)
self.image = ImageOps.fit(self.image, self.size, method=method, centering=(0.5, 0.5))
else:
hpercent = self.size[1] / float(self.image.size[1])
wsize = int((float(self.image.size[0]) * float(hpercent)))
self.image = self.image.resize((wsize, self.size[1]), method)
if self.image.size[0] > self.size[0] or self.image.size[1] > self.size[1]:
self.image.thumbnail(self.size, Image.LANCZOS)
def getBoundingBox(self, tmptmg):
min_margin = [int(0.005 * i + 0.5) for i in tmptmg.size]
max_margin = [int(0.1 * i + 0.5) for i in tmptmg.size]
bbox = tmptmg.getbbox()
bbox = (
max(0, min(max_margin[0], bbox[0] - min_margin[0])),
max(0, min(max_margin[1], bbox[1] - min_margin[1])),
min(tmptmg.size[0],
max(tmptmg.size[0] - max_margin[0], bbox[2] + min_margin[0])),
min(tmptmg.size[1],
max(tmptmg.size[1] - max_margin[1], bbox[3] + min_margin[1])),
)
return bbox
def cropPageNumber(self, power):
if self.fill != 'white':
tmptmg = self.image.convert(mode='L')
else:
tmptmg = ImageOps.invert(self.image.convert(mode='L'))
tmptmg = tmptmg.point(lambda x: x and 255)
tmptmg = tmptmg.filter(ImageFilter.MinFilter(size=3))
tmptmg = tmptmg.filter(ImageFilter.GaussianBlur(radius=5))
tmptmg = tmptmg.point(lambda x: (x >= 16 * power) and x)
self.image = self.image.crop(tmptmg.getbbox()) if tmptmg.getbbox() else self.image
def cropMargin(self, power):
if self.fill != 'white':
tmptmg = self.image.convert(mode='L')
else:
tmptmg = ImageOps.invert(self.image.convert(mode='L'))
tmptmg = tmptmg.filter(ImageFilter.GaussianBlur(radius=3))
tmptmg = tmptmg.point(lambda x: (x >= 16 * power) and x)
self.image = self.image.crop(self.getBoundingBox(tmptmg)) if tmptmg.getbbox() else self.image
class Cover:
def __init__(self, source, target, opt, tomeid):
self.options = opt
self.source = source
self.target = target
if tomeid == 0:
self.tomeid = 1
else:
self.tomeid = tomeid
self.image = Image.open(source)
self.process()
def process(self):
self.image = self.image.convert('RGB')
self.image = ImageOps.autocontrast(self.image)
if not self.options.forcecolor:
self.image = self.image.convert('L')
self.image.thumbnail(self.options.profileData[1], Image.LANCZOS)
self.save()
def save(self):
try:
self.image.save(self.target, "JPEG", optimize=1, quality=85)
except IOError:
raise RuntimeError('Failed to save cover.')
def saveToKindle(self, kindle, asin):
self.image = self.image.resize((300, 470), Image.ANTIALIAS)
try:
self.image.save(os.path.join(kindle.path.split('documents')[0], 'system', 'thumbnails',
'thumbnail_' + asin + '_EBOK_portrait.jpg'), 'JPEG', optimize=1, quality=85)
except IOError:
raise RuntimeError('Failed to upload cover.') | PypiClean |
/tencentcloud-python-sdk-3.0.960.tar.gz/tencentcloud-python-sdk-3.0.960/tencentcloud/aiart/v20221229/aiart_client.py |
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.aiart.v20221229 import models
class AiartClient(AbstractClient):
_apiVersion = '2022-12-29'
_endpoint = 'aiart.tencentcloudapi.com'
_service = 'aiart'
def ImageToImage(self, request):
"""智能图生图接口将根据输入的图片及辅助描述文本,智能生成与之相关的结果图。
输入:单边分辨率小于2000、转成 Base64 字符串后小于 5MB 的图片,建议同时输入描述文本。
输出:对应风格及分辨率的 AI 生成图。
可支持的风格详见 [智能图生图风格列表](https://cloud.tencent.com/document/product/1668/86250),请将列表中的“风格编号”传入 Styles 数组,建议选择一种风格。
请求频率限制为1次/秒。
:param request: Request instance for ImageToImage.
:type request: :class:`tencentcloud.aiart.v20221229.models.ImageToImageRequest`
:rtype: :class:`tencentcloud.aiart.v20221229.models.ImageToImageResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("ImageToImage", params, headers=headers)
response = json.loads(body)
model = models.ImageToImageResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def TextToImage(self, request):
"""智能文生图接口将根据输入的描述文本,智能生成与之相关的结果图。
输入:256个字符以内的描述性文本,推荐使用中文。
输出:对应风格及分辨率的 AI 生成图。
可支持的风格详见 [智能文生图风格列表](https://cloud.tencent.com/document/product/1668/86249),请将列表中的“风格编号”传入 Styles 数组,建议选择一种风格。
请求频率限制为1次/秒。
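        A minimal usage sketch (illustrative only; the region, the credential placeholders, and the
        "Prompt" request field follow the public API documentation and are assumptions, not guaranteed
        by this module)::

            import json
            from tencentcloud.common import credential
            from tencentcloud.aiart.v20221229 import aiart_client, models

            cred = credential.Credential("SECRET_ID", "SECRET_KEY")   # placeholder credentials
            client = aiart_client.AiartClient(cred, "ap-guangzhou")   # assumed region

            req = models.TextToImageRequest()
            # "Prompt" is the documented field for the descriptive text (assumed here).
            req.from_json_string(json.dumps({"Prompt": "a shiba inu running across a meadow"}))

            resp = client.TextToImage(req)
            print(resp.to_json_string())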
:param request: Request instance for TextToImage.
:type request: :class:`tencentcloud.aiart.v20221229.models.TextToImageRequest`
:rtype: :class:`tencentcloud.aiart.v20221229.models.TextToImageResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("TextToImage", params, headers=headers)
response = json.loads(body)
model = models.TextToImageResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e)) | PypiClean |
/synergy_scheduler-2.3.tar.gz/synergy_scheduler-2.3/synergy/mx/static/js/d3-scale.v2.2.2.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('d3-collection'), require('d3-array'), require('d3-interpolate'), require('d3-format'), require('d3-time'), require('d3-time-format')) :
typeof define === 'function' && define.amd ? define(['exports', 'd3-collection', 'd3-array', 'd3-interpolate', 'd3-format', 'd3-time', 'd3-time-format'], factory) :
(factory((global.d3 = global.d3 || {}),global.d3,global.d3,global.d3,global.d3,global.d3,global.d3));
}(this, (function (exports,d3Collection,d3Array,d3Interpolate,d3Format,d3Time,d3TimeFormat) { 'use strict';
function initRange(domain, range) {
switch (arguments.length) {
case 0: break;
case 1: this.range(domain); break;
default: this.range(range).domain(domain); break;
}
return this;
}
function initInterpolator(domain, interpolator) {
switch (arguments.length) {
case 0: break;
case 1: this.interpolator(domain); break;
default: this.interpolator(interpolator).domain(domain); break;
}
return this;
}
var array = Array.prototype;
var map = array.map;
var slice = array.slice;
var implicit = {name: "implicit"};
function ordinal() {
var index = d3Collection.map(),
domain = [],
range = [],
unknown = implicit;
function scale(d) {
var key = d + "", i = index.get(key);
if (!i) {
if (unknown !== implicit) return unknown;
index.set(key, i = domain.push(d));
}
return range[(i - 1) % range.length];
}
scale.domain = function(_) {
if (!arguments.length) return domain.slice();
domain = [], index = d3Collection.map();
var i = -1, n = _.length, d, key;
while (++i < n) if (!index.has(key = (d = _[i]) + "")) index.set(key, domain.push(d));
return scale;
};
scale.range = function(_) {
return arguments.length ? (range = slice.call(_), scale) : range.slice();
};
scale.unknown = function(_) {
return arguments.length ? (unknown = _, scale) : unknown;
};
scale.copy = function() {
return ordinal(domain, range).unknown(unknown);
};
initRange.apply(scale, arguments);
return scale;
}
function band() {
var scale = ordinal().unknown(undefined),
domain = scale.domain,
ordinalRange = scale.range,
range = [0, 1],
step,
bandwidth,
round = false,
paddingInner = 0,
paddingOuter = 0,
align = 0.5;
delete scale.unknown;
function rescale() {
var n = domain().length,
reverse = range[1] < range[0],
start = range[reverse - 0],
stop = range[1 - reverse];
step = (stop - start) / Math.max(1, n - paddingInner + paddingOuter * 2);
if (round) step = Math.floor(step);
start += (stop - start - step * (n - paddingInner)) * align;
bandwidth = step * (1 - paddingInner);
if (round) start = Math.round(start), bandwidth = Math.round(bandwidth);
var values = d3Array.range(n).map(function(i) { return start + step * i; });
return ordinalRange(reverse ? values.reverse() : values);
}
scale.domain = function(_) {
return arguments.length ? (domain(_), rescale()) : domain();
};
scale.range = function(_) {
return arguments.length ? (range = [+_[0], +_[1]], rescale()) : range.slice();
};
scale.rangeRound = function(_) {
return range = [+_[0], +_[1]], round = true, rescale();
};
scale.bandwidth = function() {
return bandwidth;
};
scale.step = function() {
return step;
};
scale.round = function(_) {
return arguments.length ? (round = !!_, rescale()) : round;
};
scale.padding = function(_) {
return arguments.length ? (paddingInner = Math.min(1, paddingOuter = +_), rescale()) : paddingInner;
};
scale.paddingInner = function(_) {
return arguments.length ? (paddingInner = Math.min(1, _), rescale()) : paddingInner;
};
scale.paddingOuter = function(_) {
return arguments.length ? (paddingOuter = +_, rescale()) : paddingOuter;
};
scale.align = function(_) {
return arguments.length ? (align = Math.max(0, Math.min(1, _)), rescale()) : align;
};
scale.copy = function() {
return band(domain(), range)
.round(round)
.paddingInner(paddingInner)
.paddingOuter(paddingOuter)
.align(align);
};
return initRange.apply(rescale(), arguments);
}
function pointish(scale) {
var copy = scale.copy;
scale.padding = scale.paddingOuter;
delete scale.paddingInner;
delete scale.paddingOuter;
scale.copy = function() {
return pointish(copy());
};
return scale;
}
function point() {
return pointish(band.apply(null, arguments).paddingInner(1));
}
function constant(x) {
return function() {
return x;
};
}
function number(x) {
return +x;
}
var unit = [0, 1];
function identity(x) {
return x;
}
function normalize(a, b) {
return (b -= (a = +a))
? function(x) { return (x - a) / b; }
: constant(isNaN(b) ? NaN : 0.5);
}
function clamper(domain) {
var a = domain[0], b = domain[domain.length - 1], t;
if (a > b) t = a, a = b, b = t;
return function(x) { return Math.max(a, Math.min(b, x)); };
}
// normalize(a, b)(x) takes a domain value x in [a,b] and returns the corresponding parameter t in [0,1].
// interpolate(a, b)(t) takes a parameter t in [0,1] and returns the corresponding range value x in [a,b].
function bimap(domain, range, interpolate) {
var d0 = domain[0], d1 = domain[1], r0 = range[0], r1 = range[1];
if (d1 < d0) d0 = normalize(d1, d0), r0 = interpolate(r1, r0);
else d0 = normalize(d0, d1), r0 = interpolate(r0, r1);
return function(x) { return r0(d0(x)); };
}
function polymap(domain, range, interpolate) {
var j = Math.min(domain.length, range.length) - 1,
d = new Array(j),
r = new Array(j),
i = -1;
// Reverse descending domains.
if (domain[j] < domain[0]) {
domain = domain.slice().reverse();
range = range.slice().reverse();
}
while (++i < j) {
d[i] = normalize(domain[i], domain[i + 1]);
r[i] = interpolate(range[i], range[i + 1]);
}
return function(x) {
var i = d3Array.bisect(domain, x, 1, j) - 1;
return r[i](d[i](x));
};
}
function copy(source, target) {
return target
.domain(source.domain())
.range(source.range())
.interpolate(source.interpolate())
.clamp(source.clamp())
.unknown(source.unknown());
}
function transformer() {
var domain = unit,
range = unit,
interpolate = d3Interpolate.interpolate,
transform,
untransform,
unknown,
clamp = identity,
piecewise,
output,
input;
function rescale() {
piecewise = Math.min(domain.length, range.length) > 2 ? polymap : bimap;
output = input = null;
return scale;
}
function scale(x) {
return isNaN(x = +x) ? unknown : (output || (output = piecewise(domain.map(transform), range, interpolate)))(transform(clamp(x)));
}
scale.invert = function(y) {
return clamp(untransform((input || (input = piecewise(range, domain.map(transform), d3Interpolate.interpolateNumber)))(y)));
};
scale.domain = function(_) {
return arguments.length ? (domain = map.call(_, number), clamp === identity || (clamp = clamper(domain)), rescale()) : domain.slice();
};
scale.range = function(_) {
return arguments.length ? (range = slice.call(_), rescale()) : range.slice();
};
scale.rangeRound = function(_) {
return range = slice.call(_), interpolate = d3Interpolate.interpolateRound, rescale();
};
scale.clamp = function(_) {
return arguments.length ? (clamp = _ ? clamper(domain) : identity, scale) : clamp !== identity;
};
scale.interpolate = function(_) {
return arguments.length ? (interpolate = _, rescale()) : interpolate;
};
scale.unknown = function(_) {
return arguments.length ? (unknown = _, scale) : unknown;
};
return function(t, u) {
transform = t, untransform = u;
return rescale();
};
}
function continuous(transform, untransform) {
return transformer()(transform, untransform);
}
function tickFormat(start, stop, count, specifier) {
var step = d3Array.tickStep(start, stop, count),
precision;
specifier = d3Format.formatSpecifier(specifier == null ? ",f" : specifier);
switch (specifier.type) {
case "s": {
var value = Math.max(Math.abs(start), Math.abs(stop));
if (specifier.precision == null && !isNaN(precision = d3Format.precisionPrefix(step, value))) specifier.precision = precision;
return d3Format.formatPrefix(specifier, value);
}
case "":
case "e":
case "g":
case "p":
case "r": {
if (specifier.precision == null && !isNaN(precision = d3Format.precisionRound(step, Math.max(Math.abs(start), Math.abs(stop))))) specifier.precision = precision - (specifier.type === "e");
break;
}
case "f":
case "%": {
if (specifier.precision == null && !isNaN(precision = d3Format.precisionFixed(step))) specifier.precision = precision - (specifier.type === "%") * 2;
break;
}
}
return d3Format.format(specifier);
}
function linearish(scale) {
var domain = scale.domain;
scale.ticks = function(count) {
var d = domain();
return d3Array.ticks(d[0], d[d.length - 1], count == null ? 10 : count);
};
scale.tickFormat = function(count, specifier) {
var d = domain();
return tickFormat(d[0], d[d.length - 1], count == null ? 10 : count, specifier);
};
scale.nice = function(count) {
if (count == null) count = 10;
var d = domain(),
i0 = 0,
i1 = d.length - 1,
start = d[i0],
stop = d[i1],
step;
if (stop < start) {
step = start, start = stop, stop = step;
step = i0, i0 = i1, i1 = step;
}
step = d3Array.tickIncrement(start, stop, count);
if (step > 0) {
start = Math.floor(start / step) * step;
stop = Math.ceil(stop / step) * step;
step = d3Array.tickIncrement(start, stop, count);
} else if (step < 0) {
start = Math.ceil(start * step) / step;
stop = Math.floor(stop * step) / step;
step = d3Array.tickIncrement(start, stop, count);
}
if (step > 0) {
d[i0] = Math.floor(start / step) * step;
d[i1] = Math.ceil(stop / step) * step;
domain(d);
} else if (step < 0) {
d[i0] = Math.ceil(start * step) / step;
d[i1] = Math.floor(stop * step) / step;
domain(d);
}
return scale;
};
return scale;
}
function linear() {
var scale = continuous(identity, identity);
scale.copy = function() {
return copy(scale, linear());
};
initRange.apply(scale, arguments);
return linearish(scale);
}
function identity$1(domain) {
var unknown;
function scale(x) {
return isNaN(x = +x) ? unknown : x;
}
scale.invert = scale;
scale.domain = scale.range = function(_) {
return arguments.length ? (domain = map.call(_, number), scale) : domain.slice();
};
scale.unknown = function(_) {
return arguments.length ? (unknown = _, scale) : unknown;
};
scale.copy = function() {
return identity$1(domain).unknown(unknown);
};
domain = arguments.length ? map.call(domain, number) : [0, 1];
return linearish(scale);
}
function nice(domain, interval) {
domain = domain.slice();
var i0 = 0,
i1 = domain.length - 1,
x0 = domain[i0],
x1 = domain[i1],
t;
if (x1 < x0) {
t = i0, i0 = i1, i1 = t;
t = x0, x0 = x1, x1 = t;
}
domain[i0] = interval.floor(x0);
domain[i1] = interval.ceil(x1);
return domain;
}
function transformLog(x) {
return Math.log(x);
}
function transformExp(x) {
return Math.exp(x);
}
function transformLogn(x) {
return -Math.log(-x);
}
function transformExpn(x) {
return -Math.exp(-x);
}
function pow10(x) {
return isFinite(x) ? +("1e" + x) : x < 0 ? 0 : x;
}
function powp(base) {
return base === 10 ? pow10
: base === Math.E ? Math.exp
: function(x) { return Math.pow(base, x); };
}
function logp(base) {
return base === Math.E ? Math.log
: base === 10 && Math.log10
|| base === 2 && Math.log2
|| (base = Math.log(base), function(x) { return Math.log(x) / base; });
}
function reflect(f) {
return function(x) {
return -f(-x);
};
}
function loggish(transform) {
var scale = transform(transformLog, transformExp),
domain = scale.domain,
base = 10,
logs,
pows;
function rescale() {
logs = logp(base), pows = powp(base);
if (domain()[0] < 0) {
logs = reflect(logs), pows = reflect(pows);
transform(transformLogn, transformExpn);
} else {
transform(transformLog, transformExp);
}
return scale;
}
scale.base = function(_) {
return arguments.length ? (base = +_, rescale()) : base;
};
scale.domain = function(_) {
return arguments.length ? (domain(_), rescale()) : domain();
};
scale.ticks = function(count) {
var d = domain(),
u = d[0],
v = d[d.length - 1],
r;
if (r = v < u) i = u, u = v, v = i;
var i = logs(u),
j = logs(v),
p,
k,
t,
n = count == null ? 10 : +count,
z = [];
if (!(base % 1) && j - i < n) {
i = Math.round(i) - 1, j = Math.round(j) + 1;
if (u > 0) for (; i < j; ++i) {
for (k = 1, p = pows(i); k < base; ++k) {
t = p * k;
if (t < u) continue;
if (t > v) break;
z.push(t);
}
} else for (; i < j; ++i) {
for (k = base - 1, p = pows(i); k >= 1; --k) {
t = p * k;
if (t < u) continue;
if (t > v) break;
z.push(t);
}
}
} else {
z = d3Array.ticks(i, j, Math.min(j - i, n)).map(pows);
}
return r ? z.reverse() : z;
};
scale.tickFormat = function(count, specifier) {
if (specifier == null) specifier = base === 10 ? ".0e" : ",";
if (typeof specifier !== "function") specifier = d3Format.format(specifier);
if (count === Infinity) return specifier;
if (count == null) count = 10;
var k = Math.max(1, base * count / scale.ticks().length); // TODO fast estimate?
return function(d) {
var i = d / pows(Math.round(logs(d)));
if (i * base < base - 0.5) i *= base;
return i <= k ? specifier(d) : "";
};
};
scale.nice = function() {
return domain(nice(domain(), {
floor: function(x) { return pows(Math.floor(logs(x))); },
ceil: function(x) { return pows(Math.ceil(logs(x))); }
}));
};
return scale;
}
function log() {
var scale = loggish(transformer()).domain([1, 10]);
scale.copy = function() {
return copy(scale, log()).base(scale.base());
};
initRange.apply(scale, arguments);
return scale;
}
function transformSymlog(c) {
return function(x) {
return Math.sign(x) * Math.log1p(Math.abs(x / c));
};
}
function transformSymexp(c) {
return function(x) {
return Math.sign(x) * Math.expm1(Math.abs(x)) * c;
};
}
function symlogish(transform) {
var c = 1, scale = transform(transformSymlog(c), transformSymexp(c));
scale.constant = function(_) {
return arguments.length ? transform(transformSymlog(c = +_), transformSymexp(c)) : c;
};
return linearish(scale);
}
function symlog() {
var scale = symlogish(transformer());
scale.copy = function() {
return copy(scale, symlog()).constant(scale.constant());
};
return initRange.apply(scale, arguments);
}
function transformPow(exponent) {
return function(x) {
return x < 0 ? -Math.pow(-x, exponent) : Math.pow(x, exponent);
};
}
function transformSqrt(x) {
return x < 0 ? -Math.sqrt(-x) : Math.sqrt(x);
}
function transformSquare(x) {
return x < 0 ? -x * x : x * x;
}
function powish(transform) {
var scale = transform(identity, identity),
exponent = 1;
function rescale() {
return exponent === 1 ? transform(identity, identity)
: exponent === 0.5 ? transform(transformSqrt, transformSquare)
: transform(transformPow(exponent), transformPow(1 / exponent));
}
scale.exponent = function(_) {
return arguments.length ? (exponent = +_, rescale()) : exponent;
};
return linearish(scale);
}
function pow() {
var scale = powish(transformer());
scale.copy = function() {
return copy(scale, pow()).exponent(scale.exponent());
};
initRange.apply(scale, arguments);
return scale;
}
function sqrt() {
return pow.apply(null, arguments).exponent(0.5);
}
function quantile() {
var domain = [],
range = [],
thresholds = [],
unknown;
function rescale() {
var i = 0, n = Math.max(1, range.length);
thresholds = new Array(n - 1);
while (++i < n) thresholds[i - 1] = d3Array.quantile(domain, i / n);
return scale;
}
function scale(x) {
return isNaN(x = +x) ? unknown : range[d3Array.bisect(thresholds, x)];
}
scale.invertExtent = function(y) {
var i = range.indexOf(y);
return i < 0 ? [NaN, NaN] : [
i > 0 ? thresholds[i - 1] : domain[0],
i < thresholds.length ? thresholds[i] : domain[domain.length - 1]
];
};
scale.domain = function(_) {
if (!arguments.length) return domain.slice();
domain = [];
for (var i = 0, n = _.length, d; i < n; ++i) if (d = _[i], d != null && !isNaN(d = +d)) domain.push(d);
domain.sort(d3Array.ascending);
return rescale();
};
scale.range = function(_) {
return arguments.length ? (range = slice.call(_), rescale()) : range.slice();
};
scale.unknown = function(_) {
return arguments.length ? (unknown = _, scale) : unknown;
};
scale.quantiles = function() {
return thresholds.slice();
};
scale.copy = function() {
return quantile()
.domain(domain)
.range(range)
.unknown(unknown);
};
return initRange.apply(scale, arguments);
}
function quantize() {
var x0 = 0,
x1 = 1,
n = 1,
domain = [0.5],
range = [0, 1],
unknown;
function scale(x) {
return x <= x ? range[d3Array.bisect(domain, x, 0, n)] : unknown;
}
function rescale() {
var i = -1;
domain = new Array(n);
while (++i < n) domain[i] = ((i + 1) * x1 - (i - n) * x0) / (n + 1);
return scale;
}
scale.domain = function(_) {
return arguments.length ? (x0 = +_[0], x1 = +_[1], rescale()) : [x0, x1];
};
scale.range = function(_) {
return arguments.length ? (n = (range = slice.call(_)).length - 1, rescale()) : range.slice();
};
scale.invertExtent = function(y) {
var i = range.indexOf(y);
return i < 0 ? [NaN, NaN]
: i < 1 ? [x0, domain[0]]
: i >= n ? [domain[n - 1], x1]
: [domain[i - 1], domain[i]];
};
scale.unknown = function(_) {
return arguments.length ? (unknown = _, scale) : scale;
};
scale.thresholds = function() {
return domain.slice();
};
scale.copy = function() {
return quantize()
.domain([x0, x1])
.range(range)
.unknown(unknown);
};
return initRange.apply(linearish(scale), arguments);
}
function threshold() {
var domain = [0.5],
range = [0, 1],
unknown,
n = 1;
function scale(x) {
return x <= x ? range[d3Array.bisect(domain, x, 0, n)] : unknown;
}
scale.domain = function(_) {
return arguments.length ? (domain = slice.call(_), n = Math.min(domain.length, range.length - 1), scale) : domain.slice();
};
scale.range = function(_) {
return arguments.length ? (range = slice.call(_), n = Math.min(domain.length, range.length - 1), scale) : range.slice();
};
scale.invertExtent = function(y) {
var i = range.indexOf(y);
return [domain[i - 1], domain[i]];
};
scale.unknown = function(_) {
return arguments.length ? (unknown = _, scale) : unknown;
};
scale.copy = function() {
return threshold()
.domain(domain)
.range(range)
.unknown(unknown);
};
return initRange.apply(scale, arguments);
}
var durationSecond = 1000,
durationMinute = durationSecond * 60,
durationHour = durationMinute * 60,
durationDay = durationHour * 24,
durationWeek = durationDay * 7,
durationMonth = durationDay * 30,
durationYear = durationDay * 365;
function date(t) {
return new Date(t);
}
function number$1(t) {
return t instanceof Date ? +t : +new Date(+t);
}
function calendar(year, month, week, day, hour, minute, second, millisecond, format) {
var scale = continuous(identity, identity),
invert = scale.invert,
domain = scale.domain;
var formatMillisecond = format(".%L"),
formatSecond = format(":%S"),
formatMinute = format("%I:%M"),
formatHour = format("%I %p"),
formatDay = format("%a %d"),
formatWeek = format("%b %d"),
formatMonth = format("%B"),
formatYear = format("%Y");
var tickIntervals = [
[second, 1, durationSecond],
[second, 5, 5 * durationSecond],
[second, 15, 15 * durationSecond],
[second, 30, 30 * durationSecond],
[minute, 1, durationMinute],
[minute, 5, 5 * durationMinute],
[minute, 15, 15 * durationMinute],
[minute, 30, 30 * durationMinute],
[ hour, 1, durationHour ],
[ hour, 3, 3 * durationHour ],
[ hour, 6, 6 * durationHour ],
[ hour, 12, 12 * durationHour ],
[ day, 1, durationDay ],
[ day, 2, 2 * durationDay ],
[ week, 1, durationWeek ],
[ month, 1, durationMonth ],
[ month, 3, 3 * durationMonth ],
[ year, 1, durationYear ]
];
function tickFormat(date) {
return (second(date) < date ? formatMillisecond
: minute(date) < date ? formatSecond
: hour(date) < date ? formatMinute
: day(date) < date ? formatHour
: month(date) < date ? (week(date) < date ? formatDay : formatWeek)
: year(date) < date ? formatMonth
: formatYear)(date);
}
function tickInterval(interval, start, stop, step) {
if (interval == null) interval = 10;
// If a desired tick count is specified, pick a reasonable tick interval
// based on the extent of the domain and a rough estimate of tick size.
// Otherwise, assume interval is already a time interval and use it.
if (typeof interval === "number") {
var target = Math.abs(stop - start) / interval,
i = d3Array.bisector(function(i) { return i[2]; }).right(tickIntervals, target);
if (i === tickIntervals.length) {
step = d3Array.tickStep(start / durationYear, stop / durationYear, interval);
interval = year;
} else if (i) {
i = tickIntervals[target / tickIntervals[i - 1][2] < tickIntervals[i][2] / target ? i - 1 : i];
step = i[1];
interval = i[0];
} else {
step = Math.max(d3Array.tickStep(start, stop, interval), 1);
interval = millisecond;
}
}
return step == null ? interval : interval.every(step);
}
scale.invert = function(y) {
return new Date(invert(y));
};
scale.domain = function(_) {
return arguments.length ? domain(map.call(_, number$1)) : domain().map(date);
};
scale.ticks = function(interval, step) {
var d = domain(),
t0 = d[0],
t1 = d[d.length - 1],
r = t1 < t0,
t;
if (r) t = t0, t0 = t1, t1 = t;
t = tickInterval(interval, t0, t1, step);
t = t ? t.range(t0, t1 + 1) : []; // inclusive stop
return r ? t.reverse() : t;
};
scale.tickFormat = function(count, specifier) {
return specifier == null ? tickFormat : format(specifier);
};
scale.nice = function(interval, step) {
var d = domain();
return (interval = tickInterval(interval, d[0], d[d.length - 1], step))
? domain(nice(d, interval))
: scale;
};
scale.copy = function() {
return copy(scale, calendar(year, month, week, day, hour, minute, second, millisecond, format));
};
return scale;
}
function time() {
return initRange.apply(calendar(d3Time.timeYear, d3Time.timeMonth, d3Time.timeWeek, d3Time.timeDay, d3Time.timeHour, d3Time.timeMinute, d3Time.timeSecond, d3Time.timeMillisecond, d3TimeFormat.timeFormat).domain([new Date(2000, 0, 1), new Date(2000, 0, 2)]), arguments);
}
function utcTime() {
return initRange.apply(calendar(d3Time.utcYear, d3Time.utcMonth, d3Time.utcWeek, d3Time.utcDay, d3Time.utcHour, d3Time.utcMinute, d3Time.utcSecond, d3Time.utcMillisecond, d3TimeFormat.utcFormat).domain([Date.UTC(2000, 0, 1), Date.UTC(2000, 0, 2)]), arguments);
}
function transformer$1() {
var x0 = 0,
x1 = 1,
t0,
t1,
k10,
transform,
interpolator = identity,
clamp = false,
unknown;
function scale(x) {
return isNaN(x = +x) ? unknown : interpolator(k10 === 0 ? 0.5 : (x = (transform(x) - t0) * k10, clamp ? Math.max(0, Math.min(1, x)) : x));
}
scale.domain = function(_) {
return arguments.length ? (t0 = transform(x0 = +_[0]), t1 = transform(x1 = +_[1]), k10 = t0 === t1 ? 0 : 1 / (t1 - t0), scale) : [x0, x1];
};
scale.clamp = function(_) {
return arguments.length ? (clamp = !!_, scale) : clamp;
};
scale.interpolator = function(_) {
return arguments.length ? (interpolator = _, scale) : interpolator;
};
scale.unknown = function(_) {
return arguments.length ? (unknown = _, scale) : unknown;
};
return function(t) {
transform = t, t0 = t(x0), t1 = t(x1), k10 = t0 === t1 ? 0 : 1 / (t1 - t0);
return scale;
};
}
function copy$1(source, target) {
return target
.domain(source.domain())
.interpolator(source.interpolator())
.clamp(source.clamp())
.unknown(source.unknown());
}
function sequential() {
var scale = linearish(transformer$1()(identity));
scale.copy = function() {
return copy$1(scale, sequential());
};
return initInterpolator.apply(scale, arguments);
}
function sequentialLog() {
var scale = loggish(transformer$1()).domain([1, 10]);
scale.copy = function() {
return copy$1(scale, sequentialLog()).base(scale.base());
};
return initInterpolator.apply(scale, arguments);
}
function sequentialSymlog() {
var scale = symlogish(transformer$1());
scale.copy = function() {
return copy$1(scale, sequentialSymlog()).constant(scale.constant());
};
return initInterpolator.apply(scale, arguments);
}
function sequentialPow() {
var scale = powish(transformer$1());
scale.copy = function() {
return copy$1(scale, sequentialPow()).exponent(scale.exponent());
};
return initInterpolator.apply(scale, arguments);
}
function sequentialSqrt() {
return sequentialPow.apply(null, arguments).exponent(0.5);
}
function sequentialQuantile() {
var domain = [],
interpolator = identity;
function scale(x) {
if (!isNaN(x = +x)) return interpolator((d3Array.bisect(domain, x) - 1) / (domain.length - 1));
}
scale.domain = function(_) {
if (!arguments.length) return domain.slice();
domain = [];
for (var i = 0, n = _.length, d; i < n; ++i) if (d = _[i], d != null && !isNaN(d = +d)) domain.push(d);
domain.sort(d3Array.ascending);
return scale;
};
scale.interpolator = function(_) {
return arguments.length ? (interpolator = _, scale) : interpolator;
};
scale.copy = function() {
return sequentialQuantile(interpolator).domain(domain);
};
return initInterpolator.apply(scale, arguments);
}
function transformer$2() {
var x0 = 0,
x1 = 0.5,
x2 = 1,
t0,
t1,
t2,
k10,
k21,
interpolator = identity,
transform,
clamp = false,
unknown;
function scale(x) {
return isNaN(x = +x) ? unknown : (x = 0.5 + ((x = +transform(x)) - t1) * (x < t1 ? k10 : k21), interpolator(clamp ? Math.max(0, Math.min(1, x)) : x));
}
scale.domain = function(_) {
return arguments.length ? (t0 = transform(x0 = +_[0]), t1 = transform(x1 = +_[1]), t2 = transform(x2 = +_[2]), k10 = t0 === t1 ? 0 : 0.5 / (t1 - t0), k21 = t1 === t2 ? 0 : 0.5 / (t2 - t1), scale) : [x0, x1, x2];
};
scale.clamp = function(_) {
return arguments.length ? (clamp = !!_, scale) : clamp;
};
scale.interpolator = function(_) {
return arguments.length ? (interpolator = _, scale) : interpolator;
};
scale.unknown = function(_) {
return arguments.length ? (unknown = _, scale) : unknown;
};
return function(t) {
transform = t, t0 = t(x0), t1 = t(x1), t2 = t(x2), k10 = t0 === t1 ? 0 : 0.5 / (t1 - t0), k21 = t1 === t2 ? 0 : 0.5 / (t2 - t1);
return scale;
};
}
function diverging() {
var scale = linearish(transformer$2()(identity));
scale.copy = function() {
return copy$1(scale, diverging());
};
return initInterpolator.apply(scale, arguments);
}
function divergingLog() {
var scale = loggish(transformer$2()).domain([0.1, 1, 10]);
scale.copy = function() {
return copy$1(scale, divergingLog()).base(scale.base());
};
return initInterpolator.apply(scale, arguments);
}
function divergingSymlog() {
var scale = symlogish(transformer$2());
scale.copy = function() {
return copy$1(scale, divergingSymlog()).constant(scale.constant());
};
return initInterpolator.apply(scale, arguments);
}
function divergingPow() {
var scale = powish(transformer$2());
scale.copy = function() {
return copy$1(scale, divergingPow()).exponent(scale.exponent());
};
return initInterpolator.apply(scale, arguments);
}
function divergingSqrt() {
return divergingPow.apply(null, arguments).exponent(0.5);
}
exports.scaleBand = band;
exports.scalePoint = point;
exports.scaleIdentity = identity$1;
exports.scaleLinear = linear;
exports.scaleLog = log;
exports.scaleSymlog = symlog;
exports.scaleOrdinal = ordinal;
exports.scaleImplicit = implicit;
exports.scalePow = pow;
exports.scaleSqrt = sqrt;
exports.scaleQuantile = quantile;
exports.scaleQuantize = quantize;
exports.scaleThreshold = threshold;
exports.scaleTime = time;
exports.scaleUtc = utcTime;
exports.scaleSequential = sequential;
exports.scaleSequentialLog = sequentialLog;
exports.scaleSequentialPow = sequentialPow;
exports.scaleSequentialSqrt = sequentialSqrt;
exports.scaleSequentialSymlog = sequentialSymlog;
exports.scaleSequentialQuantile = sequentialQuantile;
exports.scaleDiverging = diverging;
exports.scaleDivergingLog = divergingLog;
exports.scaleDivergingPow = divergingPow;
exports.scaleDivergingSqrt = divergingSqrt;
exports.scaleDivergingSymlog = divergingSymlog;
exports.tickFormat = tickFormat;
Object.defineProperty(exports, '__esModule', { value: true });
}))); | PypiClean |
/SAMY-1.0.0.tar.gz/SAMY-1.0.0/README.md | # YOLO-SAM
A repository for Instance Segmentation using YOLO and SAM (Segment Anything Model).
## Installation
Install YOLOv8 and SAM dependencies:
```
pip install -r requirements.txt
```
Install YOLOv8:
```
pip install ultralytics
```
Install Segment Anything:
```
pip install git+https://github.com/facebookresearch/segment-anything.git
```
## Getting Started
To get started, click the links to download the YOLO and Segment Anything models:
- **YOLO Model: [Yolov8s.](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt)**
- **Segment Anything Model: [ViT-H SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth)**
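
Below is a minimal inference sketch showing how the two models can be combined: YOLO proposes bounding boxes and SAM turns each box into an instance mask. The checkpoint file names match the downloads above, but the input image path, the use of OpenCV for image loading, and the overall flow are illustrative assumptions rather than this repository's exact API.

```
import cv2
from ultralytics import YOLO
from segment_anything import sam_model_registry, SamPredictor

# Load the detector and the segmenter (checkpoints downloaded via the links above).
detector = YOLO("yolov8s.pt")
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
predictor = SamPredictor(sam)

# Detect objects, then prompt SAM with each detected box.
image_bgr = cv2.imread("example.jpg")                  # illustrative input image
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
predictor.set_image(image_rgb)                         # SAM expects an RGB array

results = detector(image_bgr)[0]                       # Ultralytics accepts cv2-style BGR arrays
masks = []
for box in results.boxes.xyxy.cpu().numpy():           # boxes in XYXY pixel coordinates
    mask, _, _ = predictor.predict(box=box, multimask_output=False)
    masks.append(mask[0])                              # (H, W) boolean mask for this instance

print(f"Segmented {len(masks)} instances")
```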
| PypiClean |
/superftp-1.0.3.tar.gz/superftp-1.0.3/README.md | # superftp
Fast multi-segment FTP client
This FTP client maximizes download speed for large files over
long geographic distances. The program splits the file into
segments. It then launches several download processes, one for each segment.
The program monitors what parts of which segments have been downloaded.
Superftp monitors how fast each segment is downloading.
Note that over the Internet, each download is routed differently from the source to the destination,
and so the download speeds will vary -
especially as the geographic distance between
the server and client increases.
Superftp monitors the download speeds and kills slow downloads that
have been routed inefficiently, and then restarts them. It keeps track
of what segments have been downloaded and does not redownload any
segments.
In sum:
* Large files are segmented into small pieces.
* Each segment is downloaded in parallel.
* Superftp monitors the download rate on each segment.
* Each segment routes differently from the source
* Superftp restarts segments which have been routed through slow connections.
* As segments complete, Superftp reassigns parallel downloads to
remaining segments.
* Aborted, failed, or killed downloads can be resumed
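
The workflow above can be sketched with Python's standard `ftplib`. This is only an illustration of segmented downloading, not superftp's internal API, and it omits the rate monitoring, restarting, and resume logic described above; host, credentials, and paths are placeholders.

```
from ftplib import FTP
from multiprocessing import Process

def download_segment(host, user, password, remote_path, local_path, start, size):
    """Download `size` bytes of `remote_path` starting at byte offset `start`."""
    ftp = FTP(host)
    ftp.login(user, password)
    ftp.voidcmd('TYPE I')                     # binary mode, so REST offsets are byte offsets
    conn = ftp.transfercmd('RETR ' + remote_path, rest=start)
    remaining = size
    with open(local_path, 'r+b') as f:
        f.seek(start)
        while remaining > 0:
            chunk = conn.recv(min(8192, remaining))
            if not chunk:
                break
            f.write(chunk)
            remaining -= len(chunk)
    conn.close()                              # drop the data connection once the segment is complete
    ftp.close()

def download(host, user, password, remote_path, local_path, segments=4):
    ftp = FTP(host)
    ftp.login(user, password)
    ftp.voidcmd('TYPE I')
    total = ftp.size(remote_path)             # assumes the server supports the SIZE command
    ftp.close()
    with open(local_path, 'wb') as f:         # pre-allocate so each worker can seek to its offset
        f.truncate(total)
    seg = -(-total // segments)               # ceiling division
    workers = []
    for i in range(segments):
        start = i * seg
        if start >= total:
            break
        p = Process(target=download_segment,
                    args=(host, user, password, remote_path, local_path,
                          start, min(seg, total - start)))
        p.start()
        workers.append(p)
    for p in workers:
        p.join()
```

On Windows, call `download(...)` from inside an `if __name__ == '__main__':` guard, since `multiprocessing` re-imports the module in each worker.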
### Installation
The easiest way to install is using pip
To install for python3 (preferred method)
`pip3 install superftp`
To install for python2
`pip2 install superftp`
### Quickstart
Download /example.txt from ftp server with address ftpserver.example, username of Anonymous, and password of password to the current directory.
superftp --server ftpserver.example --username Anonymous --password password \
--remote_path /example.txt
The argument specifiers also have short versions of -s, -u, -p, -rp
superftp -s ftpserver.example -u Anonymous -p password -rp /example.txt
To enable TLS encryption add the --enable_tls flag
superftp -s ftpserver.example -u Anonymous -p password -rp /example.txt --enable_tls
Run the superftp command with the -h option to see the help
### Dependencies
The superftp application and module do not require any additional dependencies outside the standard library.
In order to run the unit tests, `pyftpdlib==1.5.5` is required
### Build superftp on a development machine
1. Clone the git repository
2. run the `build.sh` script in the root of the project, the build.sh script will do the following
* clean the project
* run pycodestyle on the project to check that best practice coding standards are followed
* run pylint on the project to check that best practice coding standards are followed
* run the unit tests for the project
* generate documentation for the project (the generated documentation is available at `docs/_build/html/index.html`)
* package the project into a redistributable, the redistributable is available in the `dist` directory in the root of the project
### Release Notes ###
v1.0.3
* First official release | PypiClean |
/pyknp_eventgraph-6.2.5-py3-none-any.whl/pyknp_eventgraph/eventgraph.py | import json
import pickle
from logging import getLogger
from typing import BinaryIO, List, Optional, TextIO, Union
from pyknp import BList
from pyknp_eventgraph.base_phrase import BasePhraseBuilder
from pyknp_eventgraph.builder import Builder
from pyknp_eventgraph.component import Component
from pyknp_eventgraph.document import Document, DocumentBuilder, JsonDocumentBuilder
from pyknp_eventgraph.event import Event
from pyknp_eventgraph.relation import JsonRelationBuilder, Relation, RelationsBuilder
from pyknp_eventgraph.sentence import Sentence
logger = getLogger(__name__)
class EventGraph(Component):
"""EventGraph provides a high-level interface that facilitates NLP application development. The core concept of
EventGraph is event, a language information unit that is closely related to predicate-argument structure but more
application-oriented. Events are linked to each other based on their syntactic and semantic relations.
Attributes:
document (Document): A document on which this EventGraph is built.
"""
def __init__(self):
self.document: Optional[Document] = None
@classmethod
def build(cls, blist: List[BList]) -> "EventGraph":
"""Build an EventGraph from language analysis by KNP.
Args:
blist: A list of bunsetsu lists, each of which is a result of analysis performed by KNP on a sentence.
Example::
from pyknp import KNP
from pyknp_eventgraph import EventGraph
# Parse a document.
document = ['彼女は海外勤務が長いので、英語がうまいに違いない。', '私はそう確信していた。']
knp = KNP()
blists = [knp.parse(sentence) for sentence in document]
# Build an EventGraph.
evg = EventGraph.build(blists)
"""
return EventGraphBuilder.build(blist)
@classmethod
def load(cls, f: Union[TextIO, BinaryIO], binary: bool = False) -> "EventGraph":
"""Deserialize an EventGraph.
Args:
f: A file descriptor.
binary: If true, deserialize an EventGraph using Python's pickle utility. Otherwise, deserialize
an EventGraph using Python's json utility.
Example::
from pyknp_eventgraph import EventGraph
# Load an EventGraph serialized in a JSON format.
with open('evg.json', 'r') as f:
evg = EventGraph.load(f, binary=False)
# Load an EventGraph serialized by Python's pickle utility.
with open('evg.pkl', 'rb') as f:
evg = EventGraph.load(f, binary=True)
Caution:
EventGraph deserialized from a JSON file loses several functionality.
To keep full functionality, use Python\'s pickle utility for serialization.
"""
if binary:
return PickleEventGraphBuilder.build(f)
else:
return JsonEventGraphBuilder.build(f)
def save(self, path: str, binary: bool = False) -> None:
"""Save this EventGraph.
Args:
path: An output file path.
binary: If true, serialize this EventGraph using Python's pickle utility. Otherwise, serialize
this EventGraph using Python's json utility.
Caution:
EventGraph deserialized from a JSON file loses several functionality. To keep full functionality,
use Python\'s pickle utility for serialization.
"""
if binary:
with open(path, "wb") as f:
pickle.dump(self, f)
else:
logger.info(
"EventGraph deserialized from a JSON file loses several functionality. "
"To keep full functionality, use Python's pickle utility for serialization. "
"For details, refer to https://pyknp-eventgraph.readthedocs.io/en/latest/reference/"
"eventgraph.html#pyknp_eventgraph.eventgraph.EventGraph.save."
)
with open(path, "w") as f:
json.dump(self.to_dict(), f, ensure_ascii=False, indent=8)
@property
def sentences(self) -> List[Sentence]:
"""A list of sentences."""
return [sentence for sentence in self.document.sentences]
@property
def events(self) -> List[Event]:
"""A list of events."""
return [event for sentence in self.sentences for event in sentence.events]
@property
def relations(self) -> List[Relation]:
"""A list of relations."""
return [relation for event in self.events for relation in event.outgoing_relations]
def to_dict(self) -> dict:
"""Convert this object into a dictionary."""
return dict(
sentences=[sentence.to_dict() for sentence in self.sentences],
events=[event.to_dict() for event in self.events],
)
def to_string(self) -> str:
"""Convert this object into a string."""
return (
f"<EventGraph, "
f"#sentences: {len(self.sentences)}, "
f"#events: {len(self.events)}, "
f"#relations: {len(self.relations)}>"
)
class EventGraphBuilder(Builder):
@classmethod
def build(cls, blists: List[BList]) -> EventGraph:
logger.debug("Create an EventGraph.")
Builder.reset()
evg = EventGraph()
# Assign a document to the EventGraph.
# A document is a collection of sentences, and a sentence is a collection of events.
DocumentBuilder.build(evg, blists)
# Assign basic phrases to events.
# This process must be performed after constructing a document
# because an event may have a basic phrase recognized by inter-sentential cataphora resolution.
for event in evg.events:
BasePhraseBuilder.build(event)
# Assign event-to-event relations to events.
# This process must be performed after constructing a document.
for event in evg.events:
RelationsBuilder.build(event)
logger.debug("Successfully created an EventGraph.")
logger.debug(evg)
return evg
class PickleEventGraphBuilder(Builder):
@classmethod
def build(cls, f: BinaryIO) -> EventGraph:
logger.debug("Create an EventGraph by loading a pickled file.")
evg = pickle.load(f)
assert isinstance(evg, EventGraph)
logger.debug("Successfully created an EventGraph.")
logger.debug(evg)
return evg
class JsonEventGraphBuilder(Builder):
@classmethod
def build(cls, f: TextIO) -> EventGraph:
logger.debug("Create an EventGraph by loading a JSON file.")
logger.info(
"EventGraph deserialized from a JSON file loses several functionality. "
"To keep full functionality, use Python's pickle utility for serialization. "
"For details, refer to https://pyknp-eventgraph.readthedocs.io/en/latest/reference/eventgraph.html"
"#pyknp_eventgraph.eventgraph.EventGraph.load."
)
Builder.reset()
dump = json.load(f)
evg = EventGraph()
# Assign a document to the EventGraph.
# A document is a collection of sentences, and a sentence is a collection of events.
JsonDocumentBuilder.build(evg, dump)
# Assign event-to-event relations to events.
for event_dump in dump["events"]:
for relation_dump in event_dump["rel"]:
modifier_evid = event_dump["event_id"]
head_evid = relation_dump["event_id"]
JsonRelationBuilder.build(modifier_evid, head_evid, relation_dump)
logger.debug("Successfully created an EventGraph.")
logger.debug(evg)
return evg | PypiClean |