| Column | Type | Range / Values |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 2 to 1.05M |
| ext | stringclasses | 9 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4 to 193 |
| max_stars_repo_name | stringlengths | 6 to 109 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 78 |
| max_stars_repo_licenses | sequence | |
| max_stars_count | int64 | 1 to 36.6k, ⌀ (nullable) |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24, ⌀ (nullable) |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24, ⌀ (nullable) |
| max_issues_repo_path | stringlengths | 4 to 193 |
| max_issues_repo_name | stringlengths | 6 to 109 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 78 |
| max_issues_repo_licenses | sequence | |
| max_issues_count | int64 | 1 to 29.8k, ⌀ (nullable) |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24, ⌀ (nullable) |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24, ⌀ (nullable) |
| max_forks_repo_path | stringlengths | 4 to 193 |
| max_forks_repo_name | stringlengths | 6 to 109 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 78 |
| max_forks_repo_licenses | sequence | |
| max_forks_count | int64 | 1 to 11.2k, ⌀ (nullable) |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24, ⌀ (nullable) |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24, ⌀ (nullable) |
| content | stringlengths | 2 to 1.05M |
| avg_line_length | float64 | 1 to 404k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
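The rows below are raw dumps of this table, one Python source file per row: the metadata columns come first, the file itself sits in the `content` column, and `avg_line_length`, `max_line_length` and `alphanum_fraction` close each row. As an illustration only, the sketch below shows how a dataset with this schema could be loaded and filtered with the Hugging Face `datasets` library; the dataset identifier used here is a placeholder assumption, not the real name of this dataset.

```python
# Illustrative sketch only -- "user/python-code-dump" is a placeholder id, not this dataset's name.
from datasets import load_dataset

ds = load_dataset("user/python-code-dump", split="train")  # hypothetical identifier

# Every row exposes the columns from the schema table above.
row = ds[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["content"][:200])  # first 200 characters of the stored source file

# Example filter: keep small, Apache-2.0 licensed files.
small_apache = ds.filter(
    lambda r: r["size"] < 10_000 and "Apache-2.0" in r["max_stars_repo_licenses"]
)
print(len(small_apache), "rows match")
```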
73d2bacf75a73ba0bac5889825333c997f9278cb | 8,438 | py | Python | fanficfare/adapters/adapter_thealphagatecom.py | davidferguson/FanFicUpload | dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2 | ["Apache-2.0"] | 1 | 2019-06-13T11:20:33.000Z | 2019-06-13T11:20:33.000Z | fanficfare/adapters/adapter_thealphagatecom.py | davidferguson/FanFicUpload | dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2 | ["Apache-2.0"] | null | null | null | fanficfare/adapters/adapter_thealphagatecom.py | davidferguson/FanFicUpload | dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2012 Fanficdownloader team, 2015 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Software: eFiction
import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
from base_adapter import BaseSiteAdapter, makeDate
def getClass():
return TheAlphaGateComAdapter
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class TheAlphaGateComAdapter(BaseSiteAdapter):
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
self.decode = ["Windows-1252",
"utf8"] # 1252 is a superset of iso-8859-1.
# Most sites that claim to be
# iso-8859-1 (and some that claim to be
# utf8) are really windows-1252.
self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
self.password = ""
self.is_adult=False
# get storyId from url--url validation guarantees query is only sid=1234
self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
# normalized story URL.
self._setURL('http://' + self.getSiteDomain() + '/viewstory.php?sid='+self.story.getMetadata('storyId'))
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev','tag')
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = "%d %b %Y"
@staticmethod # must be @staticmethod, don't remove it.
def getSiteDomain():
# The site domain. Does have www here, if it uses it.
return 'www.thealphagate.com'
@classmethod
def getSiteExampleURLs(cls):
return "http://"+cls.getSiteDomain()+"/viewstory.php?sid=1234"
def getSiteURLPattern(self):
return re.escape("http://"+self.getSiteDomain()+"/viewstory.php?sid=")+r"\d+$"
## Getting the chapter list and the meta data, plus 'is adult' checking.
def extractChapterUrlsAndMetadata(self):
# index=1 makes sure we see the story chapter index. Some
# sites skip that for one-chapter stories.
url = self.url+'&index=1'
logger.debug("URL: "+url)
try:
data = self._fetchUrl(url)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
if "Access denied. This story has not been validated by the adminstrators of this site." in data:
raise exceptions.AccessDenied(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.")
# use BeautifulSoup HTML parser to make everything easier to find.
soup = self.make_soup(data)
# print data
# Now go hunting for all the meta data and the chapter list.
## Title
a = soup.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))
self.story.setMetadata('title',stripHTML(a))
# Find authorid and URL from... author url.
a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
self.story.setMetadata('authorId',a['href'].split('=')[1])
self.story.setMetadata('authorUrl','http://'+self.host+'/'+a['href'])
self.story.setMetadata('author',a.string)
# Find the chapters:
for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"&chapter=\d+$")):
# just in case there's tags, like <i> in chapter titles.
self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/'+chapter['href']))
self.story.setMetadata('numChapters',len(self.chapterUrls))
# eFiction sites don't help us out a lot with their meta data
# formating, so it's a little ugly.
# utility method
def defaultGetattr(d,k):
try:
return d[k]
except:
return ""
# <span class="label">Rated:</span> NC-17<br /> etc
labels = soup.findAll('span',{'class':'label'})
for labelspan in labels:
value = labelspan.nextSibling
label = labelspan.string
if 'Summary' in label:
## Everything until the next span class='label'
svalue = ""
while 'label' not in defaultGetattr(value,'class'):
svalue += unicode(value)
value = value.nextSibling
self.setDescription(url,svalue)
#self.story.setMetadata('description',stripHTML(svalue))
if 'Rated' in label:
self.story.setMetadata('rating', value)
if 'Word count' in label:
self.story.setMetadata('numWords', value)
if 'Categories' in label:
cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
for cat in cats:
self.story.addToList('category',cat.string)
if 'Characters' in label:
chars = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
for char in chars:
self.story.addToList('characters',char.string)
if 'Genre' in label:
genres = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=1'))
for genre in genres:
self.story.addToList('genre',genre.string)
if 'Warnings' in label:
warnings = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=2')) # XXX
for warning in warnings:
self.story.addToList('warnings',warning.string)
if 'Completed' in label:
if 'Yes' in value:
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
if 'Published' in label:
self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat))
if 'Updated' in label:
self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))
try:
# Find Series name from series URL.
a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
series_name = a.string
series_url = 'http://'+self.host+'/'+a['href']
# use BeautifulSoup HTML parser to make everything easier to find.
seriessoup = self.make_soup(self._fetchUrl(series_url))
storyas = seriessoup.findAll('a', href=re.compile(r'^viewstory.php\?sid=\d+$'))
i=1
for a in storyas:
if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
self.setSeries(series_name, i)
self.story.setMetadata('seriesUrl',series_url)
break
i+=1
except:
# I find it hard to care if the series parsing fails
pass
# grab the text for an individual chapter.
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
soup = self.make_soup(self._fetchUrl(url))
div = soup.find('div', {'id' : 'story'})
if None == div:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
return self.utf8FromSoup(url,div)
| 39.246512 | 157 | 0.600735 |
73d2c280cd5a67722e9eb67061b6c744a5188574 | 22,362 | py | Python | ChildTuning/run_glue.py | onlyrico/AliceMind | a6a070b1610e4c4bfe84ee6c4195b2bc4f725ded | ["Apache-2.0"] | 15 | 2021-10-02T16:27:54.000Z | 2022-03-13T13:48:38.000Z | ChildTuning/run_glue.py | onlyrico/AliceMind | a6a070b1610e4c4bfe84ee6c4195b2bc4f725ded | ["Apache-2.0"] | 1 | 2021-10-04T04:02:33.000Z | 2021-10-08T02:17:40.000Z | ChildTuning/run_glue.py | onlyrico/AliceMind | a6a070b1610e4c4bfe84ee6c4195b2bc4f725ded | ["Apache-2.0"] | 2 | 2021-10-02T16:27:56.000Z | 2021-11-23T15:35:13.000Z |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from ChildTuningD import ChildTuningDtrainer
from ChildTuningF import ChildTuningFtrainer
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_name: Optional[str] = field(
default=None,
metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the training data."}
)
validation_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the validation data."}
)
test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
def __post_init__(self):
if self.task_name is not None:
self.task_name = self.task_name.lower()
if self.task_name not in task_to_keys.keys():
raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task or a training/validation file.")
else:
train_extension = self.train_file.split(".")[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
validation_extension = self.validation_file.split(".")[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
# myparams
reserve_p: float = field(
default=0.0
)
mode: str = field(
default=None
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset("glue.py", data_args.task_name)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split(".")[-1]
test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}")
if data_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
datasets = load_dataset("csv", data_files=data_files)
else:
# Loading a dataset from local json files
datasets = load_dataset("json", data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
logger.warn(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif data_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=data_args.max_seq_length, truncation=True)
# Map labels to IDs (not necessary for GLUE tasks)
if label_to_id is not None and "label" in examples:
result["label"] = [label_to_id[l] for l in examples["label"]]
return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
train_dataset = datasets["train"]
eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
if data_args.task_name is not None or data_args.test_file is not None:
test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("metric.py", data_args.task_name)
# TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from
# compute_metrics
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if data_args.task_name is not None:
result = metric.compute(predictions=preds, references=p.label_ids)
if len(result) > 1:
result["combined_score"] = np.mean(list(result.values())).item()
return result
elif is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
assert model_args.mode in ['ChildTuning-F', 'ChildTuning-D', None]
if model_args.mode is None:
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
else:
if model_args.mode == 'ChildTuning-F':
trainer_cls = ChildTuningFtrainer
elif model_args.mode == 'ChildTuning-D':
trainer_cls = ChildTuningDtrainer
trainer = trainer_cls(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
reserve_p=model_args.reserve_p,
mode=model_args.mode
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
if trainer.is_world_process_zero():
with open(output_train_file, "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(metrics.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
eval_results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(datasets["validation_mismatched"])
for eval_dataset, task in zip(eval_datasets, tasks):
eval_result = trainer.evaluate(eval_dataset=eval_dataset)
output_eval_file = os.path.join(training_args.output_dir, f"eval_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info(f"***** Eval results {task} *****")
for key, value in sorted(eval_result.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
eval_results.update(eval_result)
if training_args.do_predict:
logger.info("*** Test ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
test_datasets.append(datasets["test_mismatched"])
for test_dataset, task in zip(test_datasets, tasks):
# Removing the `label` columns because it contains -1 and Trainer won't like that.
test_dataset.remove_columns_("label")
predictions = trainer.predict(test_dataset=test_dataset).predictions
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_test_file, "w") as writer:
logger.info(f"***** Test results {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = label_list[item]
writer.write(f"{index}\t{item}\n")
return eval_results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 43.675781 | 119 | 0.656873 |
73d2de46d782e69b2f4a5c6e163b6f75eb4f82f7 | 437 | py | Python | alipay/aop/api/response/AlipayMerchantAuthCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | null | null | null | alipay/aop/api/response/AlipayMerchantAuthCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | null | null | null | alipay/aop/api/response/AlipayMerchantAuthCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayMerchantAuthCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayMerchantAuthCreateResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayMerchantAuthCreateResponse, self).parse_response_content(response_content)
| 27.3125 | 105 | 0.778032 |
73d2f073d8dfa78b96ca183a5b7669104ab578df | 3,881 | py | Python | botocore/__init__.py | Craigspaz/botocore | 4eae914d8bfab979082c1b4d71a317e3081246c4 | ["Apache-2.0"] | null | null | null | botocore/__init__.py | Craigspaz/botocore | 4eae914d8bfab979082c1b4d71a317e3081246c4 | ["Apache-2.0"] | null | null | null | botocore/__init__.py | Craigspaz/botocore | 4eae914d8bfab979082c1b4d71a317e3081246c4 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
__version__ = '1.24.18'
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acronym
# name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{2,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
('ExecutePartiQLStatement', '_'): 'execute_partiql_statement',
('ExecutePartiQLStatement', '-'): 'execute-partiql-statement',
('ExecutePartiQLTransaction', '_'): 'execute_partiql_transaction',
('ExecutePartiQLTransaction', '-'): 'execute-partiql-transaction',
('ExecutePartiQLBatch', '_'): 'execute_partiql_batch',
('ExecutePartiQLBatch', '-'): 'execute-partiql-batch',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED(object):
def __copy__(self):
return self
def __deepcopy__(self, memodict):
return self
UNSIGNED = UNSIGNED()
def xform_name(name, sep='_', _xform_cache=_xform_cache):
"""Convert camel case to a "pythonic" name.
If the name contains the ``sep`` character, then it is
returned unchanged.
"""
if sep in name:
# If the sep is in the name, assume that it's already
# transformed and return the string unchanged.
return name
key = (name, sep)
if key not in _xform_cache:
if _special_case_transform.search(name) is not None:
is_special = _special_case_transform.search(name)
matched = is_special.group()
# Replace something like ARNs, ACLs with _arns, _acls.
name = name[:-len(matched)] + sep + matched.lower()
s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower()
_xform_cache[key] = transformed
return _xform_cache[key]
| 38.81 | 78 | 0.703685 |
73d31c5a3b6803306061f620b7aa307fb6ae95e1 | 814 | py | Python | django_joblog/impl/JobLoggerContext.py | defgsus/django-joblog | 88467f951d7ebc586c69e421cab39e4caa395cca | ["MIT"] | null | null | null | django_joblog/impl/JobLoggerContext.py | defgsus/django-joblog | 88467f951d7ebc586c69e421cab39e4caa395cca | ["MIT"] | null | null | null | django_joblog/impl/JobLoggerContext.py | defgsus/django-joblog | 88467f951d7ebc586c69e421cab39e4caa395cca | ["MIT"] | null | null | null |
# encoding=utf-8
from __future__ import unicode_literals
from .JobLoggerBase import JobLoggerBase
class JobLoggerContext(object):
"""
Scoped context change for a Logger instance
with JobLogger("my_task") as log:
with JobLoggerContext(log, "subtask_1"):
the_task_1(log)
with JobLoggerContext(log, "subtask_2"):
the_task_2(log)
with JobLoggerContext(log, "subsubtask_2"):
the_sub_task_2(log)
"""
def __init__(self, log, name):
assert isinstance(log, JobLoggerBase)
self._log = log
self._name = name
def __enter__(self):
self._log.push_context(self._name)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_tb:
self._log.pop_context()
| 25.4375 | 55 | 0.63145 |
73d332ca5d8366634ead080b5ab50ded026f2a86 | 5,380 | py | Python | asposewordscloud/models/run_insert.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | ["MIT"] | 14 | 2018-07-15T17:01:52.000Z | 2018-11-29T06:15:33.000Z | asposewordscloud/models/run_insert.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | ["MIT"] | 1 | 2018-09-28T12:59:34.000Z | 2019-10-08T08:42:59.000Z | asposewordscloud/models/run_insert.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | ["MIT"] | 2 | 2020-12-21T07:59:17.000Z | 2022-02-16T21:41:25.000Z |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="run_insert.py">
# Copyright (c) 2021 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import datetime
import six
import json
class RunInsert(object):
"""Run element for insert.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'text': 'str'
}
attribute_map = {
'text': 'Text'
}
def __init__(self, text=None): # noqa: E501
"""RunInsert - a model defined in Swagger""" # noqa: E501
self._text = None
self.discriminator = None
if text is not None:
self.text = text
@property
def text(self):
"""Gets the text of this RunInsert. # noqa: E501
Gets or sets the run's text. # noqa: E501
:return: The text of this RunInsert. # noqa: E501
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""Sets the text of this RunInsert.
Gets or sets the run's text. # noqa: E501
:param text: The text of this RunInsert. # noqa: E501
:type: str
"""
self._text = text
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
result[self.attribute_map[attr]] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[self.attribute_map[attr]] = value.to_dict()
elif isinstance(value, dict):
result[self.attribute_map[attr]] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, (datetime.datetime, datetime.date)):
result[self.attribute_map[attr]] = value.isoformat()
else:
result[self.attribute_map[attr]] = value
return result
def to_json(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[self.attribute_map[attr]] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[self.attribute_map[attr]] = value.to_dict()
elif isinstance(value, dict):
result[self.attribute_map[attr]] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, (datetime.datetime, datetime.date)):
result[self.attribute_map[attr]] = value.isoformat()
else:
result[self.attribute_map[attr]] = value
return json.dumps(result)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RunInsert):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
        return not self == other
 | 34.487179 | 85 | 0.564498 |
73d369575cbd3a6df67a1650635faa4a60b17f2d | 194 | py | Python | rootfs/drycc/__init__.py | hanlucen/controller | 2be591d44508bbca8058d3f856d10d425fa4f519 | ["MIT"] | null | null | null | rootfs/drycc/__init__.py | hanlucen/controller | 2be591d44508bbca8058d3f856d10d425fa4f519 | ["MIT"] | 19 | 2020-07-30T06:31:29.000Z | 2022-03-14T07:33:44.000Z | rootfs/drycc/__init__.py | hanlucen/controller | 2be591d44508bbca8058d3f856d10d425fa4f519 | ["MIT"] | 9 | 2020-07-30T02:50:12.000Z | 2020-12-11T06:44:19.000Z |
"""
The Drycc main package, including the top-level URLs, Django project
settings, and WSGI setup. Most application domain-specific code lives in
the api, provider, cm, and web Django apps.
"""
| 32.333333 | 72 | 0.762887 |
73d3798dc28fea71c68d8b8f984f89209322ecc7 | 9,947 | py | Python | packages/main/src/RPA/Twitter.py | FormulatedAutomation/rpaframework | 7704d20540fe5ada0c10bd48605afb5d4697b497 | ["Apache-2.0"] | null | null | null | packages/main/src/RPA/Twitter.py | FormulatedAutomation/rpaframework | 7704d20540fe5ada0c10bd48605afb5d4697b497 | ["Apache-2.0"] | null | null | null | packages/main/src/RPA/Twitter.py | FormulatedAutomation/rpaframework | 7704d20540fe5ada0c10bd48605afb5d4697b497 | ["Apache-2.0"] | 1 | 2021-02-11T21:00:11.000Z | 2021-02-11T21:00:11.000Z |
from dataclasses import dataclass
import datetime
import logging
from robot.libraries.BuiltIn import (
BuiltIn,
RobotNotRunningError,
)
import tweepy
from tweepy.error import TweepError
from RPA.core.helpers import required_env, required_param
from RPA.core.notebook import notebook_json
from RPA.RobotLogListener import RobotLogListener
try:
BuiltIn().import_library("RPA.RobotLogListener")
except RobotNotRunningError:
pass
@dataclass
class Tweet:
"""Represents Tweet"""
created_at: datetime.datetime
id: int
tweet_id_str: str
text: str
in_reply_to_screen_name: str
lang: str
name: str
screen_name: str
hashtags: list
is_truncated: bool = False
favorite_count: int = 0
retweeted: bool = False
retweet_count: int = 0
class Twitter:
"""Library for managing Twitter.
Library usage requires Twitter developer credentials, which can
be requested from https://developer.twitter.com/
"""
def __init__(self) -> None:
self.logger = logging.getLogger(__name__)
self._auth = None
self.api = None
self._me = None
listener = RobotLogListener()
listener.register_protected_keywords("authorize")
listener.only_info_level(
[
"get_me",
"get_user_tweets",
"text_search_tweets",
"get_user_profile",
"tweet",
"like",
"unlike",
]
)
def authorize(
self,
consumer_key: str = None,
consumer_secret: str = None,
access_token: str = None,
access_token_secret: str = None,
) -> None:
"""Authorize to Twitter API
:param consumer_key: app consumer key
:param consumer_secret: app consumer secret
:param access_token: user access token
:param access_token_secret: user access token secret
"""
if consumer_key is None:
consumer_key = required_env("TWITTER_CONSUMER_KEY")
if consumer_secret is None:
consumer_secret = required_env("TWITTER_CONSUMER_SECRET")
if access_token is None:
access_token = required_env("TWITTER_ACCESS_TOKEN")
if access_token_secret is None:
access_token_secret = required_env("TWITTER_ACCESS_TOKEN_SECRET")
self._auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
self._auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(self._auth, wait_on_rate_limit=True)
try:
self.api.verify_credentials()
self.logger.info("Twitter authentication success")
self._me = self.api.me()
except TweepError as e:
self.logger.error("Error during Twitter authentication: %s", str(e))
raise TweepError from e
def get_me(self) -> dict:
"""Get Twitter profile of authenticated user
:return: user profile as dictionary or `None`
"""
data = self._me._json if self._me else None # pylint: disable=W0212
notebook_json(data)
return data
def get_user_tweets(self, username: str = None, count: int = 100) -> list:
"""Get user tweets
:param username: whose tweets to get
:param count: maximum number of tweets, defaults to 100
:return: list of user tweets
"""
required_param(username, "get_user_tweets")
tweets = []
try:
# Pulling individual tweets from query
for tweet in self.api.user_timeline(id=username, count=count):
# Adding to list that contains all tweets
tw = Tweet(
created_at=tweet.created_at,
id=tweet.id,
tweet_id_str=tweet.id_str,
text=tweet.text,
in_reply_to_screen_name=tweet.in_reply_to_screen_name,
lang=tweet.lang,
name=tweet.user.name,
screen_name=tweet.user.screen_name,
hashtags=[ht["text"] for ht in tweet.entities["hashtags"]],
is_truncated=tweet.truncated,
favorite_count=tweet.favorite_count,
retweeted=tweet.retweeted,
retweet_count=tweet.retweet_count,
)
tweets.append(tw)
except TweepError as e:
self.logger.warning("Twitter timeline failed: %s", str(e))
return tweets
def text_search_tweets(
self,
query: str = None,
count: int = 100,
geocode: str = None,
lang: str = None,
locale: str = None,
result_type: str = "mixed",
until: str = None,
since_id: str = None,
max_id: str = None,
) -> list:
"""Search tweets defined by search query
Results types:
- mixed : include both popular and real time results in the response
- recent : return only the most recent results in the response
- popular : return only the most popular results in the response
:param query: search query string of 500 characters maximum,
including operators
:param count: maximum number of tweets, defaults to 100
:param geocode: tweets by users located within a given
radius of the given latitude/longitude
:param lang: language code of tweets
:param locale: language of the query you are sending
:param result_type: type of search results you would prefer to receive,
default "mixed"
:param until: tweets created before the given date
:param since_id: Returns only statuses with an ID greater than
:param max_id: only statuses with an ID less than
:return: list of matching tweets
"""
required_param(query, "text_search_tweets")
tweets = []
try:
# Pulling individual tweets from query
for tweet in self.api.search(
q=query,
count=count,
geocode=geocode,
lang=lang,
locale=locale,
result_type=result_type,
until=until,
since_id=since_id,
max_id=max_id,
):
tw = Tweet(
created_at=tweet.created_at,
id=tweet.id,
tweet_id_str=tweet.id_str,
text=tweet.text,
in_reply_to_screen_name=tweet.in_reply_to_screen_name,
lang=tweet.lang,
name=tweet.user.name,
screen_name=tweet.user.screen_name,
hashtags=[ht["text"] for ht in tweet.entities["hashtags"]],
is_truncated=tweet.truncated,
favorite_count=tweet.favorite_count,
retweeted=tweet.retweeted,
retweet_count=tweet.retweet_count,
)
tweets.append(tw)
except TweepError as e:
self.logger.warning("Twitter search failed: %s", str(e))
return tweets
def get_user_profile(self, username: str = None) -> dict:
"""Get user's Twitter profile
:param username: whose profile to get
:return: profile as dictionary
"""
required_param(username, "get_user_profile")
try:
profile = self.api.get_user(username)
data = profile._json # pylint: disable=W0212
notebook_json(data)
return data
except TweepError:
return None
def tweet(self, content: str = None) -> None:
"""Make a tweet with content
:param content: text for the status update
"""
required_param(content, "tweet")
self.api.update_status(content)
def like(self, tweet: Tweet = None) -> bool:
"""Like a tweet
:param tweet: as a class `Tweet`
:return: `True` if Tweet was liked, `False` if not
"""
required_param(tweet, "like")
try:
self.api.create_favorite(tweet.id)
return True
except TweepError:
self.logger.warning(
'Could not like tweet "%s" by user "%s"',
tweet.text,
tweet.screen_name,
)
return False
def unlike(self, tweet: Tweet = None) -> bool:
"""Unlike a tweet
:param tweet: as a class `Tweet`
:return: `True` if Tweet was unliked, `False` if not
"""
required_param(tweet, "unlike")
try:
self.api.destroy_favorite(tweet.id)
return True
except TweepError:
self.logger.warning(
'Could not unlike tweet "%s" by user "%s"',
tweet.text,
tweet.screen_name,
)
return False
def follow(self, user: str = None) -> bool:
"""Follow Twitter user
:param user: screen name of the user
:return: `True` if user was followed, `False` if not
"""
required_param(user, "follow")
try:
self.api.create_friendship(user)
return True
except TweepError:
self.logger.warning("Could not follow user: %s", user)
return False
def unfollow(self, user: str = None) -> bool:
"""Unfollow Twitter user
:param user: screen name of the user
:return: `True` if user was followed, `False` if not
"""
required_param(user, "unfollow")
try:
self.api.destroy_friendship(user)
return True
except TweepError:
self.logger.warning("Could not unfollow user: %s", user)
return False
| 33.60473 | 80 | 0.57173 |
73d38e1c53f707081801c7b89de1b052f3155d63 | 5,734 | py | Python | plugins/sys_plugin/sys_plugin.py | crossin/yuan-xin | 1c28ef77671293f5e3d40d663f586753a82bbf8b | ["MIT"] | null | null | null | plugins/sys_plugin/sys_plugin.py | crossin/yuan-xin | 1c28ef77671293f5e3d40d663f586753a82bbf8b | ["MIT"] | null | null | null | plugins/sys_plugin/sys_plugin.py | crossin/yuan-xin | 1c28ef77671293f5e3d40d663f586753a82bbf8b | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from micolog_plugin import *
import logging,re
from google.appengine.api import mail
from model import *
from google.appengine.api import users
from base import BaseRequestHandler,urldecode
from google.appengine.ext.webapp import template
SBODY='''New comment on your post "%(title)s"
Author : %(author)s
E-mail : %(email)s
URL : %(weburl)s
Comment:
%(content)s
You can see all comments on this post here:
%(commenturl)s
'''
BBODY='''Hi~ New reference on your comment for post "%(title)s"
Author : %(author)s
URL : %(weburl)s
Comment:
%(content)s
You can see all comments on this post here:
%(commenturl)s
'''
class NotifyHandler(BaseRequestHandler):
def __init__(self):
BaseRequestHandler.__init__(self)
self.current="config"
self.sbody=OptionSet.getValue('sys_plugin_sbody',SBODY)
self.bbody=OptionSet.getValue('sys_plugin_bbody',BBODY)
def get(self):
self.template_vals.update({'self':self})
content=template.render('plugins/sys_plugin/setup.html',self.template_vals)
self.render2('views/admin/setup_base.html',{'m_id':'sysplugin_notify','content':content})
#Also you can use:
#self.render2('plugins/sys_plugin/setup2.html',{'m_id':'sysplugin_notify','self':self})
def post(self):
self.bbody=self.param('bbody')
self.sbody=self.param('sbody')
self.blog.comment_notify_mail=self.parambool('comment_notify_mail')
self.blog.put()
OptionSet.setValue('sys_plugin_sbody',self.sbody)
OptionSet.setValue('sys_plugin_bbody',self.bbody)
self.get()
class sys_plugin(Plugin):
def __init__(self):
Plugin.__init__(self,__file__)
self.author="xuming"
self.authoruri="http://xuming.net"
self.uri="http://xuming.net"
self.description="System plugin for micolog"
self.name="Sys Plugin"
self.version="0.2"
self.blocklist=OptionSet.getValue("sys_plugin_blocklist",default="")
self.register_filter('head',self.head)
self.register_filter('footer',self.footer)
self.register_urlmap('sys_plugin/setup',self.setup)
self.register_urlhandler('/admin/sys_plugin/notify',NotifyHandler)
self.register_setupmenu('sysplugin_notify',_('Notify'),'/admin/sys_plugin/notify')
self.register_action('pre_comment',self.pre_comment)
self.register_action('save_comment',self.save_comment)
self.sbody=OptionSet.getValue('sys_plugin_sbody',SBODY)
self.bbody=OptionSet.getValue('sys_plugin_bbody',BBODY)
def head(self,content,blog=None,*arg1,**arg2):
content=content+'<meta name="generator" content="Micolog %s" />'%blog.version
return content
def footer(self,content,blog=None,*arg1,**arg2):
return content+'<!--Powered by micolog %s-->'%blog.version
def setup(self,page=None,*arg1,**arg2):
if not page.is_login:
page.redirect(users.create_login_url(page.request.uri))
tempstr='''
<p>blocklist:</p>
<form action="" method="post">
<p>
<textarea name="ta_list" style="width:400px;height:300px">%s</textarea>
</p>
<input type="submit" value="submit">
</form>'''
if page.request.method=='GET':
page.render2('views/admin/base.html',{'m_id':'sysplugin_block','content':tempstr%self.blocklist})
else:
self.blocklist=page.param("ta_list")
OptionSet.setValue("sys_plugin_blocklist",self.blocklist)
page.render2('views/admin/base.html',{'m_id':'sysplugin_block','content':tempstr%self.blocklist})
def get(self,page):
return '''<h3>Sys Plugin</h3>
<p>This is a system plugin for micolog. <br>Also a demo for how to write plugin for micolog.</p>
<h4>feature</h4>
<p><ol>
<li>Add Meta <meta name="generator" content="Micolog x.x" /></li>
<li>Add footer "<!--Powered by micolog x.x-->"</li>
<li>Comments Filter with blocklist <a href="/e/sys_plugin/setup">Setup</a></li>
<li>Comment Notify <a href="/admin/sys_plugin/notify">Setup</a></li>
</ol></p>
'''
def pre_comment(self,comment,*arg1,**arg2):
for s in self.blocklist.splitlines():
if comment.content.find(s)>-1:
raise Exception
def save_comment(self,comment,*arg1,**arg2):
if self.blog.comment_notify_mail:
self.notify(comment)
def notify(self,comment):
sbody=self.sbody.decode('utf-8')
bbody=self.bbody.decode('utf-8')
if self.blog.comment_notify_mail and self.blog.owner and not users.is_current_user_admin() :
sbody=sbody%{'title':comment.entry.title,
'author':comment.author,
'weburl':comment.weburl,
'email':comment.email,
'content':comment.content,
'commenturl':comment.entry.fullurl+"#comment-"+str(comment.key().id())
}
mail.send_mail_to_admins(self.blog.owner.email(),'Comments:'+comment.entry.title, sbody,reply_to=comment.email)
#reply comment mail notify
refers = re.findall(r'#comment-(\d+)', comment.content)
if len(refers)!=0:
replyIDs=[int(a) for a in refers]
commentlist=comment.entry.comments()
emaillist=[c.email for c in commentlist if c.reply_notify_mail and c.key().id() in replyIDs]
emaillist = {}.fromkeys(emaillist).keys()
for refer in emaillist:
if self.blog.owner and mail.is_email_valid(refer):
emailbody = bbody%{'title':comment.entry.title,
'author':comment.author,
'weburl':comment.weburl,
'email':comment.email,
'content':comment.content,
'commenturl':comment.entry.fullurl+"#comment-"+str(comment.key().id())
}
message = mail.EmailMessage(sender = self.blog.owner.email(),subject = 'Comments:'+comment.entry.title)
message.to = refer
message.body = emailbody
message.send()
| 35.614907 | 115 | 0.681025 |
73d3a8026ce989fb16c63ee04b869da39d0e4c9d | 1,419 | py | Python | node_functions.py | DigiDigi/cerulean_editor | 4a11e5e4b0c3e846270a5162a0b9ce544d2fce4e | ["MIT"] | null | null | null | node_functions.py | DigiDigi/cerulean_editor | 4a11e5e4b0c3e846270a5162a0b9ce544d2fce4e | ["MIT"] | null | null | null | node_functions.py | DigiDigi/cerulean_editor | 4a11e5e4b0c3e846270a5162a0b9ce544d2fce4e | ["MIT"] | null | null | null |
import torch
from constants import *
imports = ('torch') # These are Imports for any functions that exists in the sidebar. Written top level.
functions = dict()
noncalling = set() # Non-functions.
# WIP: The ones I have here are for testing and should actually be functions.
functions[CONT_CLASS] = None
functions[TORCH_NN_LINEAR] = torch.nn.Linear
noncalling.add(TORCH_NN_LINEAR)
functions[TORCH_NN_CONV2D] = torch.nn.Conv2d
noncalling.add(TORCH_NN_SIGMOID)
functions[TORCH_NN_SIGMOID] = torch.nn.Sigmoid
noncalling.add(TORCH_NN_SIGMOID)
functions[DEBUG_FUNC] = None
functions[DEBUG_NONCALL] = None
noncalling.add(DEBUG_NONCALL)
functions[DEBUG_FUNCB] = None
functions[DEBUG_NONCALLB] = None
noncalling.add(DEBUG_NONCALLB)
functions[DEBUG_TESTCONTAINER] = None
functions[DEBUG_TESTCONTAINER2] = None
# WIP:
# How do we write a function here that returns the data in the text view?
functions[F_PYTHON] = None
functions[TORCH_FLOATTENSOR] = torch.FloatTensor
functions[TORCH_AUTOGRAD_VARIABLE] = torch.autograd.Variable
functions[TORCH_NN_LINEAR] = torch.nn.Linear
functions[TORCH_NN_SIGMOID] = torch.nn.Sigmoid
def func_print(v):
print (v)
functions[F_PRINT] = func_print
# Not sure what the cleanest way to implement this is.
def func_cuda(v, device_id=None):
v.cuda(device_id)
return v
functions[F_CUDA] = func_cuda
def func_cpu(v):
v.cpu()
return v
functions[F_CPU] = func_cuda
| 24.894737 | 105 | 0.777308 |
73d3b04ea892a584256077a2115e9da10bb07747 | 2,339 | py | Python | minihack/envs/lab.py | mohamadmansourX/minihack | 3238bf4892ada8aecfe2e71fb493957f7e06b2d8 | ["Apache-2.0"] | 1 | 2021-11-19T01:51:38.000Z | 2021-11-19T01:51:38.000Z | minihack/envs/lab.py | mohamadmansourX/minihack | 3238bf4892ada8aecfe2e71fb493957f7e06b2d8 | ["Apache-2.0"] | null | null | null | minihack/envs/lab.py | mohamadmansourX/minihack | 3238bf4892ada8aecfe2e71fb493957f7e06b2d8 | ["Apache-2.0"] | 1 | 2021-11-17T15:45:02.000Z | 2021-11-17T15:45:02.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
from minihack import MiniHackNavigation, LevelGenerator
from gym.envs import registration
class MiniHackLabyrinth(MiniHackNavigation):
def __init__(self, *args, **kwargs):
map = """
-------------------------------------
|.................|.|...............|
|.|-------------|.|.|.------------|.|
|.|.............|.|.|.............|.|
|.|.|----------.|.|.|------------.|.|
|.|.|...........|.|.............|.|.|
|.|.|.|----------.|-----------|.|.|.|
|.|.|.|...........|.......|...|.|.|.|
|.|.|.|.|----------------.|.|.|.|.|.|
|.|.|.|.|.................|.|.|.|.|.|
|.|.|.|.|.-----------------.|.|.|.|.|
|.|.|.|.|...................|.|.|.|.|
|.|.|.|.|--------------------.|.|.|.|
|.|.|.|.......................|.|.|.|
|.|.|.|-----------------------|.|.|.|
|.|.|...........................|.|.|
|.|.|---------------------------|.|.|
|.|...............................|.|
|.|-------------------------------|.|
|...................................|
-------------------------------------
"""
lvl_gen = LevelGenerator(map=map, lit=True)
lvl_gen.set_start_pos((19, 1))
lvl_gen.add_goal_pos((19, 7))
des_file = lvl_gen.get_des()
kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 1000)
super().__init__(
*args,
des_file=des_file,
**kwargs,
)
class MiniHackLabyrinthSmall(MiniHackNavigation):
def __init__(self, *args, **kwargs):
map = """
--------------------
|.......|.|........|
|.-----.|.|.-----|.|
|.|...|.|.|......|.|
|.|.|.|.|.|-----.|.|
|.|.|...|....|.|.|.|
|.|.--------.|.|.|.|
|.|..........|...|.|
|.|--------------|.|
|..................|
--------------------
"""
lvl_gen = LevelGenerator(map=map, lit=True)
lvl_gen.set_start_pos((9, 1))
lvl_gen.add_goal_pos((14, 5))
des_file = lvl_gen.get_des()
kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 400)
super().__init__(
*args,
des_file=des_file,
**kwargs,
)
registration.register(
id="MiniHack-Labyrinth-Big-v0",
entry_point="minihack.envs.lab:MiniHackLabyrinth",
)
registration.register(
id="MiniHack-Labyrinth-Small-v0",
entry_point="minihack.envs.lab:MiniHackLabyrinthSmall",
)
| 28.180723 | 75 | 0.351432 |
73d3d62f790375c18ac9e34cf96915e5c4330ce4 | 1,045 | py | Python | codegeneration/code_manip/snake_case_vs_CamelCase.py | sacceus/BabylonCpp | 94669cf7cbe3214ec6e905cbf249fa0c9daf6222 | [
"Apache-2.0"
] | 277 | 2017-05-18T08:27:10.000Z | 2022-03-26T01:31:37.000Z | codegeneration/code_manip/snake_case_vs_CamelCase.py | sacceus/BabylonCpp | 94669cf7cbe3214ec6e905cbf249fa0c9daf6222 | [
"Apache-2.0"
] | 77 | 2017-09-03T15:35:02.000Z | 2022-03-28T18:47:20.000Z | codegeneration/code_manip/snake_case_vs_CamelCase.py | sacceus/BabylonCpp | 94669cf7cbe3214ec6e905cbf249fa0c9daf6222 | [
"Apache-2.0"
] | 37 | 2017-03-30T03:36:24.000Z | 2022-01-28T08:28:36.000Z | from types import *
def ToCamelCase(snake_case: str) -> str:
items = snake_case.split("_")
def UpperFirstLetter(word):
if len(word) == 0:
return ""
return word[:1].upper() + word[1:]
itemsCamel = map(UpperFirstLetter, items)
r = "".join(itemsCamel)
return r
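# Illustrative examples (not part of the original module): per the logic above,
# ToCamelCase("hello_world") returns "HelloWorld" and
# ToCamelCase("pbr_metallic_roughness_material") returns
# "PbrMetallicRoughnessMaterial" (only the first letter of each part is upper-cased).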
def to_snake_case(CamelCase: str) -> str:
r = ""
if CamelCase[0] == "I" and CamelCase[1].isupper():
r = "i" + CamelCase[1].lower()
CamelCase = CamelCase[2:]
else:
r = CamelCase[0].lower()
CamelCase = CamelCase[1:]
# PBRMetallicRoughnessMaterial
for i in range(len(CamelCase)):
c = CamelCase[i]
add_underscore = False
if c.isupper() and i < len(CamelCase) - 1 and CamelCase[i + 1].islower():
add_underscore = True
if add_underscore:
r = r + "_"
r = r + c.lower()
return r
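# Illustrative results of the function above (see also the commented examples below):
# "HelloWorld" -> "hello_world", "PBRMetallicRoughnessMaterial" ->
# "pbr_metallic_roughness_material", "ICanvas" -> "icanvas".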
# print(to_snake_case("HelloWorld"))
# print(to_snake_case("PBRMetallicRoughnessMaterial"))
# print(to_snake_case("ICanvas"))
| 28.243243 | 81 | 0.576077 |
73d3f93030fe33f3f12155dd2b91464c327544ab | 346 | py | Python | CritsAndCoffee.Quant.API/API_Quant/config.py | srwagsta/critsAndCoffee2.0 | 4d2042b91675c50c2b6938c1e1863f873c80d391 | [
"Apache-1.1"
] | null | null | null | CritsAndCoffee.Quant.API/API_Quant/config.py | srwagsta/critsAndCoffee2.0 | 4d2042b91675c50c2b6938c1e1863f873c80d391 | [
"Apache-1.1"
] | 10 | 2020-07-16T23:43:12.000Z | 2022-03-02T03:52:43.000Z | CritsAndCoffee.Quant.API/API_Quant/config.py | srwagsta/critsAndCoffee2.0 | 4d2042b91675c50c2b6938c1e1863f873c80d391 | [
"Apache-1.1"
] | null | null | null | import os
DEBUG = True
SECRET_KEY = os.environ['QUANT_API_SECRET_KEY']
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
SQLALCHEMY_TRACK_MODIFICATIONS = False
QUANDL_API_KEY = os.environ['QUANDL_API_KEY']
ACCEPTED_SCOPE_SET = set([x.strip().lower() for x in
(os.environ.get('ACCEPT_TOKEN_CLAIMS').split(','))])
| 24.714286 | 78 | 0.710983 |
73d4592aa3a5af656bfa2c6ef340e618742d5444 | 15,821 | py | Python | pfdicom_tagSub/pfdicom_tagSub.py | reddigari/pfdicom_tagSub | 0eb8f7be164f6f59db35b18ab94b25b887b8163e | [
"Apache-2.0"
] | null | null | null | pfdicom_tagSub/pfdicom_tagSub.py | reddigari/pfdicom_tagSub | 0eb8f7be164f6f59db35b18ab94b25b887b8163e | [
"Apache-2.0"
] | null | null | null | pfdicom_tagSub/pfdicom_tagSub.py | reddigari/pfdicom_tagSub | 0eb8f7be164f6f59db35b18ab94b25b887b8163e | [
"Apache-2.0"
] | null | null | null | # Turn off all logging for modules in this libary.
import logging
logging.disable(logging.CRITICAL)
# System imports
import os
import getpass
import argparse
import json
import pprint
import csv
import re
# Project specific imports
import pfmisc
from pfmisc._colors import Colors
from pfmisc import other
from pfmisc import error
import pudb
import pftree
import pfdicom
class pfdicom_tagSub(pfdicom.pfdicom):
"""
A class based on the 'pfdicom' infrastructure that extracts
and processes DICOM tags according to several requirements.
Powerful output formatting, such as image conversion to jpg/png
and generation of html reports is also supported.
"""
def declare_selfvars(self):
"""
A block to declare self variables
"""
#
# Object desc block
#
self.str_desc = ''
self.__name__ = "pfdicom_tagSub"
self.str_version = "2.0.8"
# Tags
self.b_tagList = False
self.b_tagFile = False
self.str_tagStruct = ''
self.str_tagFile = ''
self.d_tagStruct = {}
self.fileFilter = ''
self.dirFilter = ''
self.dp = None
self.log = None
self.tic_start = 0.0
self.pp = pprint.PrettyPrinter(indent=4)
self.verbosityLevel = -1
def __init__(self, *args, **kwargs):
"""
Constructor for pfdicom_tagSub.
Basically sets some derived class specific member variables (with
explicit call to *this* class) and then calls super class
constructor.
"""
def tagStruct_process(str_tagStruct):
self.str_tagStruct = str_tagStruct
if len(self.str_tagStruct):
self.d_tagStruct = json.loads(str_tagStruct)
def tagFile_process(str_tagFile):
self.str_tagFile = str_tagFile
if len(self.str_tagFile):
self.b_tagFile = True
with open(self.str_tagFile) as f:
self.d_tagStruct = json.load(f)
def outputFile_process(str_outputFile):
self.str_outputFileType = str_outputFile
# pudb.set_trace()
pfdicom_tagSub.declare_selfvars(self)
# Process some of the kwargs by the base class
super().__init__(*args, **kwargs)
for key, value in kwargs.items():
if key == "outputFileType": outputFile_process(value)
if key == 'tagFile': tagFile_process(value)
if key == 'tagStruct': tagStruct_process(value)
if key == 'verbosity': self.verbosityLevel = int(value)
if key == 'fileFilter': self.fileFilter = value
if key == 'dirFilter': self.dirFilter = value
# Set logging
self.dp = pfmisc.debug(
verbosity = self.verbosityLevel,
within = self.__name__
)
self.log = pfmisc.Message()
self.log.syslog(True)
def inputReadCallback(self, *args, **kwargs):
"""
Callback for reading files from specific directory.
In the context of pfdicom_tagSub, this implies reading
DICOM files and returning the dcm data set.
"""
str_path = ''
l_file = []
b_status = True
l_DCMRead = []
filesRead = 0
for k, v in kwargs.items():
if k == 'l_file': l_file = v
if k == 'path': str_path = v
if len(args):
at_data = args[0]
str_path = at_data[0]
l_file = at_data[1]
for f in l_file:
self.dp.qprint("reading: %s/%s" % (str_path, f), level = 5)
d_DCMfileRead = self.DICOMfile_read(
file = '%s/%s' % (str_path, f)
)
b_status = b_status and d_DCMfileRead['status']
l_DCMRead.append(d_DCMfileRead)
str_path = d_DCMfileRead['inputPath']
filesRead += 1
if not len(l_file): b_status = False
return {
'status': b_status,
'l_file': l_file,
'str_path': str_path,
'l_DCMRead': l_DCMRead,
'filesRead': filesRead
}
def inputAnalyzeCallback(self, *args, **kwargs):
"""
Callback for doing actual work on the read data.
This essentially means substituting tags in the
passed list of dcm data sets.
"""
d_DCMRead = {}
b_status = False
l_dcm = []
l_file = []
filesAnalyzed = 0
for k, v in kwargs.items():
if k == 'd_DCMRead': d_DCMRead = v
if k == 'path': str_path = v
if len(args):
at_data = args[0]
str_path = at_data[0]
d_DCMRead = at_data[1]
for d_DCMfileRead in d_DCMRead['l_DCMRead']:
str_path = d_DCMRead['str_path']
l_file = d_DCMRead['l_file']
self.dp.qprint("analyzing: %s" % l_file[filesAnalyzed], level = 5)
d_tagProcess = self.tagStruct_process(d_DCMfileRead['d_DICOM'])
for k, v in d_tagProcess['d_tagNew'].items():
d_tagsInStruct = self.tagsInString_process(d_DCMfileRead['d_DICOM'], v)
str_tagValue = d_tagsInStruct['str_result']
setattr(d_DCMfileRead['d_DICOM']['dcm'], k, str_tagValue)
l_dcm.append(d_DCMfileRead['d_DICOM']['dcm'])
b_status = True
filesAnalyzed += 1
return {
'status': b_status,
'l_dcm': l_dcm,
'str_path': str_path,
'l_file': l_file,
'filesAnalyzed': filesAnalyzed
}
    def outputSaveCallback(self, at_data, **kwargs):
"""
Callback for saving outputs.
In order to be thread-safe, all directory/file
descriptors must be *absolute* and no chdir()'s
must ever be called!
"""
path = at_data[0]
d_outputInfo = at_data[1]
str_cwd = os.getcwd()
other.mkdir(self.str_outputDir)
filesSaved = 0
other.mkdir(path)
for f, ds in zip(d_outputInfo['l_file'], d_outputInfo['l_dcm']):
ds.save_as('%s/%s' % (path, f))
self.dp.qprint("saving: %s/%s" % (path, f), level = 5)
filesSaved += 1
return {
'status': True,
'filesSaved': filesSaved
}
def tagStruct_process(self, d_DICOM):
"""
A method to "process" any regular expression in the passed
tagStruct dictionary against a current batch of DICOM files.
This is designed to bulk replace all tags that resolve a bool
true in the tag space with the corresponding (possibly itself
expanded) value. For example,
"re:.*hysician": "%_md5|4_%tag"
        will replace the value of any tag whose name contains "hysician" with
        an md5 hash (first 4 characters) of the tag.
"""
def tagValue_process(tag, value):
"""
For a given tag and value, process the value component for a
special construct '#tag'. This construct in the value
string is replaced by the tag string itself.
If '#tag' is not in the value string, simply return the
{tag: value}
pair.
"""
if "#tag" in value:
value = value.replace("#tag", tag)
return {tag: value}
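        # Illustrative example (added for clarity, not in the original source):
        # tagValue_process("PatientName", "%_md5|4_#tag") returns
        # {"PatientName": "%_md5|4_PatientName"}; the "#tag" placeholder is
        # replaced by the tag's own name before any later string expansion.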
d_tagNew : dict = self.d_tagStruct.copy()
b_status : bool = False
for k, v in self.d_tagStruct.items():
if 're:' in k:
str_reg = k.split('re:')[1]
regex = re.compile(r'%s' % str_reg)
for tag in d_DICOM['d_dicomSimple']:
if bool(re.match(regex, tag)):
d_tagNew.update(tagValue_process(tag, v))
b_status = True
return {
'status': b_status,
'd_tagNew': d_tagNew
}
def tags_substitute(self, **kwargs):
"""
A simple "alias" for calling the pftree method.
"""
d_tagSub = {}
d_tagSub = self.pf_tree.tree_process(
inputReadCallback = self.inputReadCallback,
analysisCallback = self.inputAnalyzeCallback,
outputWriteCallback = self.outputSaveCallback,
persistAnalysisResults = False
)
return d_tagSub
def FS_filter(self, at_data, *args, **kwargs) -> dict:
"""
Apply a filter to the string space of file and directory
representations.
The purpose of this method is to reduce the original space of
"<path>": [<"filesToProcess">]
to only those paths and files that are relevant to the operation being
performed. Two filters are understood, a `fileFilter` that filters
filenames that match any of the passed search substrings from the CLI
        `--fileFilter`, and a `dirFilter` that filters directories whose
leaf node match any of the passed `--dirFilter` substrings.
The effect of these filters is hierarchical. First, the `fileFilter`
is applied across the space of files for a given directory path. The
files are subject to a logical OR operation across the comma separated
filter argument. Thus, a `fileFilter` of "png,jpg,body" will filter
all files that have the substrings of "png" OR "jpg" OR "body" in their
filenames.
Next, if a `dirFilter` has been specified, the current string path
corresponding to the filenames being filtered is considered. Each
        string in the comma separated `dirFilter` list is extracted, and if
the basename of the working directory contains the filter substring,
the (filtered) files are conserved. If the basename of the working
directory does not contain any of the `dirFilter` substrings, the
file list is discarded.
Thus, a `dirFilter` of "100307,100556" and a fileFilter of "png,jpg"
will reduce the space of files to process to ONLY files that have
a parent directory of "100307" OR "100556" AND that contain either the
string "png" OR "jpg" in their file names.
"""
b_status : bool = True
l_file : list = []
l_dirHits : list = []
l_dir : list = []
str_path : str = at_data[0]
al_file : list = at_data[1]
if len(self.fileFilter):
al_file = [x \
for y in self.fileFilter.split(',') \
for x in al_file if y in x]
if len(self.dirFilter):
l_dirHits = [os.path.basename(str_path) \
for y in self.dirFilter.split(',') \
if y in os.path.basename(str_path)]
if len(l_dirHits):
# Remove any duplicates in the l_dirHits:. Duplicates can
# occur if the tokens in the filter expression map more than
# once into the leaf node in the <str_path>, as a path that is
#
# /some/dir/in/the/space/1234567
#
# and a search filter on the dirspace of "123,567"
[l_dir.append(x) for x in l_dirHits if x not in l_dir]
else:
# If no dir hits for this dir, then we zero out the
# file filter
al_file = []
if len(al_file):
al_file.sort()
l_file = al_file
b_status = True
else:
self.dp.qprint( "No valid files to analyze found in path %s!" %
str_path, comms = 'warn', level = 5)
l_file = None
b_status = False
return {
'status': b_status,
'l_file': l_file
}
def filterFileHitList(self) -> dict:
"""
Entry point for filtering the file filter list
at each directory node.
"""
d_filterFileHitList = self.pf_tree.tree_process(
inputReadCallback = None,
analysisCallback = self.FS_filter,
outputWriteCallback = None,
applyResultsTo = 'inputTree',
applyKey = 'l_file',
persistAnalysisResults = True
)
return d_filterFileHitList
def run(self, *args, **kwargs):
"""
The run method calls the base class run() to
perform initial probe and analysis.
Then, it effectively calls the method to perform
the DICOM tag substitution.
"""
b_status = True
d_tagSub = {}
b_timerStart = False
d_filter = {}
self.dp.qprint(
"Starting pfdicom_tagSub run... (please be patient while running)",
level = 1
)
for k, v in kwargs.items():
if k == 'timerStart': b_timerStart = bool(v)
if b_timerStart:
other.tic()
# Run the base class, which probes the file tree
# and does an initial analysis. Also suppress the
# base class from printing JSON results since those
# will be printed by this class
d_pfdicom = super().run(
JSONprint = False,
timerStart = False
)
if d_pfdicom['status']:
if len(self.fileFilter) or len(self.dirFilter):
d_filter = self.filterFileHitList()
b_status = d_filter['status']
str_startDir = os.getcwd()
os.chdir(self.str_inputDir)
if b_status:
d_tagSub = self.tags_substitute()
b_status = b_status and d_tagSub['status']
os.chdir(str_startDir)
d_ret = {
'status': b_status,
'd_pfdicom': d_pfdicom,
'd_tagSub': d_tagSub,
'd_filter': d_filter,
'runTime': other.toc()
}
if self.b_json:
self.ret_dump(d_ret, **kwargs)
self.dp.qprint('Returning from pfdicom_tagSub run...', level = 1)
return d_ret
| 36.203661 | 88 | 0.501422 |
73d45d5f9cade3a3177d5e94521e7b925015fe91 | 14,208 | py | Python | src/google_ads.py | Hojland/paid-media-db-ingestion | 051b4f86796b1b7bcefc4aa361dc9c98f0cac69e | [
"MIT"
] | null | null | null | src/google_ads.py | Hojland/paid-media-db-ingestion | 051b4f86796b1b7bcefc4aa361dc9c98f0cac69e | [
"MIT"
] | null | null | null | src/google_ads.py | Hojland/paid-media-db-ingestion | 051b4f86796b1b7bcefc4aa361dc9c98f0cac69e | [
"MIT"
] | null | null | null | import os
import sys
import re
import pandas as pd
import functools
from datetime import datetime, timedelta
from sqlalchemy.types import String, DateTime, Numeric
from google_auth_oauthlib.flow import InstalledAppFlow
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.errors import GoogleAdsException
from google.ads.google_ads.v4.proto import enums
import settings
import sql
from utils import utils, sql_utils
from user_list_service_client import UserListServiceClient
def get_nested_attr(obj, attr, *args):
"""Gets the value of a nested attribute from an object.
Args:
obj: an object to retrieve an attribute value from.
attr: a string of the attribute separated by dots.
Returns:
The object attribute value or the given *args if the attr isn't present.
"""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split('.'))
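# Illustrative example (added for clarity): for a GoogleAdsRow `row`,
# get_nested_attr(row, 'campaign.name') is equivalent to row.campaign.name,
# which is how the SELECT columns are resolved in query_ga_campaign below.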
def get_enum_translation(enum: str):
# the enum of the supported enums in google ads
# (https://github.com/googleads/google-ads-python/tree/master/google/ads/google_ads/v4/proto/enums)
if enum == 'device':
enum_list = enums.device_pb2.DeviceEnum.Device.items()
elif enum == 'conversion_attribution_event_type':
enum_list = enums.conversion_attribution_event_type_pb2.ConversionAttributionEventTypeEnum.ConversionAttributionEventType.items()
elif enum == 'conversion_action_category':
enum_list = enums.conversion_action_category_pb2.ConversionActionCategoryEnum.ConversionActionCategory.items()
else:
raise NotImplementedError('This Enum is not yet implemented')
trans_dct = {enum_val: enum_name for enum_name, enum_val in enum_list}
return trans_dct
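# Illustrative example (assumption: exact names and values depend on the Google
# Ads API version in use): for enum='device' the returned mapping looks roughly
# like {0: 'UNSPECIFIED', 1: 'UNKNOWN', 2: 'MOBILE', 3: 'TABLET', 4: 'DESKTOP'},
# i.e. enum integer values mapped back to their names.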
def query_ga_campaign(query: str, client: GoogleAdsClient, customer_id: str):
ga_service = client.get_service('GoogleAdsService', version='v4')
# Issues a search request using streaming.
customer_id = customer_id.replace('-', '')
response = ga_service.search_stream(customer_id, query=query)
select_section = re.search('(?<=SELECT )(.*)(?= FROM)', query)[0]
extract_values = select_section.split(', ')
campaigns = []
try:
for batch in response:
for row in batch.results:
campaign = {}
#for col_table_and_name in col_tables_and_names:
for extract_value in extract_values:
attr = get_nested_attr(row, extract_value)
col_name = extract_value.split('.')[-1]
if type(attr) in [int]:
campaign_attr = {
col_name: attr
}
else:
campaign_attr = {
col_name: getattr(attr, 'value')
}
campaign.update(campaign_attr)
campaigns.append(campaign)
except GoogleAdsException as ex:
print(f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:')
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f'\t\tOn field: {field_path_element.field_name}')
sys.exit(1)
df_campaigns = pd.DataFrame.from_dict(campaigns)
return df_campaigns
def get_campaign_names(client: GoogleAdsClient, customer_id: str):
query = ('SELECT campaign.id, campaign.name '
'FROM campaign '
'ORDER BY campaign.id')
response = query_ga_campaign(query, client, customer_id)
return response
def get_campaign_report_by_device(client: GoogleAdsClient, customer_id: str, time_period_query: str='DURING LAST_30_DAYS'):
query = ('SELECT campaign.id, campaign.name, campaign.start_date, campaign.end_date, '
'metrics.clicks, '
'metrics.cost_micros, metrics.impressions, metrics.ctr, '
'segments.device, segments.date '
'FROM campaign '
'WHERE campaign.status = "ENABLED" AND '
f'segments.date {time_period_query} '
'ORDER BY campaign.id')
campaign_report = query_ga_campaign(query, client, customer_id)
trans_dct = get_enum_translation(enum='device')
campaign_report['device'] = campaign_report['device'].replace(trans_dct)
campaign_report['cost'] = campaign_report['cost_micros'] / 1000000
return campaign_report
def get_conversion_campaign_report(client: GoogleAdsClient, customer_id: str, time_period_query: str='DURING LAST_30_DAYS'):
query = ('SELECT campaign.id, campaign.name, campaign.start_date, campaign.end_date, '
'metrics.conversions, metrics.conversions_value, '
'segments.date, segments.conversion_action_name, segments.conversion_action_category '
'FROM campaign '
'WHERE campaign.status = "ENABLED" AND '
f'segments.date {time_period_query} '
'ORDER BY campaign.id')
campaign_report = query_ga_campaign(query, client, customer_id)
trans_dct = get_enum_translation(enum='conversion_action_category')
campaign_report['conversion_action_category'] = campaign_report['conversion_action_category'].replace(trans_dct)
campaign_report['date'] = pd.to_datetime(campaign_report['date'])
return campaign_report
def get_campaign_report(client: GoogleAdsClient, customer_id: str, time_period_query: str='DURING LAST_30_DAYS'):
query = ('SELECT campaign.id, campaign.name, campaign.start_date, campaign.end_date, '
'metrics.clicks, '
'metrics.cost_micros, metrics.impressions, metrics.ctr, '
'segments.date '
'FROM campaign '
'WHERE campaign.status = "ENABLED" AND '
f'segments.date {time_period_query} '
'ORDER BY campaign.id')
campaign_report = query_ga_campaign(query, client, customer_id)
campaign_report['cost'] = campaign_report['cost_micros'] / 1000000
return campaign_report
def get_platform_type_brandorproduct_campaign_from_naming(name: pd.Series):
col_split = name.str.split(' - ', n=3, expand=True)
col_split.loc[col_split[2].str.contains('BrandOnly', na=False), 3] = 'BrandOnly'
col_split[2] = col_split[2].str.replace(r'\s{1,2}\[BrandOnly\]', '')
platform = col_split[0]
campaign_type = col_split[1]
brandorproduct = col_split[2]
campaign = col_split[3]
return platform, campaign_type, brandorproduct, campaign
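# Illustrative example (hypothetical campaign names, added for clarity):
# "Google - Search - Broadband - SummerSale" parses to platform="Google",
# campaign_type="Search", brandorproduct="Broadband", campaign="SummerSale",
# while "Google - Search - Broadband [BrandOnly]" gets campaign="BrandOnly"
# via the special-case handling above.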
## THIS IS DEPRECATED FOR USE HERE
def get_access_token_installed_app(client_secrets_path, scopes):
# A method to get the first access token (after that the client library should refresh it when needed)
flow = InstalledAppFlow.from_client_secrets_file(
client_secrets_path, scopes=scopes)
flow.run_console()
print('Access token: %s' % flow.credentials.token)
print('Refresh token: %s' % flow.credentials.refresh_token)
def main():
client = GoogleAdsClient.load_from_env()
mariadb_engine = sql_utils.create_engine(settings.MARIADB_CONFIG, db_name='output', db_type='mysql')
LAG_TIME = settings.LAG_TIME
# put google_campaign_report to mysql database
if sql_utils.table_exists_notempty(mariadb_engine, 'output', 'google_campaign_report'):
latest_date = sql_utils.get_latest_date_in_table(mariadb_engine, 'google_campaign_report')
else:
latest_date = datetime.today()
LAG_TIME = 365
from_date = (latest_date-timedelta(days=LAG_TIME)).strftime('%Y-%m-%d')
to_date = (datetime.today()+timedelta(days=1)).strftime('%Y-%m-%d')
google_campaign_report = get_campaign_report(client, settings.YOUSEE_CUSTOMER_ID, time_period_query=f'BETWEEN "{from_date}" AND "{to_date}"')
google_campaign_report['platform'], google_campaign_report['campaign_type'], \
google_campaign_report['brandorproduct'], google_campaign_report['campaign'] = \
get_platform_type_brandorproduct_campaign_from_naming(google_campaign_report['name'])
cols = ['name', 'platform', 'campaign_type', 'brandorproduct', 'campaign', 'date', 'start_date', 'clicks', 'impressions', 'cost', 'ctr']
google_campaign_report = google_campaign_report[cols]
dtype_trans = sql.get_dtype_trans(google_campaign_report)
dtype_trans.update({'name': String(150)})
dtype_trans.update({'campaign': String(80)})
dtype_trans.update({'date': DateTime()})
if sql_utils.table_exists_notempty(mariadb_engine, 'output', 'google_campaign_report'):
sql_utils.delete_date_entries_in_table(mariadb_engine, from_date, 'google_campaign_report')
google_campaign_report.to_sql('google_campaign_report', con=mariadb_engine, dtype=dtype_trans, if_exists='append', index=False)
#mariadb_engine.execute('CREATE INDEX google_campaign_report_date_IDX USING BTREE ON `output`.google_campaign_report (date);')
#mariadb_engine.execute('CREATE INDEX google_campaign_report_name_IDX USING HASH ON `output`.google_campaign_report (name, platform, campaign_type, brandorproduct, campaign);')
# put google_device_campaign_report to mysql database
if sql_utils.table_exists_notempty(mariadb_engine, 'output', 'google_device_campaign_report'):
latest_date = sql_utils.get_latest_date_in_table(mariadb_engine, 'google_device_campaign_report')
else:
latest_date = datetime.today()
LAG_TIME = 365
from_date = (latest_date-timedelta(days=LAG_TIME)).strftime('%Y-%m-%d')
to_date = (datetime.today()+timedelta(days=1)).strftime('%Y-%m-%d')
google_device_campaign_report = get_campaign_report_by_device(client, settings.YOUSEE_CUSTOMER_ID, time_period_query=f'BETWEEN "{from_date}" AND "{to_date}"')
google_device_campaign_report['platform'], google_device_campaign_report['campaign_type'], \
google_device_campaign_report['brandorproduct'], google_device_campaign_report['campaign'] = \
get_platform_type_brandorproduct_campaign_from_naming(google_device_campaign_report['name'])
cols = ['name', 'platform', 'campaign_type', 'brandorproduct', 'campaign', 'date', 'device', 'start_date', 'clicks', 'impressions', 'cost', 'ctr']
google_device_campaign_report = google_device_campaign_report[cols]
dtype_trans = sql.get_dtype_trans(google_device_campaign_report)
dtype_trans.update({'name': String(150)})
dtype_trans.update({'campaign': String(80)})
dtype_trans.update({'date': DateTime()})
if sql_utils.table_exists_notempty(mariadb_engine, 'output', 'google_device_campaign_report'):
sql_utils.delete_date_entries_in_table(mariadb_engine, from_date, 'google_device_campaign_report')
google_device_campaign_report.to_sql('google_device_campaign_report', con=mariadb_engine, dtype=dtype_trans, if_exists='append', index=False)
#mariadb_engine.execute('CREATE INDEX device_campaign_report_date_IDX USING BTREE ON `output`.google_device_campaign_report (date);')
#mariadb_engine.execute('CREATE INDEX device_campaign_report_name_IDX USING HASH ON `output`.google_device_campaign_report (name, platform, campaign_type, brandorproduct, campaign);')
#mariadb_engine.execute('CREATE INDEX device_campaign_report_device_IDX USING HASH ON `output`.google_device_campaign_report (device);')
# put google_conversion_campaign_report to mysql database
if sql_utils.table_exists_notempty(mariadb_engine, 'output', 'google_conversion_campaign_report'):
latest_date = sql_utils.get_latest_date_in_table(mariadb_engine, 'google_conversion_campaign_report')
else:
latest_date = datetime.today()
LAG_TIME = 365
from_date = (latest_date-timedelta(days=LAG_TIME)).strftime('%Y-%m-%d')
to_date = (datetime.today()+timedelta(days=1)).strftime('%Y-%m-%d')
google_conversion_campaign_report = get_conversion_campaign_report(client, settings.YOUSEE_CUSTOMER_ID, time_period_query=f'BETWEEN "{from_date}" AND "{to_date}"')
google_conversion_campaign_report['platform'], google_conversion_campaign_report['campaign_type'], \
google_conversion_campaign_report['brandorproduct'], google_conversion_campaign_report['campaign'] = \
get_platform_type_brandorproduct_campaign_from_naming(google_conversion_campaign_report['name'])
cols = ['name', 'platform', 'campaign_type', 'brandorproduct', 'campaign', 'date', 'start_date', 'conversion_action_name', 'conversion_action_category', 'conversions', 'conversions_value']
google_conversion_campaign_report = google_conversion_campaign_report[cols]
dtype_trans = sql.get_dtype_trans(google_conversion_campaign_report)
dtype_trans.update({'name': String(150)})
dtype_trans.update({'campaign': String(80)})
dtype_trans.update({'date': DateTime()})
mariadb_engine = sql_utils.create_engine(settings.MARIADB_CONFIG, db_name='output', db_type='mysql')
if sql_utils.table_exists_notempty(mariadb_engine, 'output', 'google_conversion_campaign_report'):
sql_utils.delete_date_entries_in_table(mariadb_engine, from_date, 'google_conversion_campaign_report')
google_conversion_campaign_report.to_sql('google_conversion_campaign_report', con=mariadb_engine, dtype=dtype_trans, if_exists='append', index=False)
#mariadb_engine.execute('CREATE INDEX google_conversion_campaign_report_date_IDX USING BTREE ON `output`.google_conversion_campaign_report (date);')
#mariadb_engine.execute('CREATE INDEX google_conversion_campaign_report_name_IDX USING HASH ON `output`.google_conversion_campaign_report (name, platform, campaign_type, brandorproduct, campaign);')
#mariadb_engine.execute('CREATE INDEX google_conversion_campaign_report_conversionaction_IDX USING HASH ON `output`.google_conversion_campaign_report (conversion_action_name);')
if __name__ == '__main__':
configured_scopes = [settings.SCOPE]
#get_access_token('../credentials/google_client_secrets.json', configured_scopes)
main() | 52.428044 | 202 | 0.725507 |
73d4af8ee40df72c2719aface0750233de0175ed | 938 | py | Python | demo/yaml_api_usage.py | RockFeng0/rock4automation | a29270ab9fa4cdc79f6453971b7c7a21f01442b0 | [
"MIT"
] | 5 | 2018-09-25T05:49:49.000Z | 2021-12-30T11:06:09.000Z | demo/yaml_api_usage.py | RockFeng0/rock4automation | a29270ab9fa4cdc79f6453971b7c7a21f01442b0 | [
"MIT"
] | 3 | 2018-04-01T04:27:21.000Z | 2019-01-03T11:02:33.000Z | demo/yaml_api_usage.py | RockFeng0/rock4automation | a29270ab9fa4cdc79f6453971b7c7a21f01442b0 | [
"MIT"
] | 7 | 2018-09-25T05:49:51.000Z | 2021-12-30T11:06:11.000Z | # -*- encoding: utf-8 -*-
'''
Current module: demo.yaml_api_usage
Rough version history:
v1.0 Original version to use
********************************************************************
@AUTHOR: Administrator-Bruce Luo(罗科峰)
MAIL: lkf20031988@163.com
    RCS:      demo.yaml_api_usage,v 1.0  2017-05-18
    FROM:   2017-05-18
********************************************************************
======================================================================
UI and Web Http automation frame for python.
'''
from rock4 import shoot,target,PacketTest
# Create and initialize the project
target("test", proj_path = r'D:\auto\env\testProject', initdirs = True)
def case_detail(devdriver):
shoot(devdriver = devdriver,modelfile = r'D:\auto\env\testProject\testcase\api_yaml_usage.yaml',modeltype="api")
if __name__ == "__main__":
    # Instantiate a test
test = PacketTest()
test.run_model_case(case_detail) | 30.258065 | 117 | 0.527719 |
73d4b5bbe6797533fa75ac0c8a8d17a0171bf2ef | 6,744 | py | Python | HttpRunnerManager/settings.py | wishchen/HttpRunnerManager | 1e72baa8a84ca70124af908ac683bf88b869d48a | [
"MIT"
] | null | null | null | HttpRunnerManager/settings.py | wishchen/HttpRunnerManager | 1e72baa8a84ca70124af908ac683bf88b869d48a | [
"MIT"
] | null | null | null | HttpRunnerManager/settings.py | wishchen/HttpRunnerManager | 1e72baa8a84ca70124af908ac683bf88b869d48a | [
"MIT"
] | null | null | null | """
Django settings for HttpRunnerManager project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import djcelery
from django.conf.global_settings import SESSION_COOKIE_AGE
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=w+1if4no=o&6!la#5j)3wsu%k@$)6bf+@3=i0h!5)h9h)$*s7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True # Set this to False when deploying to a production environment
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ApiManager',
'djcelery',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'HttpRunnerManager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'HttpRunnerManager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
        'NAME': 'HttpRunner',  # name of the database to create
        'USER': 'root',  # database login user
        'PASSWORD': 'lcc123456',  # database login password
        'HOST': '127.0.0.1',  # IP address of the database server
        'PORT': '3306',  # listening port, the default 3306 is fine
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# STATIC_ROOT = os.path.join(BASE_DIR, 'static') # where Nginx looks for static files; uncomment this line for production deployment
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),  # additional directory for static files
)
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder'
)
SESSION_COOKIE_AGE = 30 * 60
djcelery.setup_loader()
CELERY_ENABLE_UTC = True
CELERY_TIMEZONE = 'Asia/Shanghai'
BROKER_URL = 'amqp://guest:guest@127.0.0.1:5672//' # 127.0.0.1 is the IP address of the server running rabbitmq-server
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_RESULT_EXPIRES = 7200  # expiry time (in seconds) for celery task results
CELERYD_CONCURRENCY = 25  # celery worker concurrency (the command-line -c value); adjust to the server spec, 25 is usually enough
CELERYD_MAX_TASKS_PER_CHILD = 100  # number of tasks a worker runs before being replaced; a larger value such as 200 is recommended
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s [%(name)s:%(lineno)d] [%(module)s:%(funcName)s] [%(levelname)s]- %(message)s'}
        # log format
},
'filters': {
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
},
'default': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'logs/all.log'),
'maxBytes': 1024 * 1024 * 100,
'backupCount': 5,
'formatter': 'standard',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
'request_handler': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'logs/script.log'),
'maxBytes': 1024 * 1024 * 100,
'backupCount': 5,
'formatter': 'standard',
},
'scprits_handler': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'logs/script.log'),
'maxBytes': 1024 * 1024 * 100,
'backupCount': 5,
'formatter': 'standard',
},
},
'loggers': {
'django': {
'handlers': ['default', 'console'],
'level': 'INFO',
'propagate': True
},
'HttpRunnerManager.app': {
'handlers': ['default', 'console'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['request_handler'],
'level': 'DEBUG',
'propagate': True
},
'HttpRunnerManager': {
'handlers': ['scprits_handler', 'console'],
'level': 'DEBUG',
'propagate': True
},
'scripts': {
'handlers': ['scprits_handler', 'console'],
'level': 'DEBUG',
'propagate': True
},
}
}
| 30.107143 | 113 | 0.626928 |
73d4c79daf334a7f4f508c00d2bf84d468cac437 | 2,902 | py | Python | mialab/filtering/filter.py | mrunibe/MIALab | 82d3f0f4344620fd22384108b022730cde9c7215 | [
"Apache-2.0"
] | 2 | 2018-12-05T09:03:28.000Z | 2019-01-02T15:31:35.000Z | mialab/filtering/filter.py | riedj1/MIALab | 82d3f0f4344620fd22384108b022730cde9c7215 | [
"Apache-2.0"
] | null | null | null | mialab/filtering/filter.py | riedj1/MIALab | 82d3f0f4344620fd22384108b022730cde9c7215 | [
"Apache-2.0"
] | 1 | 2018-10-20T21:27:55.000Z | 2018-10-20T21:27:55.000Z | """This module contains classes to set up a filtering pipeline.
All modules in the filter package implement the basic IFilter interface and can be used to set up a pipeline.
"""
from abc import ABCMeta, abstractmethod
from typing import List
import SimpleITK as sitk
class IFilterParams(metaclass=ABCMeta):
"""Represents a filter parameters interface."""
class IFilter(metaclass=ABCMeta):
"""Filter base class."""
def __init__(self):
"""Initializes a new instance of the IFilter class."""
self.verbose = False
@abstractmethod
def execute(self, image: sitk.Image, params: IFilterParams=None) -> sitk.Image:
"""Executes a filter on an image.
Args:
image (sitk.Image): The image.
params (IFilterParams): The filter parameters.
Returns:
sitk.Image: The filtered image.
"""
raise NotImplementedError()
class FilterPipeline:
"""Represents a filter pipeline, which can be executed on images."""
def __init__(self, filters: List[IFilter]=None):
"""Initializes a new instance of the `FilterPipeline` class.
Args:
filters (list of IFilter): The filters.
"""
self.params = [] # holds image-specific parameters
self.filters = [] # holds the `IFilter`s
if filters is not None:
for filter_ in filters:
self.add_filter(filter_)
def add_filter(self, filter_: IFilter):
"""Add a filter to the pipeline.
Args:
filter_ (IFilter): A filter.
"""
if filter_ is None:
raise ValueError("The parameter filter needs to be specified.")
self.filters.append(filter_)
self.params.append(None) # params must have the same length as filters
def set_param(self, params, filter_index: int):
"""Sets an image-specific parameter for a filter.
Args:
params (IFilterParams): The parameter(s).
filter_index (int): The filter's index the parameters belong to.
"""
self.params[filter_index] = params
def execute(self, image: sitk.Image) -> sitk.Image:
"""Executes the filter pipeline on an image.
Args:
image (sitk.Image): The image.
Returns:
sitk.Image: The filtered image.
"""
for param_index, filter_ in enumerate(self.filters):
image = filter_.execute(image, self.params[param_index])
return image
def __str__(self):
"""Gets a printable string representation.
Returns:
str: String representation.
"""
string = 'FilterPipeline:\n'
for filter_no, filter_ in enumerate(self.filters):
string += ' ' + str(filter_no + 1) + '. ' + ' '.join(str(filter_).splitlines(True))
return string.format(self=self)
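# Minimal usage sketch (illustrative; MyFilter stands for any concrete IFilter
# subclass and is not part of this module):
#
#     pipeline = FilterPipeline([MyFilter()])
#     pipeline.set_param(my_params, 0)   # optional image-specific parameters
#     filtered = pipeline.execute(image)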
| 29.313131 | 109 | 0.614404 |
73d4cfc5fb2cef7e4acb68e1421f00b962c28b1b | 184 | py | Python | binarysearch.io/332_narcissistic_numbers.py | mishrakeshav/Competitive-Programming | b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2 | [
"MIT"
] | 2 | 2020-06-25T21:10:32.000Z | 2020-12-10T06:53:45.000Z | binarysearch.io/332_narcissistic_numbers.py | mishrakeshav/Competitive-Programming | b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2 | [
"MIT"
] | null | null | null | binarysearch.io/332_narcissistic_numbers.py | mishrakeshav/Competitive-Programming | b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2 | [
"MIT"
] | 3 | 2020-05-15T14:17:09.000Z | 2021-07-25T13:18:20.000Z | class Solution:
def solve(self, n):
n1 = str(n)
l = len(n1)
s = 0
for i in n1:
s += int(i)**l
return n == s
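# Illustrative check (added for clarity): for n = 153 the digit count l is 3 and
# 1**3 + 5**3 + 3**3 == 153, so solve returns True; for n = 10 it returns False.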
| 18.4 | 27 | 0.336957 |
73d4d62614de8e367e4b5893a5d04367f2faf7ae | 13,348 | py | Python | SinGAN/util/imresize.py | cbosoft/SinGAN | c3c950ddf59ae57e4685adc555f5cb4723bd7e78 | [
"MIT"
] | null | null | null | SinGAN/util/imresize.py | cbosoft/SinGAN | c3c950ddf59ae57e4685adc555f5cb4723bd7e78 | [
"MIT"
] | null | null | null | SinGAN/util/imresize.py | cbosoft/SinGAN | c3c950ddf59ae57e4685adc555f5cb4723bd7e78 | [
"MIT"
] | null | null | null | # This code was taken from: https://github.com/assafshocher/resizer by Assaf Shocher
from math import pi
import warnings
import numpy as np
from scipy.ndimage import filters, measurements, interpolation
from skimage import color
import torch
warnings.filterwarnings('ignore', message='The behavior of rgb2gray will change in', category=FutureWarning)
def denorm(x):
out = (x + 1) / 2
return out.clamp(0, 1)
def norm(x):
out = (x - 0.5) * 2
return out.clamp(-1, 1)
def move_to_gpu(t):
if (torch.cuda.is_available()):
t = t.to(torch.device('cuda'))
return t
def np2torch(x, cfg):
if cfg.image_channels == 3:
x = x[:,:,:,None]
x = x.transpose((3, 2, 0, 1))/255
else:
x = color.rgb2gray(x)
x = x[:,:,None,None]
x = x.transpose(3, 2, 0, 1)
x = torch.from_numpy(x).to(cfg.device).float()
x = norm(x)
return x
def torch2uint8(x):
x = x[0,:,:,:]
x = x.permute((1,2,0))
x = 255*denorm(x)
x = x.cpu().numpy()
x = x.astype(np.uint8)
return x
def imresize(im, scale, cfg):
im = torch2uint8(im)
im = imresize_in(im, scale_factor=scale)
im = np2torch(im, cfg)
return im
def imresize_to_shape(im, output_shape, cfg):
im = torch2uint8(im)
im = imresize_in(im, output_shape=output_shape)
im = np2torch(im, cfg)
return im
def imresize_in(im, scale_factor=None, output_shape=None, kernel=None, antialiasing=True, kernel_shift_flag=False):
# First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa
scale_factor, output_shape = fix_scale_and_size(im.shape, output_shape, scale_factor)
# For a given numeric kernel case, just do convolution and sub-sampling (downscaling only)
if type(kernel) == np.ndarray and scale_factor[0] <= 1:
return numeric_kernel(im, kernel, scale_factor, output_shape, kernel_shift_flag)
# Choose interpolation method, each method has the matching kernel size
method, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(kernel)
# Antialiasing is only used when downscaling
antialiasing *= (scale_factor[0] < 1)
# Sort indices of dimensions according to scale of each dimension. since we are going dim by dim this is efficient
sorted_dims = np.argsort(np.array(scale_factor)).tolist()
# Iterate over dimensions to calculate local weights for resizing and resize each time in one direction
out_im = np.copy(im)
for dim in sorted_dims:
# No point doing calculations for scale-factor 1. nothing will happen anyway
if scale_factor[dim] == 1.0:
continue
# for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the
# weights that multiply the values there to get its result.
weights, field_of_view = contributions(im.shape[dim], output_shape[dim], scale_factor[dim],
method, kernel_width, antialiasing)
# Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim
out_im = resize_along_dim(out_im, dim, weights, field_of_view)
return out_im
def fix_scale_and_size(input_shape, output_shape, scale_factor):
# First fixing the scale-factor (if given) to be standardized the function expects (a list of scale factors in the
# same size as the number of input dimensions)
if scale_factor is not None:
# By default, if scale-factor is a scalar we assume 2d resizing and duplicate it.
if np.isscalar(scale_factor):
scale_factor = [scale_factor, scale_factor]
# We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales
scale_factor = list(scale_factor)
scale_factor.extend([1] * (len(input_shape) - len(scale_factor)))
# Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size
# to all the unspecified dimensions
if output_shape is not None:
output_shape = list(np.uint(np.array(output_shape))) + list(input_shape[len(output_shape):])
# Dealing with the case of non-give scale-factor, calculating according to output-shape. note that this is
# sub-optimal, because there can be different scales to the same output-shape.
if scale_factor is None:
scale_factor = 1.0 * np.array(output_shape) / np.array(input_shape)
# Dealing with missing output-shape. calculating according to scale-factor
if output_shape is None:
output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor)))
return scale_factor, output_shape
def contributions(in_length, out_length, scale, kernel, kernel_width, antialiasing):
# This function calculates a set of 'filters' and a set of field_of_view that will later on be applied
# such that each position from the field_of_view will be multiplied with a matching filter from the
# 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers
# around it. This is only done for one dimension of the image.
# When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of
# 1/sf. this means filtering is more 'low-pass filter'.
fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing else kernel
kernel_width *= 1.0 / scale if antialiasing else 1.0
# These are the coordinates of the output image
out_coordinates = np.arange(1, out_length+1)
# These are the matching positions of the output-coordinates on the input image coordinates.
# Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels:
# [1,2,3,4] -> [1,2]. Remember each pixel number is the middle of the pixel.
# The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to
# the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big
# one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor).
# So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is
# at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means:
# (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf)
match_coordinates = 1.0 * out_coordinates / scale + 0.5 * (1 - 1.0 / scale)
# This is the left boundary to start multiplying the filter from, it depends on the size of the filter
left_boundary = np.floor(match_coordinates - kernel_width / 2)
# Kernel width needs to be enlarged because when covering has sub-pixel borders, it must 'see' the pixel centers
# of the pixels it only covered a part from. So we add one pixel at each side to consider (weights can zeroize them)
expanded_kernel_width = np.ceil(kernel_width) + 2
    # Determine a set of field_of_view for each output position, these are the pixels in the input image
    # that the pixel in the output image 'sees'. We get a matrix whose horizontal dim is the output pixels (big) and the
# vertical dim is the pixels it 'sees' (kernel_size + 2)
field_of_view = np.squeeze(np.uint(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1))
    # Assign weight to each pixel in the field of view. A matrix whose horizontal dim is the output pixels and the
# vertical dim is a list of weights matching to the pixel in the field of view (that are specified in
# 'field_of_view')
weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)
# Normalize weights to sum up to 1. be careful from dividing by 0
sum_weights = np.sum(weights, axis=1)
sum_weights[sum_weights == 0] = 1.0
weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1)
# We use this mirror structure as a trick for reflection padding at the boundaries
mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))))
field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])]
# Get rid of weights and pixel positions that are of zero weight
non_zero_out_pixels = np.nonzero(np.any(weights, axis=0))
weights = np.squeeze(weights[:, non_zero_out_pixels])
field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels])
# Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size
return weights, field_of_view
def resize_along_dim(im, dim, weights, field_of_view):
# To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize
tmp_im = np.swapaxes(im, dim, 0)
# We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for
# tmp_im[field_of_view.T], (bsxfun style)
weights = np.reshape(weights.T, list(weights.T.shape) + (np.ndim(im) - 1) * [1])
# This is a bit of a complicated multiplication: tmp_im[field_of_view.T] is a tensor of order image_dims+1.
# for each pixel in the output-image it matches the positions the influence it from the input image (along 1 dim
# only, this is why it only adds 1 dim to the shape). We then multiply, for each pixel, its set of positions with
# the matching set of weights. we do this by this big tensor element-wise multiplication (MATLAB bsxfun style:
# matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the
# same number
tmp_out_im = np.sum(tmp_im[field_of_view.T] * weights, axis=0)
# Finally we swap back the axes to the original order
return np.swapaxes(tmp_out_im, dim, 0)
def numeric_kernel(im, kernel, scale_factor, output_shape, kernel_shift_flag):
# See kernel_shift function to understand what this is
if kernel_shift_flag:
kernel = kernel_shift(kernel, scale_factor)
# First run a correlation (convolution with flipped kernel)
out_im = np.zeros_like(im)
for channel in range(np.ndim(im)):
out_im[:, :, channel] = filters.correlate(im[:, :, channel], kernel)
# Then subsample and return
return out_im[np.round(np.linspace(0, im.shape[0] - 1 / scale_factor[0], output_shape[0])).astype(int)[:, None],
np.round(np.linspace(0, im.shape[1] - 1 / scale_factor[1], output_shape[1])).astype(int), :]
def kernel_shift(kernel, sf):
# There are two reasons for shifting the kernel:
# 1. Center of mass is not in the center of the kernel which creates ambiguity. There is no possible way to know
# the degradation process included shifting so we always assume center of mass is center of the kernel.
# 2. We further shift kernel center so that top left result pixel corresponds to the middle of the sfXsf first
# pixels. Default is for odd size to be in the middle of the first pixel and for even sized kernel to be at the
# top left corner of the first pixel. that is why different shift size needed between od and even size.
# Given that these two conditions are fulfilled, we are happy and aligned, the way to test it is as follows:
# The input image, when interpolated (regular bicubic) is exactly aligned with ground truth.
# First calculate the current center of mass for the kernel
current_center_of_mass = measurements.center_of_mass(kernel)
# The second ("+ 0.5 * ....") is for applying condition 2 from the comments above
wanted_center_of_mass = np.array(kernel.shape) / 2 + 0.5 * (sf - (kernel.shape[0] % 2))
# Define the shift vector for the kernel shifting (x,y)
shift_vec = wanted_center_of_mass - current_center_of_mass
# Before applying the shift, we first pad the kernel so that nothing is lost due to the shift
# (biggest shift among dims + 1 for safety)
kernel = np.pad(kernel, np.int(np.ceil(np.max(shift_vec))) + 1, 'constant')
# Finally shift the kernel and return
return interpolation.shift(kernel, shift_vec)
# These next functions are all interpolation methods. x is the distance from the left pixel center
def cubic(x):
absx = np.abs(x)
absx2 = absx ** 2
absx3 = absx ** 3
return ((1.5*absx3 - 2.5*absx2 + 1) * (absx <= 1) +
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * ((1 < absx) & (absx <= 2)))
def lanczos2(x):
return (((np.sin(pi*x) * np.sin(pi*x/2) + np.finfo(np.float32).eps) /
((pi**2 * x**2 / 2) + np.finfo(np.float32).eps))
* (abs(x) < 2))
def box(x):
return ((-0.5 <= x) & (x < 0.5)) * 1.0
def lanczos3(x):
return (((np.sin(pi*x) * np.sin(pi*x/3) + np.finfo(np.float32).eps) /
((pi**2 * x**2 / 3) + np.finfo(np.float32).eps))
* (abs(x) < 3))
def linear(x):
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
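# Minimal usage sketch (illustrative, not part of the original module): given a
# uint8 image array `im`, imresize_in(im, scale_factor=0.5) halves both spatial
# dimensions with the default cubic kernel and antialiasing, while
# imresize_in(im, output_shape=(128, 128)) derives the scale factor from the
# requested output shape instead.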
| 46.508711 | 120 | 0.687818 |
73d4f15e0acc1028450667f025a7b28781a8f79b | 29,560 | py | Python | astronomaly/data_management/image_reader.py | nmcardoso/astronomaly | 28c6a7ced2e1553de463a74fc3a19635e45ae548 | [
"BSD-3-Clause"
] | 48 | 2019-11-22T14:41:59.000Z | 2022-03-23T01:48:59.000Z | astronomaly/data_management/image_reader.py | nmcardoso/astronomaly | 28c6a7ced2e1553de463a74fc3a19635e45ae548 | [
"BSD-3-Clause"
] | 12 | 2021-02-23T15:35:29.000Z | 2022-01-26T09:48:35.000Z | astronomaly/data_management/image_reader.py | nmcardoso/astronomaly | 28c6a7ced2e1553de463a74fc3a19635e45ae548 | [
"BSD-3-Clause"
] | 6 | 2019-11-27T10:02:43.000Z | 2021-10-11T02:18:06.000Z | from astropy.io import fits
from astropy.wcs import WCS
import numpy as np
import os
import tracemalloc
import pandas as pd
import matplotlib as mpl
import io
from skimage.transform import resize
import cv2
from astronomaly.base.base_dataset import Dataset
from astronomaly.base import logging_tools
mpl.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas # noqa: E402, E501
import matplotlib.pyplot as plt # noqa: E402
def convert_array_to_image(arr, plot_cmap='hot'):
"""
Function to convert an array to a png image ready to be served on a web
page.
Parameters
----------
arr : np.ndarray
Input image
Returns
-------
png image object
Object ready to be passed directly to the frontend
"""
with mpl.rc_context({'backend': 'Agg'}):
fig = plt.figure(figsize=(1, 1), dpi=4 * arr.shape[1])
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
plt.imshow(arr, cmap=plot_cmap)
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
plt.close(fig)
return output
def apply_transform(cutout, transform_function):
"""
Applies the transform function(s) given at initialisation to the image.
Parameters
----------
cutout : np.ndarray
Cutout of image
Returns
-------
np.ndarray
Transformed cutout
"""
if transform_function is not None:
try:
len(transform_function)
new_cutout = cutout
for f in transform_function:
new_cutout = f(new_cutout)
cutout = new_cutout
except TypeError: # Simple way to test if there's only one function
cutout = transform_function(cutout)
return cutout
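# Illustrative example (np.log1p is just a stand-in; any callable taking and
# returning an array works): apply_transform(cutout, np.log1p) applies a single
# transform, while apply_transform(cutout, [f1, f2]) applies f1 then f2 in order.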
class AstroImage:
def __init__(self, filenames, file_type='fits', fits_index=0, name=''):
"""
Lightweight wrapper for an astronomy image from a fits file
Parameters
----------
filenames : list of files
Filename of fits file to be read. Can be length one if there's only
one file or multiple if there are multiband images
fits_index : integer
Which HDU object in the list to work with
"""
print('Reading image data from %s...' % filenames[0])
self.filenames = filenames
self.file_type = file_type
self.metadata = {}
self.wcs = None
self.fits_index = fits_index
self.hdul_list = []
try:
for f in filenames:
hdul = fits.open(f, memmap=True)
self.hdul_list.append(hdul)
except FileNotFoundError:
raise FileNotFoundError("File", f, "not found")
# get a test sample
self.get_image_data(0, 10, 0, 10)
if len(name) == 0:
self.name = self._strip_filename()
else:
self.name = name
print('Done!')
def get_image_data(self, row_start, row_end, col_start, col_end):
"""Returns the image data from a fits HDUlist object
        Parameters
        ----------
        row_start, row_end : int
            Row range (in pixels) of the cutout to read
        col_start, col_end : int
            Column range (in pixels) of the cutout to read
Returns
-------
np.array
Image data
"""
images = []
rs = row_start
re = row_end
cs = col_start
ce = col_end
for hdul in self.hdul_list:
if self.fits_index is None:
for i in range(len(hdul)):
self.fits_index = i
# snap1 = tracemalloc.take_snapshot()
dat = hdul[self.fits_index].data
# snap2 = tracemalloc.take_snapshot()
# diff = snap2.compare_to(snap1, 'lineno')
# print(diff[0].size_diff)
if dat is not None:
if len(dat.shape) > 2:
dat = dat[0][0]
image = dat[rs:re, cs:ce]
break
self.metadata = dict(hdul[self.fits_index].header)
if self.wcs is None:
self.wcs = WCS(hdul[self.fits_index].header, naxis=2)
else:
dat = hdul[self.fits_index].data
if len(dat.shape) > 2:
dat = dat[0][0]
image = dat[rs:re, cs:ce]
if len(image.shape) > 2:
image = np.squeeze(image)
images.append(image)
if len(images) > 1:
# Should now be a 3d array with multiple channels
image = np.dstack(images)
self.metadata['NAXIS3'] = image.shape[-1]
else:
image = images[0] # Was just the one image
return image
def get_image_shape(self):
"""
Efficiently returns the shape of the image.
Returns
-------
tuple
Image shape
"""
return (self.metadata['NAXIS1'], self.metadata['NAXIS2'])
def clean_up(self):
"""
Closes all open fits files so they don't remain in memory.
"""
print("Closing Fits files...")
for hdul in self.hdul_list:
hdul.close()
logging_tools.log("Fits files closed successfully.")
print("Files closed.")
def _strip_filename(self):
"""
Tiny utility function to make a nice formatted version of the image
name from the input filename string
Returns
-------
string
Formatted file name
"""
s1 = self.filenames[0].split(os.path.sep)[-1]
# extension = s1.split('.')[-1]
return s1
def get_coords(self, x, y):
"""
Returns the RA and DEC coordinates for a given set of pixels.
Parameters
----------
x : int
x pixel value
        y : int
y pixel value
Returns
-------
ra, dec
Sky coordinates
"""
return self.wcs.wcs_pix2world(x, y, 0)
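# Illustrative usage of AstroImage (hypothetical file names, added for clarity;
# not part of the original module):
#
#     img = AstroImage(['r_band.fits', 'g_band.fits'], fits_index=0, name='field1')
#     cutout = img.get_image_data(0, 128, 0, 128)   # 128x128 cutout, one channel per file
#     ra, dec = img.get_coords(64, 64)
#     img.clean_up()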
class ImageDataset(Dataset):
def __init__(self, fits_index=None, window_size=128, window_shift=None,
display_image_size=128, band_prefixes=[], bands_rgb={},
transform_function=None, display_transform_function=None,
plot_square=False, catalogue=None,
plot_cmap='hot', **kwargs):
"""
Read in a set of images either from a directory or from a list of file
paths (absolute). Inherits from Dataset class.
Parameters
----------
filename : str
            If a single file (of any type) is to be read from, the path can be
given using this kwarg.
directory : str
A directory can be given instead of an explicit list of files. The
child class will load all appropriate files in this directory.
list_of_files : list
Instead of the above, a list of files to be loaded can be
explicitly given.
output_dir : str
The directory to save the log file and all outputs to. Defaults to
'./'
fits_index : integer, optional
If these are fits files, specifies which HDU object in the list to
work with
window_size : int, tuple or list, optional
The size of the cutout in pixels. If an integer is provided, the
cutouts will be square. Otherwise a list of
[window_size_x, window_size_y] is expected.
window_shift : int, tuple or list, optional
The size of the window shift in pixels. If the shift is less than
the window size, a sliding window is used to create cutouts. This
can be particularly useful for (for example) creating a training
set for an autoencoder. If an integer is provided, the shift will
be the same in both directions. Otherwise a list of
[window_shift_x, window_shift_y] is expected.
        display_image_size : int, optional
            The size of the image to be displayed on the
web page. If the image is smaller than this, it will be
interpolated up to the higher number of pixels. If larger, it will
be downsampled.
band_prefixes : list
Allows you to specify a prefix for an image which corresponds to a
band identifier. This has to be a prefix and the rest of the image
            name must be identical in order for Astronomaly to detect that
            these images should be stacked together.
bands_rgb : Dictionary
Maps the input bands (in separate folders) to rgb values to allow
false colour image plotting. Note that here you can only select
three bands to plot although you can use as many bands as you like
in band_prefixes. The dictionary should have 'r', 'g' and 'b' as
keys with the band prefixes as values.
transform_function : function or list, optional
The transformation function or list of functions that will be
applied to each cutout. The function should take an input 2d array
(the cutout) and return an output 2d array. If a list is provided,
each function is applied in the order of the list.
catalogue : pandas.DataFrame or similar
A catalogue of the positions of sources around which cutouts will
be extracted. Note that a cutout of size "window_size" will be
extracted around these positions and must be the same for all
sources.
plot_square : bool, optional
If True this will add a white border indicating the boundaries of
the original cutout when the image is displayed in the webapp.
plot_cmap : str, optional
The colormap with which to plot the image
"""
super().__init__(fits_index=fits_index, window_size=window_size,
window_shift=window_shift,
display_image_size=display_image_size,
band_prefixes=band_prefixes, bands_rgb=bands_rgb,
transform_function=transform_function,
display_transform_function=display_transform_function,
plot_square=plot_square, catalogue=catalogue,
plot_cmap=plot_cmap,
**kwargs)
self.known_file_types = ['fits', 'fits.fz', 'fits.gz',
'FITS', 'FITS.fz', 'FITS.gz']
self.data_type = 'image'
images = {}
tracemalloc.start()
if len(band_prefixes) != 0:
# Get the matching images in different bands
bands_files = {}
for p in band_prefixes:
for f in self.files:
if p in f:
start_ind = f.find(p)
end_ind = start_ind + len(p)
flname = f[end_ind:]
if flname not in bands_files.keys():
bands_files[flname] = [f]
else:
bands_files[flname] += [f]
for k in bands_files.keys():
extension = k.split('.')[-1]
# print(k, extension)
if extension == 'fz' or extension == 'gz':
extension = '.'.join(k.split('.')[-2:])
if extension in self.known_file_types:
try:
astro_img = AstroImage(bands_files[k],
file_type=extension,
fits_index=fits_index,
name=k)
images[k] = astro_img
except Exception as e:
msg = "Cannot read image " + k + "\n \
Exception is: " + (str)(e)
logging_tools.log(msg, level="ERROR")
            # Also convert the rgb dictionary into an index dictionary
            # corresponding to the position of each band in band_prefixes
if len(bands_rgb) == 0:
self.bands_rgb = {'r': 0, 'g': 1, 'b': 2}
else:
self.bands_rgb = {}
for k in bands_rgb.keys():
band = bands_rgb[k]
ind = band_prefixes.index(band)
self.bands_rgb[k] = ind
else:
for f in self.files:
extension = f.split('.')[-1]
if extension == 'fz' or extension == 'gz':
extension = '.'.join(f.split('.')[-2:])
if extension in self.known_file_types:
try:
astro_img = AstroImage([f],
file_type=extension,
fits_index=fits_index)
images[astro_img.name] = astro_img
except Exception as e:
msg = "Cannot read image " + f + "\n \
Exception is: " + (str)(e)
logging_tools.log(msg, level="ERROR")
if len(list(images.keys())) == 0:
msg = "No images found, Astronomaly cannot proceed."
logging_tools.log(msg, level="ERROR")
raise IOError(msg)
try:
self.window_size_x = window_size[0]
self.window_size_y = window_size[1]
except TypeError:
self.window_size_x = window_size
self.window_size_y = window_size
# Allows sliding windows
if window_shift is not None:
try:
self.window_shift_x = window_shift[0]
self.window_shift_y = window_shift[1]
except TypeError:
self.window_shift_x = window_shift
self.window_shift_y = window_shift
else:
self.window_shift_x = self.window_size_x
self.window_shift_y = self.window_size_y
self.images = images
self.transform_function = transform_function
if display_transform_function is None:
self.display_transform_function = transform_function
else:
self.display_transform_function = display_transform_function
self.plot_square = plot_square
self.plot_cmap = plot_cmap
self.catalogue = catalogue
self.display_image_size = display_image_size
self.band_prefixes = band_prefixes
self.metadata = pd.DataFrame(data=[])
if self.catalogue is None:
self.create_catalogue()
else:
self.convert_catalogue_to_metadata()
print('A catalogue of ', len(self.metadata),
'sources has been provided.')
if 'original_image' in self.metadata.columns:
for img in np.unique(self.metadata.original_image):
if img not in images.keys():
logging_tools.log('Image ' + img + """ found in catalogue
but not in provided image data. Removing from
catalogue.""", level='WARNING')
msk = self.metadata.original_image == img
self.metadata.drop(self.metadata.index[msk], inplace=True)
print('Catalogue reduced to ', len(self.metadata),
'sources')
self.index = self.metadata.index.values
def create_catalogue(self):
"""
If a catalogue is not supplied, this will generate one by cutting up
the image into cutouts.
"""
print('No catalogue found, one will automatically be generated by \
            splitting the image into cutouts governed by the window_size.')
for image_name in list(self.images.keys()):
astro_img = self.images[image_name]
img_shape = astro_img.get_image_shape()
# Remember, numpy array index of [row, column]
# corresponds to [y, x]
xvals = np.arange(self.window_size_x // 2,
img_shape[1] - self.window_size_x // 2,
self.window_shift_x)
yvals = np.arange(self.window_size_y // 2,
img_shape[0] - self.window_size_y // 2,
self.window_shift_y)
X, Y = np.meshgrid(xvals, yvals)
x_coords = X.ravel()
y_coords = Y.ravel()
ra, dec = astro_img.get_coords(x_coords, y_coords)
original_image_names = [image_name] * len(x_coords)
new_df = pd.DataFrame(data={
'original_image': original_image_names,
'x': x_coords,
'y': y_coords,
'ra': ra,
'dec': dec,
'peak_flux': [-1] * len(ra)})
self.metadata = pd.concat((self.metadata, new_df),
ignore_index=True)
self.metadata.index = self.metadata.index.astype('str')
print('A catalogue of ', len(self.metadata), 'cutouts has been \
created.')
print('Done!')
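    # Worked example (added for illustration): for a single 1024x1024 image with
    # window_size=128 and window_shift=128, the grid above gives centres at
    # 64, 192, ..., 832 on each axis, i.e. 7 x 7 = 49 cutouts in the catalogue.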
def convert_catalogue_to_metadata(self):
if 'original_image' not in self.catalogue.columns:
if len(self.images) > 1:
logging_tools.log("""If multiple fits images are used the
original_image column must be provided in
the catalogue to identify which image the
source belongs to.""",
level='ERROR')
raise ValueError("Incorrect input supplied")
else:
self.catalogue['original_image'] = \
[list(self.images.keys())[0]] * len(self.catalogue)
if 'objid' not in self.catalogue.columns:
self.catalogue['objid'] = np.arange(len(self.catalogue))
if 'peak_flux' not in self.catalogue.columns:
self.catalogue['peak_flux'] = [np.NaN] * len(self.catalogue)
cols = ['original_image', 'x', 'y']
for c in cols[1:]:
if c not in self.catalogue.columns:
logging_tools.log("""If a catalogue is provided the x and y
columns (corresponding to pixel values) must be present""",
level='ERROR')
raise ValueError("Incorrect input supplied")
if 'ra' in self.catalogue.columns:
cols.append('ra')
if 'dec' in self.catalogue.columns:
cols.append('dec')
if 'peak_flux' in self.catalogue.columns:
cols.append('peak_flux')
met = {}
for c in cols:
met[c] = self.catalogue[c].values
the_index = np.array(self.catalogue['objid'].values, dtype='str')
self.metadata = pd.DataFrame(met, index=the_index)
self.metadata['x'] = self.metadata['x'].astype('int')
self.metadata['y'] = self.metadata['y'].astype('int')
def get_sample(self, idx):
"""
Returns the data for a single sample in the dataset as indexed by idx.
Parameters
----------
idx : string
Index of sample
Returns
-------
nd.array
Array of image cutout
"""
x0 = self.metadata.loc[idx, 'x']
y0 = self.metadata.loc[idx, 'y']
original_image = self.metadata.loc[idx, 'original_image']
this_image = self.images[original_image]
x_wid = self.window_size_x // 2
y_wid = self.window_size_y // 2
y_start = y0 - y_wid
y_end = y0 + y_wid
x_start = x0 - x_wid
x_end = x0 + x_wid
invalid_y = y_start < 0 or y_end > this_image.metadata['NAXIS1']
invalid_x = x_start < 0 or x_end > this_image.metadata['NAXIS2']
if invalid_y or invalid_x:
naxis3_present = 'NAXIS3' in this_image.metadata.keys()
if naxis3_present and this_image.metadata['NAXIS3'] > 1:
shp = [self.window_size_y,
self.window_size_x,
this_image.metadata['NAXIS3']]
else:
shp = [self.window_size_y, self.window_size_x]
cutout = np.ones((shp)) * np.nan
else:
cutout = this_image.get_image_data(y_start, y_end, x_start, x_end)
if self.metadata.loc[idx, 'peak_flux'] == -1:
if np.any(np.isnan(cutout)):
flx = -1
else:
flx = np.max(cutout)
self.metadata.loc[idx, 'peak_flux'] = flx
cutout = apply_transform(cutout, self.transform_function)
return cutout
def get_display_data(self, idx):
"""
Returns a single instance of the dataset in a form that is ready to be
displayed by the web front end.
Parameters
----------
idx : str
Index (should be a string to avoid ambiguity)
Returns
-------
png image object
Object ready to be passed directly to the frontend
"""
try:
img_name = self.metadata.loc[idx, 'original_image']
except KeyError:
return None
this_image = self.images[img_name]
x0 = self.metadata.loc[idx, 'x']
y0 = self.metadata.loc[idx, 'y']
factor = 1.5
xmin = (int)(x0 - self.window_size_x * factor)
xmax = (int)(x0 + self.window_size_x * factor)
ymin = (int)(y0 - self.window_size_y * factor)
ymax = (int)(y0 + self.window_size_y * factor)
xstart = max(xmin, 0)
xend = min(xmax, this_image.metadata['NAXIS1'])
ystart = max(ymin, 0)
yend = min(ymax, this_image.metadata['NAXIS2'])
tot_size_x = int(2 * self.window_size_x * factor)
tot_size_y = int(2 * self.window_size_y * factor)
naxis3_present = 'NAXIS3' in this_image.metadata.keys()
if naxis3_present and this_image.metadata['NAXIS3'] > 1:
shp = [tot_size_y, tot_size_x, this_image.metadata['NAXIS3']]
else:
shp = [tot_size_y, tot_size_x]
cutout = np.zeros(shp)
# cutout[ystart - ymin:tot_size_y - (ymax - yend),
# xstart - xmin:tot_size_x - (xmax - xend)] = img[ystart:yend,
#
# xstart:xend]
img_data = this_image.get_image_data(ystart, yend, xstart, xend)
cutout[ystart - ymin:yend - ymin,
xstart - xmin:xend - xmin] = img_data
cutout = np.nan_to_num(cutout)
cutout = apply_transform(cutout, self.display_transform_function)
if len(cutout.shape) > 2 and cutout.shape[-1] >= 3:
new_cutout = np.zeros([cutout.shape[0], cutout.shape[1], 3])
new_cutout[:, :, 0] = cutout[:, :, self.bands_rgb['r']]
new_cutout[:, :, 1] = cutout[:, :, self.bands_rgb['g']]
new_cutout[:, :, 2] = cutout[:, :, self.bands_rgb['b']]
cutout = new_cutout
if self.plot_square:
offset_x = (tot_size_x - self.window_size_x) // 2
offset_y = (tot_size_y - self.window_size_y) // 2
x1 = offset_x
x2 = tot_size_x - offset_x
y1 = offset_y
y2 = tot_size_y - offset_y
mx = cutout.max()
cutout[y1:y2, x1] = mx
cutout[y1:y2, x2] = mx
cutout[y1, x1:x2] = mx
cutout[y2, x1:x2] = mx
min_edge = min(cutout.shape[:2])
max_edge = max(cutout.shape[:2])
if max_edge != self.display_image_size:
new_max = self.display_image_size
new_min = int(min_edge * new_max / max_edge)
if cutout.shape[0] <= cutout.shape[1]:
new_shape = [new_min, new_max]
else:
new_shape = [new_max, new_min]
if len(cutout.shape) > 2:
new_shape.append(cutout.shape[-1])
cutout = resize(cutout, new_shape, anti_aliasing=False)
return convert_array_to_image(cutout, plot_cmap=self.plot_cmap)
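# Illustrative usage of ImageDataset (hypothetical file names, added for clarity;
# not part of the original module):
#
#     dataset = ImageDataset(list_of_files=['field1.fits'], window_size=128,
#                            window_shift=64, transform_function=np.log1p)
#     cutout = dataset.get_sample(dataset.index[0])
#     png = dataset.get_display_data(dataset.index[0])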
class ImageThumbnailsDataset(Dataset):
def __init__(self, display_image_size=128, transform_function=None,
display_transform_function=None,
catalogue=None, additional_metadata=None, **kwargs):
"""
Read in a set of images that have already been cut into thumbnails.
This would be uncommon with astronomical data but is needed to read a
dataset like galaxy zoo. Inherits from Dataset class.
Parameters
----------
filename : str
            If a single file (of any type) is to be read from, the path can be
given using this kwarg.
directory : str
A directory can be given instead of an explicit list of files. The
child class will load all appropriate files in this directory.
list_of_files : list
Instead of the above, a list of files to be loaded can be
explicitly given.
output_dir : str
            The directory to save the log file and all outputs to. Defaults to
            './'
        display_image_size : int, optional
            The size of the image to be displayed on the
web page. If the image is smaller than this, it will be
interpolated up to the higher number of pixels. If larger, it will
be downsampled.
transform_function : function or list, optional
The transformation function or list of functions that will be
applied to each cutout. The function should take an input 2d array
(the cutout) and return an output 2d array. If a list is provided,
each function is applied in the order of the list.
catalogue : pandas.DataFrame or similar
A catalogue of the positions of sources around which cutouts will
be extracted. Note that a cutout of size "window_size" will be
extracted around these positions and must be the same for all
sources.
"""
super().__init__(transform_function=transform_function,
display_image_size=128, catalogue=catalogue,
**kwargs)
self.data_type = 'image'
self.known_file_types = ['png', 'jpg', 'jpeg', 'bmp', 'tif', 'tiff']
self.transform_function = transform_function
if display_transform_function is None:
self.display_transform_function = self.transform_function
else:
self.display_transform_function = display_transform_function
self.display_image_size = display_image_size
if catalogue is not None:
if 'objid' in catalogue.columns:
                catalogue = catalogue.set_index('objid')
self.metadata = catalogue
else:
inds = []
file_paths = []
for f in self.files:
extension = f.split('.')[-1]
if extension in self.known_file_types:
inds.append(
f.split(os.path.sep)[-1][:-(len(extension) + 1)])
file_paths.append(f)
self.metadata = pd.DataFrame(index=inds,
data={'filename': file_paths})
self.index = self.metadata.index.values
if additional_metadata is not None:
self.metadata = self.metadata.join(additional_metadata)
def get_sample(self, idx):
"""
Returns the data for a single sample in the dataset as indexed by idx.
Parameters
----------
idx : string
Index of sample
Returns
-------
nd.array
Array of image cutout
"""
filename = self.metadata.loc[idx, 'filename']
img = cv2.imread(filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return apply_transform(img, self.transform_function)
def get_display_data(self, idx):
"""
Returns a single instance of the dataset in a form that is ready to be
displayed by the web front end.
Parameters
----------
idx : str
Index (should be a string to avoid ambiguity)
Returns
-------
png image object
Object ready to be passed directly to the frontend
"""
filename = self.metadata.loc[idx, 'filename']
cutout = cv2.imread(filename)
cutout = cv2.cvtColor(cutout, cv2.COLOR_BGR2RGB)
print(cutout.shape)
cutout = apply_transform(cutout, self.display_transform_function)
min_edge = min(cutout.shape[:2])
max_edge = max(cutout.shape[:2])
if max_edge != self.display_image_size:
new_max = self.display_image_size
new_min = int(min_edge * new_max / max_edge)
if cutout.shape[0] <= cutout.shape[1]:
new_shape = [new_min, new_max]
else:
new_shape = [new_max, new_min]
if len(cutout.shape) > 2:
new_shape.append(cutout.shape[-1])
cutout = resize(cutout, new_shape, anti_aliasing=False)
return convert_array_to_image(cutout)
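# Illustrative usage of ImageThumbnailsDataset (hypothetical directory, added for
# clarity; not part of the original module):
#
#     thumbs = ImageThumbnailsDataset(directory='galaxy_zoo_cutouts/',
#                                     display_image_size=128)
#     sample = thumbs.get_sample(thumbs.index[0])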
| 37.752235 | 95 | 0.54753 |
73d50c87d34d80b156caf768a4918e624d67a76b | 8,764 | py | Python | SCRAPE/Lib/site-packages/IPython/core/tests/test_history.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | 2 | 2022-02-26T11:19:40.000Z | 2022-03-28T08:23:25.000Z | SCRAPE/Lib/site-packages/IPython/core/tests/test_history.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | null | null | null | SCRAPE/Lib/site-packages/IPython/core/tests/test_history.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | 1 | 2022-03-28T09:19:34.000Z | 2022-03-28T09:19:34.000Z | # coding: utf-8
"""Tests for the IPython tab-completion machinery.
"""
#-----------------------------------------------------------------------------
# Module imports
#-----------------------------------------------------------------------------
# stdlib
import io
import sqlite3
import sys
import tempfile
from datetime import datetime
from pathlib import Path
from tempfile import TemporaryDirectory
# our own packages
from traitlets.config.loader import Config
from IPython.core.history import HistoryManager, extract_hist_ranges
def test_proper_default_encoding():
assert sys.getdefaultencoding() == "utf-8"
def test_history():
ip = get_ipython()
with TemporaryDirectory() as tmpdir:
tmp_path = Path(tmpdir)
hist_manager_ori = ip.history_manager
hist_file = tmp_path / "history.sqlite"
try:
ip.history_manager = HistoryManager(shell=ip, hist_file=hist_file)
hist = ["a=1", "def f():\n test = 1\n return test", "b='€Æ¾÷ß'"]
for i, h in enumerate(hist, start=1):
ip.history_manager.store_inputs(i, h)
ip.history_manager.db_log_output = True
# Doesn't match the input, but we'll just check it's stored.
ip.history_manager.output_hist_reprs[3] = "spam"
ip.history_manager.store_output(3)
assert ip.history_manager.input_hist_raw == [""] + hist
# Detailed tests for _get_range_session
grs = ip.history_manager._get_range_session
assert list(grs(start=2, stop=-1)) == list(zip([0], [2], hist[1:-1]))
assert list(grs(start=-2)) == list(zip([0, 0], [2, 3], hist[-2:]))
assert list(grs(output=True)) == list(
zip([0, 0, 0], [1, 2, 3], zip(hist, [None, None, "spam"]))
)
# Check whether specifying a range beyond the end of the current
# session results in an error (gh-804)
ip.run_line_magic("hist", "2-500")
# Check that we can write non-ascii characters to a file
ip.run_line_magic("hist", "-f %s" % (tmp_path / "test1"))
ip.run_line_magic("hist", "-pf %s" % (tmp_path / "test2"))
ip.run_line_magic("hist", "-nf %s" % (tmp_path / "test3"))
ip.run_line_magic("save", "%s 1-10" % (tmp_path / "test4"))
# New session
ip.history_manager.reset()
newcmds = ["z=5", "class X(object):\n pass", "k='p'", "z=5"]
for i, cmd in enumerate(newcmds, start=1):
ip.history_manager.store_inputs(i, cmd)
gothist = ip.history_manager.get_range(start=1, stop=4)
assert list(gothist) == list(zip([0, 0, 0], [1, 2, 3], newcmds))
# Previous session:
gothist = ip.history_manager.get_range(-1, 1, 4)
assert list(gothist) == list(zip([1, 1, 1], [1, 2, 3], hist))
newhist = [(2, i, c) for (i, c) in enumerate(newcmds, 1)]
# Check get_hist_tail
gothist = ip.history_manager.get_tail(5, output=True,
include_latest=True)
expected = [(1, 3, (hist[-1], "spam"))] \
+ [(s, n, (c, None)) for (s, n, c) in newhist]
assert list(gothist) == expected
gothist = ip.history_manager.get_tail(2)
expected = newhist[-3:-1]
assert list(gothist) == expected
# Check get_hist_search
gothist = ip.history_manager.search("*test*")
assert list(gothist) == [(1, 2, hist[1])]
gothist = ip.history_manager.search("*=*")
assert list(gothist) == [
(1, 1, hist[0]),
(1, 2, hist[1]),
(1, 3, hist[2]),
newhist[0],
newhist[2],
newhist[3],
]
gothist = ip.history_manager.search("*=*", n=4)
assert list(gothist) == [
(1, 3, hist[2]),
newhist[0],
newhist[2],
newhist[3],
]
gothist = ip.history_manager.search("*=*", unique=True)
assert list(gothist) == [
(1, 1, hist[0]),
(1, 2, hist[1]),
(1, 3, hist[2]),
newhist[2],
newhist[3],
]
gothist = ip.history_manager.search("*=*", unique=True, n=3)
assert list(gothist) == [(1, 3, hist[2]), newhist[2], newhist[3]]
gothist = ip.history_manager.search("b*", output=True)
assert list(gothist) == [(1, 3, (hist[2], "spam"))]
# Cross testing: check that magic %save can get previous session.
testfilename = (tmp_path / "test.py").resolve()
ip.run_line_magic("save", str(testfilename) + " ~1/1-3")
with io.open(testfilename, encoding="utf-8") as testfile:
assert testfile.read() == "# coding: utf-8\n" + "\n".join(hist) + "\n"
# Duplicate line numbers - check that it doesn't crash, and
# gets a new session
ip.history_manager.store_inputs(1, "rogue")
ip.history_manager.writeout_cache()
assert ip.history_manager.session_number == 3
# Check that session and line values are not just max values
sessid, lineno, entry = newhist[-1]
assert lineno > 1
ip.history_manager.reset()
lineno = 1
ip.history_manager.store_inputs(lineno, entry)
gothist = ip.history_manager.search("*=*", unique=True)
hist = list(gothist)[-1]
assert sessid < hist[0]
assert hist[1:] == (lineno, entry)
finally:
# Ensure saving thread is shut down before we try to clean up the files
ip.history_manager.save_thread.stop()
# Forcibly close database rather than relying on garbage collection
ip.history_manager.db.close()
# Restore history manager
ip.history_manager = hist_manager_ori
def test_extract_hist_ranges():
instr = "1 2/3 ~4/5-6 ~4/7-~4/9 ~9/2-~7/5 ~10/"
expected = [(0, 1, 2), # 0 == current session
(2, 3, 4),
(-4, 5, 7),
(-4, 7, 10),
(-9, 2, None), # None == to end
(-8, 1, None),
(-7, 1, 6),
(-10, 1, None)]
actual = list(extract_hist_ranges(instr))
assert actual == expected
def test_extract_hist_ranges_empty_str():
instr = ""
expected = [(0, 1, None)] # 0 == current session, None == to end
actual = list(extract_hist_ranges(instr))
assert actual == expected
def test_magic_rerun():
"""Simple test for %rerun (no args -> rerun last line)"""
ip = get_ipython()
ip.run_cell("a = 10", store_history=True)
ip.run_cell("a += 1", store_history=True)
assert ip.user_ns["a"] == 11
ip.run_cell("%rerun", store_history=True)
assert ip.user_ns["a"] == 12
def test_timestamp_type():
ip = get_ipython()
info = ip.history_manager.get_session_info()
assert isinstance(info[1], datetime)
def test_hist_file_config():
cfg = Config()
tfile = tempfile.NamedTemporaryFile(delete=False)
cfg.HistoryManager.hist_file = Path(tfile.name)
try:
hm = HistoryManager(shell=get_ipython(), config=cfg)
assert hm.hist_file == cfg.HistoryManager.hist_file
finally:
try:
Path(tfile.name).unlink()
except OSError:
# same catch as in testing.tools.TempFileMixin
# On Windows, even though we close the file, we still can't
# delete it. I have no clue why
pass
def test_histmanager_disabled():
"""Ensure that disabling the history manager doesn't create a database."""
cfg = Config()
cfg.HistoryAccessor.enabled = False
ip = get_ipython()
with TemporaryDirectory() as tmpdir:
hist_manager_ori = ip.history_manager
hist_file = Path(tmpdir) / "history.sqlite"
cfg.HistoryManager.hist_file = hist_file
try:
ip.history_manager = HistoryManager(shell=ip, config=cfg)
hist = ["a=1", "def f():\n test = 1\n return test", "b='€Æ¾÷ß'"]
for i, h in enumerate(hist, start=1):
ip.history_manager.store_inputs(i, h)
assert ip.history_manager.input_hist_raw == [""] + hist
ip.history_manager.reset()
ip.history_manager.end_session()
finally:
ip.history_manager = hist_manager_ori
# hist_file should not be created
assert hist_file.exists() is False
| 38.104348 | 86 | 0.544158 |
73d5a4670f503af92974e8f36a56af383738ce32 | 1,103 | py | Python | yanghui/704Binary_search.py | technologyMz/ALL_IN_ML | bb63fdd9b98adead316c7f6d8e57742bd0a469df | [
"Apache-2.0"
] | 3 | 2021-01-04T13:08:13.000Z | 2021-08-12T13:08:23.000Z | yanghui/704Binary_search.py | technologyMz/ALL_IN_ML | bb63fdd9b98adead316c7f6d8e57742bd0a469df | [
"Apache-2.0"
] | null | null | null | yanghui/704Binary_search.py | technologyMz/ALL_IN_ML | bb63fdd9b98adead316c7f6d8e57742bd0a469df | [
"Apache-2.0"
] | 1 | 2021-01-04T13:13:14.000Z | 2021-01-04T13:13:14.000Z | def search( nums, target) -> int:
if len(nums) == 0:
return -1
def compare(input_list, target, start, end):
print(start, end)
        if start > end:  # stop only when start > end; at start == end the middle element may still equal target
return -1
middle_index = int(start+(end-start)/2)
if input_list[middle_index] == target:
return middle_index
elif input_list[middle_index] > target:
return compare(input_list, target, start, middle_index - 1)
else:
return compare(input_list, target, middle_index+1, end )
    return compare(nums, target, 0, len(nums)-1)  # end must start at len(nums) - 1
def search2(nums, target) -> int:
left = 0
right = len(nums) - 1
while True:
        if left > right:  # stop only when left > right; at left == right the middle element may still equal target
return -1
middle_index = int(left+(right-left)/2)
if nums[middle_index] == target:
return middle_index
elif nums[middle_index] > target:
left = left
right = middle_index - 1
else:
left = middle_index+1
right = right
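# Illustrative usage (added; both functions return the index of target in the
# sorted list, or -1 if it is absent):
if __name__ == '__main__':
    nums = [-1, 0, 3, 5, 9, 12]
    print(search(nums, 9))   # expected 4
    print(search2(nums, 2))  # expected -1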
| 30.638889 | 71 | 0.574796 |
73d5cf0b5988fbdeeeb7ce4625bbc15a9f618c1c | 1,257 | py | Python | alipay/aop/api/domain/MaterialModifyInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/MaterialModifyInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/MaterialModifyInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MaterialModifyInfo(object):
def __init__(self):
self._content = None
self._type = None
@property
def content(self):
return self._content
@content.setter
def content(self, value):
self._content = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.content:
if hasattr(self.content, 'to_alipay_dict'):
params['content'] = self.content.to_alipay_dict()
else:
params['content'] = self.content
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MaterialModifyInfo()
if 'content' in d:
o.content = d['content']
if 'type' in d:
o.type = d['type']
return o
| 22.446429 | 65 | 0.546539 |
73d5ed660753e639202704b771944179e228ee84 | 75 | py | Python | src/gedml/core/metrics/similarity_metrics/__init__.py | wangck20/GeDML | 1f76ac2094d7b88be7fd4eb6145e5586e547b9ca | [
"MIT"
] | 25 | 2021-09-06T13:26:02.000Z | 2022-01-06T13:25:24.000Z | src/gedml/core/metrics/similarity_metrics/__init__.py | wangck20/GeDML | 1f76ac2094d7b88be7fd4eb6145e5586e547b9ca | [
"MIT"
] | 1 | 2021-09-09T08:29:29.000Z | 2021-09-13T15:05:59.000Z | src/gedml/core/metrics/similarity_metrics/__init__.py | wangck20/GeDML | 1f76ac2094d7b88be7fd4eb6145e5586e547b9ca | [
"MIT"
] | 2 | 2021-09-07T08:44:41.000Z | 2021-09-09T08:31:55.000Z | from .cosine_metric import CosineMetric
from .moco_metric import MoCoMetric | 37.5 | 39 | 0.88 |
73d60532ac7e4949a9eeafd7495b11475c281857 | 7,073 | py | Python | sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/operations/_protection_container_operation_results_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-01-24T08:54:57.000Z | 2022-01-24T08:54:57.000Z | sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/operations/_protection_container_operation_results_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/operations/_protection_container_operation_results_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
vault_name: str,
resource_group_name: str,
subscription_id: str,
fabric_name: str,
container_name: str,
operation_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-12-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/operationResults/{operationId}") # pylint: disable=line-too-long
path_format_arguments = {
"vaultName": _SERIALIZER.url("vault_name", vault_name, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"fabricName": _SERIALIZER.url("fabric_name", fabric_name, 'str'),
"containerName": _SERIALIZER.url("container_name", container_name, 'str'),
"operationId": _SERIALIZER.url("operation_id", operation_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class ProtectionContainerOperationResultsOperations(object):
"""ProtectionContainerOperationResultsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.activestamp.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
vault_name: str,
resource_group_name: str,
fabric_name: str,
container_name: str,
operation_id: str,
**kwargs: Any
) -> Optional["_models.ProtectionContainerResource"]:
"""Fetches the result of any operation on the container.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the container.
:type fabric_name: str
:param container_name: Container name whose information should be fetched.
:type container_name: str
:param operation_id: Operation ID which represents the operation whose result needs to be
fetched.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProtectionContainerResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource or
None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ProtectionContainerResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-12-01") # type: str
request = build_get_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
fabric_name=fabric_name,
container_name=container_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProtectionContainerResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/operationResults/{operationId}"} # type: ignore
| 42.866667 | 291 | 0.692634 |
73d6122d7576d8e2c87dc32902b60298067312f1 | 1,551 | py | Python | project/user/views.py | fv316/flask-template-project | 026459b299c7aa4d82c2b59b98e3c929b4786a78 | [
"MIT"
] | 9 | 2017-02-08T21:42:15.000Z | 2021-12-15T05:18:18.000Z | project/user/views.py | fv316/flask-template-project | 026459b299c7aa4d82c2b59b98e3c929b4786a78 | [
"MIT"
] | 10 | 2016-07-25T11:00:08.000Z | 2019-09-25T14:56:40.000Z | project/user/views.py | fv316/flask-template-project | 026459b299c7aa4d82c2b59b98e3c929b4786a78 | [
"MIT"
] | 7 | 2016-11-01T20:11:03.000Z | 2020-02-04T14:25:49.000Z | # !/usr/bin/python
# -*- coding: utf-8 -*-
from flask import redirect, render_template, request, url_for, Blueprint
from flask_login import login_required, current_user
from project.user.handlers.user_handler import UserHandler
from project.user.forms import LoginForm, RegisterForm
from project.utils import flash
user_blueprint = Blueprint('user', __name__)
@user_blueprint.route('/settings/', methods=['GET'])
@login_required
def settings():
flash.info('Welcome to your dashboard "{}".'.format(current_user.username))
return render_template('user/settings.html')
@user_blueprint.route('/sign-in/', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if request.method == "POST":
user = UserHandler.register(form)
if user:
flash.info('Welcome "{}".'.format(user.username))
return redirect(request.args.get('next', url_for('app.home')))
return render_template('user/register.html', form=form)
@user_blueprint.route('/login/', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if request.method == "POST":
user = UserHandler.login(form)
if user:
flash.info('Welcome "{}".'.format(user.username))
return redirect(request.args.get('next', url_for('app.home')))
return render_template('user/login.html', form=form)
@user_blueprint.route('/logout/')
@login_required
def logout():
UserHandler.logout()
flash.info(u'You were logged out.')
return redirect(url_for('app.home'))
| 29.264151 | 79 | 0.684075 |
73d61e5471204fe4a63d69cae97b802124e6c8ef | 11,710 | py | Python | services/traction/acapy_client/model/send_menu.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 12 | 2022-01-29T20:30:03.000Z | 2022-03-29T11:46:14.000Z | services/traction/acapy_client/model/send_menu.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 38 | 2021-11-22T17:52:50.000Z | 2022-03-31T17:52:00.000Z | services/traction/acapy_client/model/send_menu.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 9 | 2021-11-22T18:05:48.000Z | 2022-03-29T11:25:08.000Z | """
Aries Cloud Agent
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0.7.2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from acapy_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel,
)
from acapy_client.exceptions import ApiAttributeError
def lazy_import():
from acapy_client.model.menu_json import MenuJson
globals()["MenuJson"] = MenuJson
class SendMenu(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"menu": (
{str: (bool, date, datetime, dict, float, int, list, str, none_type)},
), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"menu": "menu", # noqa: E501
}
read_only_vars = {}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, menu, *args, **kwargs): # noqa: E501
"""SendMenu - a model defined in OpenAPI
Args:
menu ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Menu to send to connection
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.menu = menu
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, menu, *args, **kwargs): # noqa: E501
"""SendMenu - a model defined in OpenAPI
Args:
menu ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Menu to send to connection
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.menu = menu
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(
f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes."
)
| 40.801394 | 124 | 0.56012 |
73d631644f9150d4d2096eb272b4ecce978e767d | 2,878 | py | Python | vec_env/monitor.py | Hwhitetooth/jax_muzero | b8ab36251d22e0246d61514841ba17a22f4b2a36 | [
"MIT"
] | 17 | 2022-02-27T13:40:25.000Z | 2022-03-24T21:06:02.000Z | vec_env/monitor.py | Hwhitetooth/jax_muzero | b8ab36251d22e0246d61514841ba17a22f4b2a36 | [
"MIT"
] | 1 | 2022-03-08T19:47:52.000Z | 2022-03-24T21:05:30.000Z | vec_env/monitor.py | Hwhitetooth/jax_muzero | b8ab36251d22e0246d61514841ba17a22f4b2a36 | [
"MIT"
] | 1 | 2022-03-08T14:58:12.000Z | 2022-03-08T14:58:12.000Z | import time
from gym.core import Wrapper
class Monitor(Wrapper):
f = None
def __init__(self, env, allow_early_resets=False, reset_keywords=(), info_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
self.results_writer = None
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
self.reset_state()
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError('Expected you to pass kwarg %s into reset' % k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def reset_state(self):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError(
"Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.update(ob, rew, done, info)
return (ob, rew, done, info)
def update(self, ob, rew, done, info):
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.results_writer:
self.results_writer.write_row(epinfo)
assert isinstance(info, dict)
if isinstance(info, dict):
info['episode'] = epinfo
self.total_steps += 1
def close(self):
super(Monitor, self).close()
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
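# Illustrative usage (assumes a gym environment; added for clarity, not part of
# the original file):
#
#     env = Monitor(gym.make('CartPole-v1'), allow_early_resets=True)
#     ob = env.reset()
#     ob, rew, done, info = env.step(env.action_space.sample())
#     # once an episode ends, info['episode'] holds {'r': ..., 'l': ..., 't': ...}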
| 34.261905 | 159 | 0.606324 |
73d64f57aabd3849c4b0bdc17ddbaa6987552a25 | 127 | py | Python | tests/conftest.py | NGalandim/roboai-python-cli | 2e2a4c82a0294dd9a43eb85ebb452791abcba8d8 | [
"MIT"
] | 3 | 2020-10-21T11:11:28.000Z | 2022-02-11T18:00:48.000Z | tests/conftest.py | NGalandim/roboai-python-cli | 2e2a4c82a0294dd9a43eb85ebb452791abcba8d8 | [
"MIT"
] | 35 | 2020-10-21T16:36:47.000Z | 2021-11-04T12:45:39.000Z | tests/conftest.py | NGalandim/roboai-python-cli | 2e2a4c82a0294dd9a43eb85ebb452791abcba8d8 | [
"MIT"
] | 2 | 2020-10-13T10:48:48.000Z | 2022-02-11T16:30:35.000Z | import pytest
from click.testing import CliRunner
@pytest.fixture()
def runner():
runner = CliRunner()
return runner
| 14.111111 | 35 | 0.724409 |
73d6558e43d4355bcf7506d10d8b771cbbdb9c1c | 227 | py | Python | notifications/templatetags/notifications_tags.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | 1 | 2021-11-06T12:08:26.000Z | 2021-11-06T12:08:26.000Z | notifications/templatetags/notifications_tags.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | null | null | null | notifications/templatetags/notifications_tags.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | null | null | null | from django import template
from django.conf import settings
register = template.Library()
@register.filter
def company_profile_url(company_number):
return settings.FAS_COMPANY_PROFILE_URL.format(number=company_number)
| 20.636364 | 73 | 0.828194 |
73d6a6eccf96a91b8399508d45cded62fba6788d | 2,644 | py | Python | tql/algo_ml/features/transformer/DF2FFM.py | Jie-Yuan/1_DataMining | f5338388b4f883233f350d4fb9c5903180883430 | [
"Apache-2.0"
] | 14 | 2019-06-25T13:46:32.000Z | 2020-10-27T02:04:59.000Z | tql/algo_ml/features/transformer/DF2FFM.py | Jie-Yuan/2_DataMining | f5338388b4f883233f350d4fb9c5903180883430 | [
"Apache-2.0"
] | null | null | null | tql/algo_ml/features/transformer/DF2FFM.py | Jie-Yuan/2_DataMining | f5338388b4f883233f350d4fb9c5903180883430 | [
"Apache-2.0"
] | 7 | 2019-06-25T13:26:16.000Z | 2020-10-27T02:05:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-Python.
# @File : DF2FFM
# @Time : 2019-08-26 20:18
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
import json
from tqdm import tqdm
from csv import DictReader
def csv2libffm(input, fieldnames=None, cols_cat=None, cols_num=None, cols_multi_cat=None, label='label', sep=',',
output='train.libffm', features_map='features.json'):
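    """Convert a CSV/TSV file into the libffm text format: each row is written
    as "<label> <field>:<feature>:<value> ...", and the feature-to-index
    mapping is dumped to `features_map` as JSON.
    """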
global field_features
field_features = {}
dialect_map = {',': 'excel', '\t': 'excel-tab'}
with open(output, 'a') as output:
with open(input) as input:
for row in tqdm(DictReader(input, fieldnames, dialect=dialect_map[sep]), desc="Transform ..."):
line = []
                # Categorical features
                cols_cat = cols_cat if cols_cat else [i for i in row.keys() if i != label]  # default: treat all non-label columns as categorical
for idx, field in enumerate(cols_cat, 1):
feature = row[field]
                    ff = field + '______' + feature if feature else 'NA'  # each categorical value is its own bin
fnum = field_features.setdefault(ff, len(field_features) + 1)
line.append(f'{idx}:{fnum}:1')
                # Multi-categorical features: TODO: cols_multi_cat
                # Numerical features
if cols_num is not None:
for idx, field in enumerate(cols_num, idx + 1):
feature = row[field]
                        ff = field  # a numerical field is treated as a single bin
fnum = field_features.setdefault(ff, len(field_features) + 1)
if feature != '' and feature != '0':
line.append(f'{idx}:{fnum}:{feature}')
output.write(f"{row.get(label, '-1')} {' '.join(line)}\n")
with open(features_map, 'w') as f:
return json.dump(field_features, f)
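# Worked example of the output format (illustrative): for a row
# {'label': '1', 'f1': 'a', 'f2': '3.5'} with cols_cat=['f1'] and cols_num=['f2'],
# the line written above is "1 1:1:1 2:2:3.5"
# (field_index:feature_index:value; categorical features always get value 1).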
# ########################### Lets build some data and test ############################
#
# train, y = make_classification(n_samples=10, n_features=5, n_informative=2, n_redundant=2, n_classes=2, random_state=42)
#
# train = pd.DataFrame(train, columns=['int1', 'int2', 'int3', 's1', 's2'])
# train['int1'] = train['int1'].map(int)
# train['int2'] = train['int2'].map(int)
# train['int3'] = train['int3'].map(int)
# train['s1'] = round(np.log(abs(train['s1'] + 1))).map(str)
# train['s2'] = round(np.log(abs(train['s2'] + 1))).map(str)
# train['clicked'] = y
#
# ffm_train = FFMFormatPandas()
# ffm_train_data = ffm_train.fit_transform(train, y='clicked')
# print('Base data')
# print(train[0:10])
# print('FFM data')
# print(ffm_train_data[0:10])
| 37.771429 | 122 | 0.545764 |
73d6b188b6c62a2f4de4fe070a0b7c370cf125a1 | 486 | py | Python | setup.py | JayDwayne/Airtel-Money-Smart-API-SDK-Gateway | 5d5d80655eaa34a2cbf24058bdc0073b9ac72f2d | [
"MIT"
] | 1 | 2021-06-24T10:25:44.000Z | 2021-06-24T10:25:44.000Z | setup.py | JayDwayne/Airtel-Money-Smart-API-SDK-Gateway | 5d5d80655eaa34a2cbf24058bdc0073b9ac72f2d | [
"MIT"
] | null | null | null | setup.py | JayDwayne/Airtel-Money-Smart-API-SDK-Gateway | 5d5d80655eaa34a2cbf24058bdc0073b9ac72f2d | [
"MIT"
] | null | null | null | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='airtel-python-sdk',
version='0.1',
description='Airtel Smart API SDK',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/sartim/airtel-python-sdk',
author='sartim',
license='MIT',
packages=['airtel', ],
install_requires=['requests', ],
zip_safe=False
)
| 27 | 56 | 0.644033 |
73d7096030bd46a37a774b7b0368514c1386a232 | 369 | py | Python | blog/migrations/0015_auto_20151217_2002.py | FarmCodeGary/InspirationForge | 2b78e9c388608ac19ed0ecb114ce5e0cc1f33213 | [
"MIT"
] | 1 | 2015-09-16T17:14:36.000Z | 2015-09-16T17:14:36.000Z | blog/migrations/0015_auto_20151217_2002.py | FarmCodeGary/InspirationForge | 2b78e9c388608ac19ed0ecb114ce5e0cc1f33213 | [
"MIT"
] | null | null | null | blog/migrations/0015_auto_20151217_2002.py | FarmCodeGary/InspirationForge | 2b78e9c388608ac19ed0ecb114ce5e0cc1f33213 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0014_auto_20151217_1939'),
]
operations = [
migrations.AlterModelOptions(
name='contributor',
options={'ordering': ['id']},
),
]
| 19.421053 | 44 | 0.598916 |
73d73dc54f07804400c42c6577ceaba8529aca5f | 610 | py | Python | modules/mod_cmd/test_mod.py | slashsec-edu/cryton-modules | 4826b5e353f64e3d915d2abaead93b234a296304 | [
"MIT"
] | null | null | null | modules/mod_cmd/test_mod.py | slashsec-edu/cryton-modules | 4826b5e353f64e3d915d2abaead93b234a296304 | [
"MIT"
] | null | null | null | modules/mod_cmd/test_mod.py | slashsec-edu/cryton-modules | 4826b5e353f64e3d915d2abaead93b234a296304 | [
"MIT"
] | null | null | null | from unittest import TestCase
from mock import patch, MagicMock, Mock
from mod import execute
@patch('time.sleep', MagicMock())
class TestModules(TestCase):
@patch('os.remove', MagicMock())
def test_mod(self, *args):
args = {'cmd': "test"}
mock_run = Mock()
mock_run.stdout = b"output"
mock_run.stderr = b"error"
with patch('subprocess.run', return_value=mock_run):
ret = execute(args)
self.assertEqual(ret.get('return_code'), 0)
self.assertEqual(ret.get('std_out'), 'output')
self.assertEqual(ret.get('mod_err'), 'error')
| 30.5 | 60 | 0.632787 |
73d77fc6b0278c0c2905089747e32df86332df7b | 668 | py | Python | test/fixtures.py | ser/pypayd-ng | 63d0e09ca5e966177874e6e2e82f6036898a56b0 | [
"MIT"
] | 4 | 2015-03-02T04:01:51.000Z | 2020-01-12T04:06:34.000Z | test/fixtures.py | ser/pypayd-ng | 63d0e09ca5e966177874e6e2e82f6036898a56b0 | [
"MIT"
] | 12 | 2015-03-02T06:07:17.000Z | 2016-01-19T00:43:11.000Z | test/fixtures.py | ser/pypayd-ng | 63d0e09ca5e966177874e6e2e82f6036898a56b0 | [
"MIT"
] | 4 | 2015-04-30T01:49:09.000Z | 2019-06-14T11:25:50.000Z | mnemonic= "like just love know never want time out there make look eye"
priv_hwif_main = "xprv9s21ZrQH143K2jhRQJezi4Zw33cwbUUPvJEY4oAEXzzsBT6SvPziuLg1wLyk8aFnB3m3sGqHzD5smZgE4DToj7Pk77dbVy9oWKVDb2b8nVg"
pub_hwif_main = 'xpub661MyMwAqRbcFDmtWLC15CWfb5TRzwCFHXA8sBZr6LXr4FRbTwJyT8zVndatFTL3nGfwyNi6AxhWF5sazTfKXWWZLzRBsAkJ2dykobXC9No'
pub_hwif_test = 'tpubD6NzVbkrYhZ4X1xjxXB6H7r2vCZ5zKhJq9kSDjczSFHjoY6JYAA4bafL2fffmxHHaBCraxDxi4XYwGNCPKWZQwxrQbAVYhQXcbaAZaJhwBc'
test_pw = "test"
addresses = {"0/0/1": "1BxhLe9ikyAWrL89uV2q8tFF3TtyxuKKX4", "0/0/2": "1ZEofWQUcqSKaKcofPTBujZaUDEmKLeAL", "0/1/1": "13z2Qj2adQMTVyHFKFpeWqCxMHqrhx5cAo"}
wallet_file_name="test_wallet.txt"
| 83.5 | 152 | 0.881737 |
73d794f24db300520fb33b9bfe18222c05d842f4 | 1,230 | py | Python | unittests/test_app.py | threathunterX/nebula_query_web | 9c73d82f7e6bc322ea2edfd86ff62727c49d7abb | [
"Apache-2.0"
] | 1 | 2019-05-01T09:42:31.000Z | 2019-05-01T09:42:31.000Z | unittests/test_app.py | threathunterX/nebula_query_web | 9c73d82f7e6bc322ea2edfd86ff62727c49d7abb | [
"Apache-2.0"
] | null | null | null | unittests/test_app.py | threathunterX/nebula_query_web | 9c73d82f7e6bc322ea2edfd86ff62727c49d7abb | [
"Apache-2.0"
] | 5 | 2019-06-24T05:48:54.000Z | 2022-02-18T03:34:30.000Z | # -*- coding: utf-8 -*-
from flask import Flask
from flask import _app_ctx_stack
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from flask_sqlalchemy import BaseQuery
# database session registry object, configured from
# create_app factory
DbSession = scoped_session(
sessionmaker(),
# __ident_func__ should be hashable, therefore used
# for recognizing different incoming requests
scopefunc=_app_ctx_stack.__ident_func__
)
def create_app(name_handler, config_object):
"""
Application factory
:param name_handler: name of the application.
:param config_object: the configuration object.
"""
app = Flask(name_handler)
app.config.update(config_object)
app.engine = create_engine(app.config["SQLALCHEMY_DATABASE_URI"])
global DbSession
# BaseQuery class provides some additional methods like
# first_or_404() or get_or_404() -- borrowed from
# mitsuhiko's Flask-SQLAlchemy
DbSession.configure(bind=app.engine, query_cls=BaseQuery)
@app.teardown_appcontext
def teardown(exception=None):
global DbSession
if DbSession:
DbSession.remove()
return app | 29.285714 | 69 | 0.744715 |
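# Usage sketch (not part of the original module; the in-memory SQLite URI is a
# hypothetical stand-in for whatever database the real tests use):
#   app = create_app(__name__, {"SQLALCHEMY_DATABASE_URI": "sqlite://"})
#   with app.app_context():
#       session = DbSession()   # scoped session, now bound to app.engine
#       ...                     # on app-context teardown, DbSession.remove()
#                               # is called by the hook registered above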
73d7a0d784aa445ec2ed751b207117ac808fb24f | 4,381 | py | Python | sdks/python/appcenter_sdk/models/Deployment.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/appcenter_sdk/models/Deployment.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/appcenter_sdk/models/Deployment.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class Deployment(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
Upload = "Upload"
Promote = "Promote"
Rollback = "Rollback"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'key': 'string',
'name': 'string',
'latest_release': ''
}
attribute_map = {
'key': 'key',
'name': 'name',
'latest_release': 'latest_release'
}
def __init__(self, key=None, name=None, latest_release=None): # noqa: E501
"""Deployment - a model defined in Swagger""" # noqa: E501
self._key = None
self._name = None
self._latest_release = None
self.discriminator = None
if key is not None:
self.key = key
self.name = name
if latest_release is not None:
self.latest_release = latest_release
@property
def key(self):
"""Gets the key of this Deployment. # noqa: E501
:return: The key of this Deployment. # noqa: E501
:rtype: string
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this Deployment.
:param key: The key of this Deployment. # noqa: E501
:type: string
"""
self._key = key
@property
def name(self):
"""Gets the name of this Deployment. # noqa: E501
:return: The name of this Deployment. # noqa: E501
:rtype: string
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Deployment.
:param name: The name of this Deployment. # noqa: E501
:type: string
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def latest_release(self):
"""Gets the latest_release of this Deployment. # noqa: E501
:return: The latest_release of this Deployment. # noqa: E501
:rtype:
"""
return self._latest_release
@latest_release.setter
def latest_release(self, latest_release):
"""Sets the latest_release of this Deployment.
:param latest_release: The latest_release of this Deployment. # noqa: E501
:type:
"""
self._latest_release = latest_release
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Deployment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
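# Minimal usage sketch (not part of the generated file; the values are made up):
#   d = Deployment(key="k1", name="Staging")
#   d.to_dict()   # -> {'key': 'k1', 'name': 'Staging', 'latest_release': None}
#   d == Deployment(key="k1", name="Staging")   # True, equality compares __dict__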
| 25.923077 | 90 | 0.555581 |
73d7b7a2db5ddfe733dfc22fa30afb4454c96313 | 42 | py | Python | custom_components/nexxtsolution_router/__init__.py | luiscbrenes/nexxtsolution_router | 77d1d16b14900fa802a10ec41e2f83eba7c9b985 | [
"Apache-2.0"
] | null | null | null | custom_components/nexxtsolution_router/__init__.py | luiscbrenes/nexxtsolution_router | 77d1d16b14900fa802a10ec41e2f83eba7c9b985 | [
"Apache-2.0"
] | null | null | null | custom_components/nexxtsolution_router/__init__.py | luiscbrenes/nexxtsolution_router | 77d1d16b14900fa802a10ec41e2f83eba7c9b985 | [
"Apache-2.0"
] | null | null | null | """The nexxtsolution_router component."""
| 21 | 41 | 0.761905 |
73d7ceb04d489c831893992b94466d9a8002b00c | 1,609 | py | Python | challenges/networks/treasures-of-polybius/solve/solution.py | infosec-ucalgary/magpieCTF-2021 | 78459a1e16ac3a135e891c2246232c4890960b92 | [
"MIT"
] | 18 | 2021-02-22T00:09:27.000Z | 2022-02-28T14:23:33.000Z | challenges/networks/treasures-of-polybius/solve/solution.py | infosec-ucalgary/magpieCTF-2021 | 78459a1e16ac3a135e891c2246232c4890960b92 | [
"MIT"
] | null | null | null | challenges/networks/treasures-of-polybius/solve/solution.py | infosec-ucalgary/magpieCTF-2021 | 78459a1e16ac3a135e891c2246232c4890960b92 | [
"MIT"
] | 6 | 2021-02-22T01:32:10.000Z | 2022-02-25T15:48:42.000Z | #!/usr/bin/env python3
import requests
import os
import subprocess as subp
def get_ids():
ids = []
with open("link_list.txt", 'r') as link_list:
links = link_list.read().split('\n')
for link in links:
if not link: continue
id_str = link.split('/')[-2]
ids.append(id_str)
return ids
def main():
ids = get_ids()
prev_key = ""
for id_str in ids:
dl_link = f"https://drive.google.com/u/0/uc?id={id_str}&export=download"
confirm_key = ""
# Essential code is below
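        # Google Drive fronts large files with a virus-scan warning page; the
        # response sets a cookie whose name contains "download" and whose value
        # is the confirmation token needed to build a direct download link.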
session = requests.Session()
resp = session.get(dl_link)
for key, value in session.cookies.get_dict().items():
if "download" not in key: continue
confirm_key = value
break
if not confirm_key:
print("Confirmation key not found :(")
return
prev_key = confirm_key
confirm_link = f"https://drive.google.com/u/0/uc?export=download&id={id_str}&confirm={confirm_key}"
print(f"requesting {confirm_link}")
resp = session.get(confirm_link)
session.close()
if("magpie" not in str(resp.content)):
print("Flag not found, moving to next link")
continue
print("Flag found! Writing to flag.bin")
with open("flag.bin", "wb+") as flag_file:
flag_file.write(resp.content)
print("File with flag written to flag.bin")
print("use `strings flag.bin | grep magpie` to retrieve the flag")
return
return
if __name__ == "__main__": main()
| 26.377049 | 107 | 0.57862 |
73d7d17a683670f223fd2cae9d82c4b835d1b18f | 428 | py | Python | drivers/plot_rotation_X_RUWE.py | lgbouma/earhart | 1d2b65d58655725f43c1bf9705b897bf767d4ca1 | [
"MIT"
] | null | null | null | drivers/plot_rotation_X_RUWE.py | lgbouma/earhart | 1d2b65d58655725f43c1bf9705b897bf767d4ca1 | [
"MIT"
] | null | null | null | drivers/plot_rotation_X_RUWE.py | lgbouma/earhart | 1d2b65d58655725f43c1bf9705b897bf767d4ca1 | [
"MIT"
] | null | null | null | import os
import earhart.plotting as ep
from earhart.paths import RESULTSDIR
PLOTDIR = os.path.join(RESULTSDIR, 'rotation_X_RUWE')
if not os.path.exists(PLOTDIR):
os.mkdir(PLOTDIR)
for yscale in ['linear', 'log']:
ep.plot_rotation_X_RUWE(PLOTDIR, 'viridis', emph_1937=1, yscale=yscale)
ep.plot_rotation_X_RUWE(PLOTDIR, 'nipy_spectral', yscale=yscale)
ep.plot_rotation_X_RUWE(PLOTDIR, 'viridis', yscale=yscale)
| 32.923077 | 75 | 0.759346 |
73d7da1efbeba6fb1b29c2f5ae089f7feb48f982 | 3,132 | py | Python | venv/lib/python3.6/site-packages/kubernetes/client/models/v1_daemon_endpoint.py | DiptoChakrabarty/Kube-Automate | 2072d1aadd58eb405c7308ff5cfecbf50300ead3 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/kubernetes/client/models/v1_daemon_endpoint.py | DiptoChakrabarty/Kube-Automate | 2072d1aadd58eb405c7308ff5cfecbf50300ead3 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/kubernetes/client/models/v1_daemon_endpoint.py | DiptoChakrabarty/Kube-Automate | 2072d1aadd58eb405c7308ff5cfecbf50300ead3 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1DaemonEndpoint(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'port': 'int'
}
attribute_map = {
'port': 'Port'
}
def __init__(self, port=None): # noqa: E501
"""V1DaemonEndpoint - a model defined in OpenAPI""" # noqa: E501
self._port = None
self.discriminator = None
self.port = port
@property
def port(self):
"""Gets the port of this V1DaemonEndpoint. # noqa: E501
Port number of the given endpoint. # noqa: E501
:return: The port of this V1DaemonEndpoint. # noqa: E501
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this V1DaemonEndpoint.
Port number of the given endpoint. # noqa: E501
:param port: The port of this V1DaemonEndpoint. # noqa: E501
:type: int
"""
if port is None:
raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
self._port = port
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DaemonEndpoint):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27 | 124 | 0.557152 |
73d7de1f2e78084a887ebd685901a5eb8bc95de1 | 3,609 | py | Python | rasa/nlu/featurizers/featurizer.py | venushong667/rasa | dc0af420818e263fb4ef97c0d7f1c65e1da83bd1 | [
"Apache-2.0"
] | null | null | null | rasa/nlu/featurizers/featurizer.py | venushong667/rasa | dc0af420818e263fb4ef97c0d7f1c65e1da83bd1 | [
"Apache-2.0"
] | null | null | null | rasa/nlu/featurizers/featurizer.py | venushong667/rasa | dc0af420818e263fb4ef97c0d7f1c65e1da83bd1 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from abc import abstractmethod, ABC
from collections import Counter
from typing import Generic, Iterable, Text, Optional, Dict, Any, TypeVar
from rasa.nlu.constants import FEATURIZER_CLASS_ALIAS
from rasa.shared.nlu.training_data.features import Features
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.nlu.constants import FEATURE_TYPE_SENTENCE, FEATURE_TYPE_SEQUENCE
# TODO: remove after all featurizers have been migrated
from rasa.nlu.featurizers._featurizer import (
Featurizer,
SparseFeaturizer,
DenseFeaturizer,
)
Featurizer = Featurizer
SparseFeaturizer = SparseFeaturizer
DenseFeaturizer = DenseFeaturizer
FeatureType = TypeVar("FeatureType")
class Featurizer2(Generic[FeatureType], ABC):
"""Base class for all featurizers."""
@staticmethod
def get_default_config() -> Dict[Text, Any]:
"""Returns the component's default config."""
return {FEATURIZER_CLASS_ALIAS: None}
def __init__(self, name: Text, config: Dict[Text, Any]) -> None:
"""Instantiates a new featurizer.
Args:
config: configuration
name: a name that can be used as identifier, in case the configuration does
not specify an `alias` (or this `alias` is None)
"""
super().__init__()
self.validate_config(config)
self._config = config
self._identifier = self._config[FEATURIZER_CLASS_ALIAS] or name
@classmethod
@abstractmethod
def validate_config(cls, config: Dict[Text, Any]) -> None:
"""Validates that the component is configured properly."""
...
def add_features_to_message(
self,
sequence: FeatureType,
sentence: Optional[FeatureType],
attribute: Text,
message: Message,
) -> None:
"""Adds sequence and sentence features for the attribute to the given message.
Args:
sequence: sequence feature matrix
sentence: sentence feature matrix
attribute: the attribute which both features describe
message: the message to which we want to add those features
"""
for type, features in [
(FEATURE_TYPE_SEQUENCE, sequence),
(FEATURE_TYPE_SENTENCE, sentence),
]:
if features is not None:
wrapped_feature = Features(features, type, attribute, self._identifier,)
message.add_features(wrapped_feature)
@staticmethod
def raise_if_featurizer_configs_are_not_compatible(
featurizer_configs: Iterable[Dict[Text, Any]]
) -> None:
"""Validates that the given configurations of featurizers can be used together.
Raises:
`InvalidConfigException` if the given featurizers should not be used in
the same graph.
"""
# NOTE: this assumes the names given via the execution context are unique
alias_counter = Counter(
config[FEATURIZER_CLASS_ALIAS]
for config in featurizer_configs
if FEATURIZER_CLASS_ALIAS in config
)
if not alias_counter: # no alias found
return
if alias_counter.most_common(1)[0][1] > 1:
raise InvalidConfigException(
f"Expected the featurizers to have unique names but found "
f" (name, count): {alias_counter.most_common()}. "
f"Please update your config such that each featurizer has a unique "
f"alias."
)
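# Minimal subclass sketch (illustrative only, not part of the Rasa source):
# a concrete featurizer only has to implement `validate_config`; alias handling
# and the feature-wrapping helpers come from this base class.
#
#   class MySimpleFeaturizer(Featurizer2):
#       @classmethod
#       def validate_config(cls, config: Dict[Text, Any]) -> None:
#           if FEATURIZER_CLASS_ALIAS not in config:
#               raise InvalidConfigException("config must contain the alias key")
#
#   feat = MySimpleFeaturizer("my_featurizer", {FEATURIZER_CLASS_ALIAS: "feat_1"})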
| 35.732673 | 88 | 0.665004 |
73d7ff066982e54a4de8f8f5ba5e89d5f2639c25 | 7,250 | py | Python | tfrecords/readers/driving_reader.py | goodgodgd/vode-2020 | 98e34120d642780576ac51d57c2f0597e7e1e524 | [
"BSD-2-Clause"
] | 4 | 2020-08-15T02:14:03.000Z | 2021-01-30T08:18:18.000Z | tfrecords/readers/driving_reader.py | goodgodgd/vode-2020 | 98e34120d642780576ac51d57c2f0597e7e1e524 | [
"BSD-2-Clause"
] | 23 | 2020-01-24T07:25:40.000Z | 2021-06-02T00:50:32.000Z | tfrecords/readers/driving_reader.py | goodgodgd/vode-2020 | 98e34120d642780576ac51d57c2f0597e7e1e524 | [
"BSD-2-Clause"
] | 1 | 2020-07-02T12:26:45.000Z | 2020-07-02T12:26:45.000Z | import os.path as op
import numpy as np
from glob import glob
from PIL import Image
import zipfile
from tfrecords.readers.reader_base import DataReaderBase
from tfrecords.tfr_util import resize_depth_map, apply_color_map
class DrivingStereoReader(DataReaderBase):
def __init__(self, split=""):
super().__init__(split)
self.zip_files = dict()
self.intrinsic = np.array(0)
self.intrinsic_R = np.array(0)
self.stereo_T_LR = np.array(0)
"""
Public methods used outside this class
"""
def init_drive(self, drive_path):
"""
prepare variables to read a new sequence data
drive_path like : .../driving_stereo/train-left-image/2018-07-16-15-18-53.zip
"""
self.zip_files = self._load_zip_files(drive_path)
self.frame_names = self.zip_files["leftImg"].namelist()
self.frame_names.sort()
calib = self._read_calib(drive_path)
# TODO check: LEFT is 103 and RIGHT is 101??
self.intrinsic = np.reshape(calib["P_rect_103"], (3, 4))[:, :3]
# print("intrinsic:\n", self.intrinsic)
self.intrinsic_R = np.reshape(calib["P_rect_101"], (3, 4))[:, :3]
rot = np.reshape(calib["R_103"], (3, 3))
trn = np.reshape(calib["T_103"], (3, 1))
T_RL = np.concatenate([np.concatenate([rot, trn], axis=1),
np.array([[0, 0, 0, 1]], dtype=np.float32)], axis=0)
self.stereo_T_LR = np.linalg.inv(T_RL)
# print("stereo_T_LR:\n", self.stereo_T_LR)
def _load_zip_files(self, drive_path):
zip_files = dict()
left_img_zip = drive_path
zip_files["leftImg"] = zipfile.ZipFile(left_img_zip)
right_img_zip = left_img_zip.replace("-left-image", "-right-image")
zip_files["rightImg"] = zipfile.ZipFile(right_img_zip)
depth_map_zip = left_img_zip.replace("-left-image", "-depth-map")
zip_files["depthMap"] = zipfile.ZipFile(depth_map_zip)
return zip_files
def _read_calib(self, drive_path):
calib_file = drive_path.split("/")
calib_file[-2] = "calib/half-image-calib"
calib_file = "/".join(calib_file)
calib_file = calib_file.replace(".zip", ".txt")
params = dict()
with open(calib_file, "r") as fr:
lines = fr.readlines()
for line in lines:
line = line.rstrip("\n")
key, values = line.split(":")
values = values.strip().split(" ")
values = [float(val) for val in values]
values = np.array(values, dtype=np.float32)
params[key] = values
# print("[_read_calib]", key, values)
return params
def num_frames_(self):
return len(self.frame_names) - 4
def get_range_(self):
return range(2, len(self.frame_names)-2)
def get_image(self, index, right=False):
filename = self.frame_names[index]
zipkey = "rightImg" if right else "leftImg"
image_bytes = self.zip_files[zipkey].open(filename)
image = Image.open(image_bytes)
image = np.array(image, np.uint8)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return image
def get_pose(self, index, right=False):
return None
def get_depth(self, index, srcshape_hw, dstshape_hw, intrinsic, right=False):
assert right is False, "driving stereo dataset has only left depths"
filename = self.frame_names[index]
depth_bytes = self.zip_files["depthMap"].open(filename.replace(".jpg", ".png"))
detph = Image.open(depth_bytes)
depth = np.array(detph, np.uint16).astype(np.float32) / 256.
depth = resize_depth_map(depth, srcshape_hw, dstshape_hw)
return depth.astype(np.float32)
def get_intrinsic(self, index=0, right=False):
# loaded in init_drive()
intrinsic = self.intrinsic_R if right else self.intrinsic
return intrinsic.copy()
def get_stereo_extrinsic(self, index=0):
# loaded in init_drive()
return self.stereo_T_LR.copy()
# ======================================================================
import cv2
from config import opts
from utils.util_funcs import print_progress_status
def test_driving_stereo_reader():
srcpath = opts.get_raw_data_path("driving_stereo")
drive_paths = glob(op.join(srcpath, f"train-left-image", "*.zip"))
for drive_path in drive_paths:
print("\n!!! New drive start !!!", drive_path)
reader = DrivingStereoReader("train")
reader.init_drive(drive_path)
frame_indices = reader.get_range_()
for fi in frame_indices:
image = reader.get_image(fi)
intrinsic = reader.get_intrinsic(fi)
depth = reader.get_depth(fi, image.shape[:2], opts.get_img_shape("HW", "cityscapes"), intrinsic)
drive_path = op.basename(drive_path)
frame_name = op.basename(reader.frame_names[fi])
frame_name = frame_name.replace(op.basename(drive_path[:-4]), 'drive')
frame_name = frame_name.replace(op.basename(drive_path[:10]), 'date')
print(f"== test_city_reader) drive: {op.basename(drive_path)}, frame: {fi}, {frame_name}")
view = image
depth_view = apply_color_map(depth)
cv2.imshow("image", view)
cv2.imshow("dstdepth", depth_view)
key = cv2.waitKey(0)
if key == ord('q'):
break
from tfrecords.tfrecord_reader import TfrecordReader
from model.synthesize.synthesize_base import SynthesizeMultiScale
import utils.util_funcs as uf
import utils.convert_pose as cp
import tensorflow as tf
def test_driving_stereo_synthesis():
tfrpath = op.join(opts.DATAPATH_TFR, "driving_stereo_train")
dataset = TfrecordReader(tfrpath).get_dataset()
batid, srcid = 0, 0
for i, features in enumerate(dataset):
if i == 0:
print("==== check shapes")
for key, val in features.items():
print(" ", i, key, val.shape, val.dtype)
left_target = features["image5d"][:, 4]
right_source = features["image5d_R"][:, 4:5] # numsrc = 1
intrinsic = features["intrinsic"]
depth_ms = uf.multi_scale_depths(features["depth_gt"], [1, 2, 4, 8])
pose_r2l = tf.linalg.inv(features["stereo_T_LR"])
pose_r2l = tf.expand_dims(pose_r2l, axis=1)
# pose_r2l = tf.tile(pose_r2l, [1, 1, 1, 1]) # numsrc = 1
pose_r2l = cp.pose_matr2rvec_batch(pose_r2l)
synth_ms = SynthesizeMultiScale()(right_source, intrinsic, depth_ms, pose_r2l)
src_image = right_source[batid, srcid]
tgt_image = left_target[batid]
syn_image = synth_ms[0][batid, srcid]
depth_view = apply_color_map(depth_ms[0][batid].numpy())
view = tf.concat([src_image, tgt_image, syn_image], axis=0)
view = uf.to_uint8_image(view).numpy()
view = np.concatenate([view, depth_view], axis=0)
cv2.imshow("stereo synthesize", view)
key = cv2.waitKey()
if key == ord('q'):
break
if __name__ == "__main__":
test_driving_stereo_reader()
# test_driving_stereo_synthesis()
| 38.978495 | 108 | 0.620276 |
73d8440e1d7b9b30b0a0204eb271dd7d1cd26d9d | 1,980 | gyp | Python | chrome/installer/mini_installer_syzygy.gyp | Wzzzx/chromium-crosswalk | 768dde8efa71169f1c1113ca6ef322f1e8c9e7de | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-01-28T08:09:58.000Z | 2021-11-15T15:32:10.000Z | chrome/installer/mini_installer_syzygy.gyp | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | null | null | null | chrome/installer/mini_installer_syzygy.gyp | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | 6 | 2020-09-23T08:56:12.000Z | 2021-11-18T03:40:49.000Z | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'msvs_use_common_release': 0,
'msvs_use_common_linker_extras': 0,
},
'includes': [
'../../build/win_precompile.gypi',
],
'conditions': [
# This target won't build in fastbuild, since there are no PDBs.
['OS=="win" and fastbuild==0', {
'conditions': [
['chrome_multiple_dll==0', {
'targets': [
{
'target_name': 'mini_installer_syzygy',
'product_name': 'mini_installer',
'variables': {
'chrome_dll_project': [
'../chrome_syzygy.gyp:chrome_dll_syzygy',
],
'chrome_dll_path': [
'<(PRODUCT_DIR)/syzygy/chrome.dll',
],
'output_dir': '<(PRODUCT_DIR)/syzygy',
},
# Bulk of the build configuration comes from here.
'includes': [ 'mini_installer.gypi', ],
},
],
}, {
'targets': [
{
'target_name': 'mini_installer_syzygy',
'type': 'executable',
'product_name': 'mini_installer',
'variables': {
'chrome_dll_project': [
'../chrome_syzygy.gyp:chrome_dll_syzygy',
'../chrome_syzygy.gyp:chrome_child_dll_syzygy',
],
'chrome_dll_path': [
'<(PRODUCT_DIR)/syzygy/chrome.dll',
'<(PRODUCT_DIR)/syzygy/chrome_child.dll',
],
'output_dir': '<(PRODUCT_DIR)/syzygy',
},
# Bulk of the build configuration comes from here.
'includes': [ 'mini_installer.gypi', ],
},
],
}],
],
},{
'targets': [],
}],
],
}
| 31.428571 | 72 | 0.473232 |
73d84cdc39b77d84038d654ab8eac7fee2efe81b | 3,263 | py | Python | pathfinder/client/commands.py | ayyess/pathfinder.vim | 3f636aa80388ae78b6fee4837750f66a166b3afb | [
"MIT"
] | null | null | null | pathfinder/client/commands.py | ayyess/pathfinder.vim | 3f636aa80388ae78b6fee4837750f66a166b3afb | [
"MIT"
] | null | null | null | pathfinder/client/commands.py | ayyess/pathfinder.vim | 3f636aa80388ae78b6fee4837750f66a166b3afb | [
"MIT"
] | null | null | null | import time
import vim
import pathfinder.client.output as output
from pathfinder.client.client import client
from pathfinder.window import cursor_in_same_position, winsaveview
class RecordedState:
"""
A snapshot of useful information at a specific time.
Used for comparing the start of a path to the current state to determine whether
pathfinding should begin.
"""
def __init__(self):
self.time = time.time()
self.view = winsaveview()
self.mode = vim.eval("mode()")
self.buffer_contents = vim.eval("getline(0,'$')")
def reset():
"""
Reset variables ready for a new movement.
This is called after run(), and to cancel a path on events such as BufNewFile.
"""
global start_state, current_state
start_state = RecordedState()
current_state = start_state
def run():
"""
Start calculating a path.
This is called by loop() below, when there are no motions for a while. It also
runs for a variety of autocmd events which indicate the end of a movement, such as
entering insert mode.
"""
if start_state is None or current_state is None:
return
if not cursor_in_same_position(start_state.view, current_state.view):
# Start pathfinding in the background and call display_results when done
client.pathfind(start_state.view, current_state.view, output.show_output)
reset()
def update_current():
"""Called before run() when using manual commands."""
global current_state
current_state = RecordedState()
def autorun():
"""Called on a timer several times per second, if autorun is enabled."""
global start_state, current_state
new_state = RecordedState()
if (
current_state.mode in ["n", "v", "V"]
and time.time() >= current_state.time + vim.vars["pf_autorun_delay"]
# This is checked in run(), but that would reset the timer if we called it
and not cursor_in_same_position(start_state.view, current_state.view)
):
# No motions for the configured timeout
run()
elif start_state.mode != new_state.mode:
if start_state.mode in ["n", "v", "V"]:
# Changed modes, leaving one of normal, visual or visual-line
# (we don't want to trigger for leaving e.g. insert mode since cursor
# movements there are not made with motions)
run()
else:
reset()
elif (
new_state.mode == "n"
and new_state.buffer_contents != start_state.buffer_contents
):
# Buffer has changed in normal mode
# This means a command like x,rx,p must have been used
run()
elif not cursor_in_same_position(current_state.view, new_state.view):
current_state = new_state
def explain():
"""Called for the :PathfinderExplain command."""
if output.last_output is None:
print("No suggestion to explain.")
else:
# explained_motions yields each line
# sep tells print to put \n between them rather than space
print(*output.explained_motions(output.last_output), sep="\n")
def stop():
"""Called when Vim is about to shut down."""
client.close()
# Call reset to set the initial state
reset()
| 29.93578 | 86 | 0.6635 |
73d8659c8e3cfe0a883aa5035393cc6ed5f557c4 | 1,948 | py | Python | alipay/aop/api/domain/AlipayOpenServicemarketPromotiontaskRelationQueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOpenServicemarketPromotiontaskRelationQueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOpenServicemarketPromotiontaskRelationQueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenServicemarketPromotiontaskRelationQueryModel(object):
def __init__(self):
self._commodity_id = None
self._page_num = None
self._page_size = None
@property
def commodity_id(self):
return self._commodity_id
@commodity_id.setter
def commodity_id(self, value):
self._commodity_id = value
@property
def page_num(self):
return self._page_num
@page_num.setter
def page_num(self, value):
self._page_num = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
def to_alipay_dict(self):
params = dict()
if self.commodity_id:
if hasattr(self.commodity_id, 'to_alipay_dict'):
params['commodity_id'] = self.commodity_id.to_alipay_dict()
else:
params['commodity_id'] = self.commodity_id
if self.page_num:
if hasattr(self.page_num, 'to_alipay_dict'):
params['page_num'] = self.page_num.to_alipay_dict()
else:
params['page_num'] = self.page_num
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = self.page_size.to_alipay_dict()
else:
params['page_size'] = self.page_size
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenServicemarketPromotiontaskRelationQueryModel()
if 'commodity_id' in d:
o.commodity_id = d['commodity_id']
if 'page_num' in d:
o.page_num = d['page_num']
if 'page_size' in d:
o.page_size = d['page_size']
return o
| 27.43662 | 75 | 0.600616 |
73d886144b0036b18506997ea053cc28535fbd74 | 2,462 | py | Python | examples/multizone_example.py | Cygn/pychromecast | bccba1d9d84893d243ce5d626878d5f2f5095442 | [
"MIT"
] | null | null | null | examples/multizone_example.py | Cygn/pychromecast | bccba1d9d84893d243ce5d626878d5f2f5095442 | [
"MIT"
] | null | null | null | examples/multizone_example.py | Cygn/pychromecast | bccba1d9d84893d243ce5d626878d5f2f5095442 | [
"MIT"
] | null | null | null | """
Example on how to use the Multizone (Audio Group) Controller
"""
# pylint: disable=invalid-name
import argparse
import logging
import sys
import time
import zeroconf
import pychromecast
from pychromecast.controllers.multizone import (
MultizoneController,
MultiZoneControllerListener,
)
from pychromecast.socket_client import ConnectionStatusListener
# Change to the name of your Chromecast
CAST_NAME = "Whole house"
parser = argparse.ArgumentParser(
description="Example on how to use the Multizone Controller to track groupp members."
)
parser.add_argument("--show-debug", help="Enable debug log", action="store_true")
parser.add_argument(
"--show-zeroconf-debug", help="Enable zeroconf debug log", action="store_true"
)
parser.add_argument(
"--cast", help='Name of speaker group (default: "%(default)s")', default=CAST_NAME
)
args = parser.parse_args()
if args.show_debug:
logging.basicConfig(level=logging.DEBUG)
if args.show_zeroconf_debug:
print("Zeroconf version: " + zeroconf.__version__)
logging.getLogger("zeroconf").setLevel(logging.DEBUG)
class MyConnectionStatusListener(ConnectionStatusListener):
"""ConnectionStatusListener"""
def __init__(self, _mz):
self._mz = _mz
def new_connection_status(self, status):
if status.status == "CONNECTED":
self._mz.update_members()
class MyMultiZoneControllerListener(MultiZoneControllerListener):
"""MultiZoneControllerListener"""
def multizone_member_added(self, group_uuid):
print("New member: {}".format(group_uuid))
def multizone_member_removed(self, group_uuid):
print("Removed member: {}".format(group_uuid))
def multizone_status_received(self, group_uuid, media_status):
print("Members: {}".format(mz.members))
chromecasts, browser = pychromecast.get_listed_chromecasts(friendly_names=[args.cast])
if not chromecasts:
print('No chromecast with name "{}" discovered'.format(args.cast))
sys.exit(1)
cast = chromecasts[0]
# Add listeners
mz = MultizoneController(cast.uuid)
mz.register_listener(MyMultiZoneControllerListener())
cast.register_handler(mz)
cast.register_connection_listener(MyConnectionStatusListener(mz))
# Start socket client's worker thread and wait for initial status update
cast.wait()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
break
# Shut down discovery
pychromecast.discovery.stop_discovery(browser)
| 27.355556 | 89 | 0.749797 |
73d8af7cc4a1d0bb170fe489058743deaeced0b9 | 4,284 | py | Python | example/plane_wave_decomposition.py | narahahn/continuous_measurement | 2391f712880ec18957dc5491af6cd3665b97e086 | [
"MIT"
] | null | null | null | example/plane_wave_decomposition.py | narahahn/continuous_measurement | 2391f712880ec18957dc5491af6cd3665b97e086 | [
"MIT"
] | null | null | null | example/plane_wave_decomposition.py | narahahn/continuous_measurement | 2391f712880ec18957dc5491af6cd3665b97e086 | [
"MIT"
] | null | null | null | """
Plane wave decomposition using continuously measured impulse responses
* point source in a free-field
* CARDIOID microphone moving on a circle at a constant speed
* captured signal computed by using fractional delay filters + oversampling
* system identification based on spatial interpolation of a given order
* impulse responses computed for discrete positions
* apply plane wave decomposition to the impulse responses
* compare the results for discrete (sequential) and continuous measurements
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
import micarray
from sys import path
path.append('../')
from utils import *
from source import *
# Constants
c = 343
fs = 16000
# Source
xs = [0, 6, 0] # Point source
source_type = 'point'
# Receiver
R = 0.5
Omega = 2 * np.pi / 20
L = int(2 * np.pi / Omega * fs)
t = (1/fs) * np.arange(L)
phi0 = 0
phi = Omega * t + phi0
xm = [R*np.cos(phi), R*np.sin(phi), np.zeros_like(phi)]
# Excitation
N = 1600 # excitation period
p = perfect_sequence_randomphase(N)
# Experimental parameters
K = int(L/N) # number of target angles = equivalent number of sampling points
int_order = 30 # spatial interpolation order
Omega_al = c / N / R # anti-aliasing angular speed
# Captured signal of a moving microphone
rm = np.array(xs)[:, np.newaxis] - np.array(xm)
cardioid_gain = 0.5 * (np.sum(rm*xm, axis=0) / np.linalg.norm(rm, axis=0) / np.linalg.norm(xm, axis=0)+1)
waveform_l, shift_l, offset_l = impulse_response(xs, xm, source_type, fs)
waveform_l *= cardioid_gain[:, np.newaxis]
s = captured_signal(waveform_l, shift_l, p)
# Target angles for impulse response computation
phi_k = np.linspace(0, 2 * np.pi, num=K, endpoint=False)
# Static measurement (reference)
x_k = [R*np.cos(phi_k), R*np.sin(phi_k), np.zeros_like(phi_k)]
r_k = np.array(xs)[:, np.newaxis] - np.array(x_k)
cardioid_gain = 0.5 * (np.sum(r_k*x_k, axis=0) / np.linalg.norm(r_k, axis=0) / np.linalg.norm(x_k, axis=0)+1)
waveform_k, shift_k, offset_k = impulse_response(xs, x_k, source_type, fs)
waveform_k *= cardioid_gain[:, np.newaxis]
h0, _, _ = construct_ir_matrix(waveform_k, shift_k, N)
H0 = np.fft.rfft(h0, axis=-1)
# Continuous measurement
h = system_identification(phi, s, phi_k, p, interpolation='lagrange', int_order=int_order)
H = np.fft.rfft(h, axis=-1)
# Modal Beamforming
bf_order = (K-1) // 2 # beamforming order
Npwd = 360
freq = np.linspace(0, fs/2, num=H.shape[-1], endpoint=True)
k = 2 * np.pi * freq / c
phi_pwd = np.linspace(0, 2*np.pi, num=Npwd, endpoint=False)
Bn = micarray.modal.radial.circular_pw(bf_order, k, R, setup='card')
Dn, _ = micarray.modal.radial.regularize(1/Bn, 3000, 'softclip')
D = micarray.modal.radial.circ_diagonal_mode_mat(Dn)
Psi_p = micarray.modal.angular.cht_matrix(bf_order, phi_k, 2*np.pi/len(phi_k))
Psi_q = micarray.modal.angular.cht_matrix(bf_order, phi_pwd)
A_pwd = np.matmul(np.matmul(Psi_q.T, D), np.conj(Psi_p))
q0_pwd = np.squeeze(np.matmul(A_pwd, np.expand_dims(H0.T, 2)))
q0_pwd_t = np.fft.irfft(q0_pwd, axis=0)
q_pwd = np.squeeze(np.matmul(A_pwd, np.expand_dims(H.T, 2)))
q_pwd_t = np.fft.irfft(q_pwd, axis=0)
# Plots
phi_pwd_deg = np.rad2deg(phi_pwd)
freq_kHz = freq / 1000
time_ms = np.arange(N) / fs * 1000
t0 = np.linalg.norm(xs) / c * 1000
# Fig. Beam pattern - discrete
plt.figure(figsize=(10, 8))
plt.pcolormesh(phi_pwd_deg, freq, db(q0_pwd))
plt.colorbar(label='dB')
plt.clim(-100, 0)
plt.xlabel(r'$\phi$ / deg')
plt.ylabel('$f$ / kHz')
plt.title('Modal beamforming - discrete')
# Fig. Beam pattern - continuous
plt.figure(figsize=(10, 8))
plt.pcolormesh(phi_pwd_deg, freq, db(q_pwd))
plt.colorbar(label='dB')
plt.clim(-100, 0)
plt.xlabel(r'$\phi$ / deg')
plt.ylabel('$t$ / ms')
plt.title('Modal beamforming - continuous')
# Fig. PWD signals - discrete
plt.figure(figsize=(10, 8))
plt.pcolormesh(phi_pwd_deg, time_ms, db(q0_pwd_t))
plt.colorbar(label='dB')
plt.clim(-80, 0)
plt.xlabel(r'$\phi$ / deg')
plt.ylabel('$t$ / ms')
#plt.ylim(t0-15, t0+15)
plt.title('PWD signals - discrete')
# Fig. PWD signals - continuous
plt.figure(figsize=(10, 8))
plt.pcolormesh(phi_pwd_deg, time_ms, db(q_pwd_t))
plt.colorbar(label='dB')
plt.clim(-80, 0)
plt.xlabel(r'$\phi$ / deg')
plt.ylabel('$t$ / ms')
#plt.ylim(t0-15, t0+15)
plt.title('PWD signals - continuous') | 31.970149 | 109 | 0.713119 |
73d8c88732500415cdda9e916e73aa91e884590c | 10,314 | py | Python | NumNum/load_data.py | MilesQLi/NumNum | 97851ccb74610c414bf474016915173b8abfe913 | [
"MIT"
] | 42 | 2016-10-26T16:27:26.000Z | 2021-07-11T05:41:11.000Z | NumNum/load_data.py | lg920810/NumNum | 97851ccb74610c414bf474016915173b8abfe913 | [
"MIT"
] | 2 | 2017-08-09T16:10:33.000Z | 2018-08-01T03:09:21.000Z | NumNum/load_data.py | lg920810/NumNum | 97851ccb74610c414bf474016915173b8abfe913 | [
"MIT"
] | 30 | 2016-10-09T02:16:24.000Z | 2021-07-11T05:47:18.000Z | # Import Modules
from __future__ import print_function
from six.moves import cPickle as pickle
from six.moves import range
from six.moves.urllib.request import urlretrieve
from scipy import ndimage
from PIL import Image
import numpy as np
import os
import sys
import tarfile
import h5py
from numpy import random
# Download data
print('Downloading data...')
url = 'http://ufldl.stanford.edu/housenumbers/'
def maybe_download(filename, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename)
print('Download Complete!')
statinfo = os.stat(filename)
return filename
train_filename = maybe_download('train.tar.gz')
test_filename = maybe_download('test.tar.gz')
extra_filename = maybe_download('extra.tar.gz')
print('Successfully downloaded data!')
# Unzip Data
print('Unzipping data...')
np.random.seed(8)
def maybe_extract(filename, force=False):
# Remove .tar.gz
root = os.path.splitext(os.path.splitext(filename)[0])[0]
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall()
tar.close()
data_folders = root
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
extra_folders = maybe_extract(extra_filename)
print('Successfully unzipped data!')
# Create dictionary for bounding boxes
print('Creating dictionary of bounding boxes...')
class DigitStructFile:
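    # digitStruct.mat is a MATLAB v7.3 file, i.e. HDF5 under the hood, so it is
    # read with h5py; names and bounding boxes are stored as object references
    # that must be dereferenced through self.inf, which the helpers below do.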
def __init__(self, inf):
self.inf = h5py.File(inf, 'r')
self.digitStructName = self.inf['digitStruct']['name']
self.digitStructBbox = self.inf['digitStruct']['bbox']
def getName(self,n):
return ''.join([chr(c[0]) for c in self.inf[self.digitStructName[n][0]].value])
def bboxHelper(self,attr):
if (len(attr) > 1):
attr = [self.inf[attr.value[j].item()].value[0][0] for j in range(len(attr))]
else:
attr = [attr.value[0][0]]
return attr
def getBbox(self,n):
bbox = {}
bb = self.digitStructBbox[n].item()
bbox['height'] = self.bboxHelper(self.inf[bb]["height"])
bbox['label'] = self.bboxHelper(self.inf[bb]["label"])
bbox['left'] = self.bboxHelper(self.inf[bb]["left"])
bbox['top'] = self.bboxHelper(self.inf[bb]["top"])
bbox['width'] = self.bboxHelper(self.inf[bb]["width"])
return bbox
def getDigitStructure(self,n):
s = self.getBbox(n)
s['name']=self.getName(n)
return s
def getAllDigitStructure(self):
return [self.getDigitStructure(i) for i in range(len(self.digitStructName))]
def getAllDigitStructure_ByDigit(self):
pictDat = self.getAllDigitStructure()
result = []
structCnt = 1
for i in range(len(pictDat)):
item = { 'filename' : pictDat[i]["name"] }
figures = []
for j in range(len(pictDat[i]['height'])):
figure = {}
figure['height'] = pictDat[i]['height'][j]
figure['label'] = pictDat[i]['label'][j]
figure['left'] = pictDat[i]['left'][j]
figure['top'] = pictDat[i]['top'][j]
figure['width'] = pictDat[i]['width'][j]
figures.append(figure)
structCnt = structCnt + 1
item['boxes'] = figures
result.append(item)
return result
print("Successfully created dictionary of bounding boxes!")
# Get Digit Structure
print('Getting digit structure for training data...')
digitFileTrain=DigitStructFile(os.path.join('train','digitStruct.mat'))
train_data=digitFileTrain.getAllDigitStructure_ByDigit()
print('Success!')
print('Getting digit structure for test data...')
digitFileTest=DigitStructFile(os.path.join('test','digitStruct.mat'))
test_data=digitFileTest.getAllDigitStructure_ByDigit()
print('Success!')
print('Getting digit structure for extra data...')
digitFileExtra=DigitStructFile(os.path.join('extra','digitStruct.mat'))
extra_data=digitFileExtra.getAllDigitStructure_ByDigit()
print('Success!')
# Crop Training Images
print('Cropping training images...')
train_imsize = np.ndarray([len(train_data),2])
for i in np.arange(len(train_data)):
filename = train_data[i]['filename']
fullname = os.path.join(train_folders, filename)
im = Image.open(fullname)
train_imsize[i, :] = im.size[:]
print('Success!')
# Crop Test Images
print('Cropping test images...')
test_imsize = np.ndarray([len(test_data),2])
for i in np.arange(len(test_data)):
filename = test_data[i]['filename']
fullname = os.path.join(test_folders, filename)
im = Image.open(fullname)
test_imsize[i, :] = im.size[:]
print('Success!')
# Crop Extra Images
print('Cropping extra images...')
extra_imsize = np.ndarray([len(extra_data),2])
for i in np.arange(len(extra_data)):
filename = extra_data[i]['filename']
fullname = os.path.join(extra_folders, filename)
im = Image.open(fullname)
extra_imsize[i, :] = im.size[:]
print('Success!')
# Use extra data
def generate_dataset(data, folder):
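    # For each image: take the union of all digit bounding boxes, expand it by
    # roughly 20%, crop, resize to 32x32, convert to grayscale and standardize
    # per image. labels[i,0] holds the digit count; labels[i,1:6] hold up to
    # five digits, with 10 marking an unused slot and the digit '0' stored as 0.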
dataset = np.ndarray([len(data),32,32,1], dtype='float32')
labels = np.ones([len(data),6], dtype=int) * 10
for i in np.arange(len(data)):
filename = data[i]['filename']
fullname = os.path.join(folder, filename)
im = Image.open(fullname)
boxes = data[i]['boxes']
num_digit = len(boxes)
labels[i,0] = num_digit
top = np.ndarray([num_digit], dtype='float32')
left = np.ndarray([num_digit], dtype='float32')
height = np.ndarray([num_digit], dtype='float32')
width = np.ndarray([num_digit], dtype='float32')
for j in np.arange(num_digit):
if j < 5:
labels[i,j+1] = boxes[j]['label']
if boxes[j]['label'] == 10: labels[i,j+1] = 0
else: print('#',i,'image has more than 5 digits.')
top[j] = boxes[j]['top']
left[j] = boxes[j]['left']
height[j] = boxes[j]['height']
width[j] = boxes[j]['width']
im_top = np.amin(top)
im_left = np.amin(left)
im_height = np.amax(top) + height[np.argmax(top)] - im_top
im_width = np.amax(left) + width[np.argmax(left)] - im_left
im_top = np.floor(im_top - 0.1 * im_height)
im_left = np.floor(im_left - 0.1 * im_width)
im_bottom = np.amin([np.ceil(im_top + 1.2 * im_height), im.size[1]])
im_right = np.amin([np.ceil(im_left + 1.2 * im_width), im.size[0]])
im = im.crop((im_left, im_top, im_right, im_bottom)).resize([32,32], Image.ANTIALIAS)
im = np.dot(np.array(im, dtype='float32'), [[0.2989],[0.5870],[0.1140]])
mean = np.mean(im, dtype='float32')
std = np.std(im, dtype='float32', ddof=1)
if std < 1e-4: std = 1.
im = (im - mean) / std
dataset[i,:,:,:] = im[:,:,:]
return dataset, labels
print('Generating training dataset and labels...')
train_dataset, train_labels = generate_dataset(train_data, train_folders)
print('Success! \n Training set: {} \n Training labels: {}'.format(train_dataset.shape, train_labels.shape))
print('Generating testing dataset and labels...')
test_dataset, test_labels = generate_dataset(test_data, test_folders)
print('Success! \n Testing set: {} \n Testing labels: {}'.format(test_dataset.shape, test_labels.shape))
print('Generating extra dataset and labels...')
extra_dataset, extra_labels = generate_dataset(extra_data, extra_folders)
print('Success! \n Testing set: {} \n Testing labels: {}'.format(extra_dataset.shape, extra_labels.shape))
# Clean up data by deleting digits more than 5 (very few)
print('Cleaning up training data...')
train_dataset = np.delete(train_dataset, 29929, axis=0)
train_labels = np.delete(train_labels, 29929, axis=0)
print('Success!')
# Expand Training Data
print('Expanding training data randomly...')
random.seed(8)
n_labels = 10
valid_index = []
valid_index2 = []
train_index = []
train_index2 = []
for i in np.arange(n_labels):
valid_index.extend(np.where(train_labels[:,1] == (i))[0][:400].tolist())
train_index.extend(np.where(train_labels[:,1] == (i))[0][400:].tolist())
valid_index2.extend(np.where(extra_labels[:,1] == (i))[0][:200].tolist())
train_index2.extend(np.where(extra_labels[:,1] == (i))[0][200:].tolist())
random.shuffle(valid_index)
random.shuffle(train_index)
random.shuffle(valid_index2)
random.shuffle(train_index2)
valid_dataset = np.concatenate((extra_dataset[valid_index2,:,:,:], train_dataset[valid_index,:,:,:]), axis=0)
valid_labels = np.concatenate((extra_labels[valid_index2,:], train_labels[valid_index,:]), axis=0)
train_dataset_new = np.concatenate((extra_dataset[train_index2,:,:,:], train_dataset[train_index,:,:,:]), axis=0)
train_labels_new = np.concatenate((extra_labels[train_index2,:], train_labels[train_index,:]), axis=0)
print('Success! \n Training set: {} \n Training labels: {}'.format(train_dataset_new.shape, train_labels_new.shape))
print('Success! \n Validation set: {} \n Validation labels: {}'.format(valid_dataset.shape, valid_labels.shape))
print('Success! \n Testing set: {} \n Testing labels: {}'.format(test_dataset.shape, test_labels.shape))
# Create Pickling File
print('Pickling data...')
pickle_file = 'SVHN.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset_new,
'train_labels': train_labels_new,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to {}: {}'.format(pickle_file, e))
raise
statinfo = os.stat(pickle_file)
print('Success!')
print('Compressed pickle size: {}'.format(statinfo.st_size))
| 35.443299 | 116 | 0.65668 |
73d8e0338542eea89648519e280e841017012985 | 3,540 | py | Python | cases/multi_modal_rating_prediction.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 358 | 2020-06-11T09:34:53.000Z | 2022-03-31T12:56:22.000Z | cases/multi_modal_rating_prediction.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 467 | 2020-06-11T13:49:45.000Z | 2022-03-31T14:19:48.000Z | cases/multi_modal_rating_prediction.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 48 | 2020-07-13T14:50:45.000Z | 2022-03-26T09:37:13.000Z | import datetime
from examples.multi_modal_pipeline import calculate_validation_metric, generate_initial_pipeline_and_data, \
prepare_multi_modal_data
from fedot.core.composer.gp_composer.gp_composer import GPComposerBuilder, GPComposerRequirements
from fedot.core.log import default_log
from fedot.core.optimisers.gp_comp.gp_optimiser import GPGraphOptimiserParameters, GeneticSchemeTypesEnum
from fedot.core.repository.operation_types_repository import get_operations_for_task
from fedot.core.repository.quality_metrics_repository import ClassificationMetricsEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
def run_multi_modal_case(files_path, is_visualise=False, timeout=datetime.timedelta(minutes=2)):
task = Task(TaskTypesEnum.classification)
images_size = (128, 128)
train_num, test_num, train_img, test_img, train_text, test_text = prepare_multi_modal_data(files_path, task,
images_size)
pipeline, fit_data, predict_data = generate_initial_pipeline_and_data(images_size,
train_num, test_num,
train_img, test_img,
train_text, test_text)
# the search of the models provided by the framework that can be used as nodes in a pipeline for the selected task
available_model_types = get_operations_for_task(task=task, mode='model')
# the choice of the metric for the pipeline quality assessment during composition
metric_function = ClassificationMetricsEnum.ROCAUC_penalty
# the choice and initialisation of the GP search
composer_requirements = GPComposerRequirements(
primary=available_model_types,
secondary=available_model_types, max_arity=3,
max_depth=3, pop_size=5, num_of_generations=5,
crossover_prob=0.8, mutation_prob=0.8, timeout=timeout)
# GP optimiser parameters choice
scheme_type = GeneticSchemeTypesEnum.parameter_free
optimiser_parameters = GPGraphOptimiserParameters(genetic_scheme_type=scheme_type)
# Create builder for composer and set composer params
logger = default_log('FEDOT logger', verbose_level=4)
    # the multi modal template (with data sources) is passed as initial assumption for composer
builder = GPComposerBuilder(task=task).with_requirements(composer_requirements). \
with_metrics(metric_function).with_optimiser_parameters(optimiser_parameters).with_logger(logger=logger). \
with_initial_pipeline(pipeline).with_cache('multi_modal_opt.cache')
# Create GP-based composer
composer = builder.build()
# the optimal pipeline generation by composition - the most time-consuming task
pipeline_evo_composed = composer.compose_pipeline(data=fit_data,
is_visualise=True)
pipeline_evo_composed.fit(input_data=fit_data)
if is_visualise:
pipeline_evo_composed.show()
prediction = pipeline_evo_composed.predict(predict_data)
err = calculate_validation_metric(prediction, test_num)
print(f'ROC AUC for validation sample is {err}')
return err
def download_mmdb_dataset():
# TODO change to uploadable full dataset
pass
if __name__ == '__main__':
download_mmdb_dataset()
run_multi_modal_case('cases/data/mm_imdb', is_visualise=True)
| 45.384615 | 118 | 0.711864 |
73d8f84425464410586db007f133ecd2367b84bb | 2,314 | py | Python | Unsupervised_Learning/PCA.py | shyamsn97/artificial-intelligence | 9f15906f0b342195ba66c20bcc77d895ba64169f | [
"MIT"
] | null | null | null | Unsupervised_Learning/PCA.py | shyamsn97/artificial-intelligence | 9f15906f0b342195ba66c20bcc77d895ba64169f | [
"MIT"
] | null | null | null | Unsupervised_Learning/PCA.py | shyamsn97/artificial-intelligence | 9f15906f0b342195ba66c20bcc77d895ba64169f | [
"MIT"
] | null | null | null | import numpy as np
import sys
sys.path.append('../tools')
import tools
class PCA():
"""
Dimensionality reduction using the diagonalization of a covariance matrix
    Can specify whether to calculate the covariance matrix from the rows or the columns of a given data matrix
Parameters:
X: numpy array() data matrix
column: boolean determines whether to generate covariance matrix from columns or rows
eigenvalues and eigenvectors: numpy array() eigenvalues and eigenvectors of the covariance matrix
proportion_variance: numpy array() variance explained by PCs
cumulative_var: numpy array() cumulative variance explained by PCs
"""
def __init__(self, X,column=True):
#PCA uses the rows of X or the columns to construct the cov matrix
self.column = column
self.X = X
if(column == True):
self.mumat = X.mean(axis=0)
self.cov = tools.compute_covariance(X)
else:
self.mumat = X.mean(axis=1)
self.cov = tools.compute_covariance(X,False)
self.X_shifted = self.X - self.mumat
        self.eigenvalues, self.eigenvectors = np.linalg.eig(self.cov)
        self.eigenvectors = self.eigenvectors.astype(float).real
        # sort eigenvalues descending and reorder eigenvector columns to match
        order = np.argsort(self.eigenvalues)[::-1]
        self.eigenvalues = self.eigenvalues[order].real
        self.eigenvectors = self.eigenvectors[:, order]
self.proportion_variance = ((self.eigenvalues/float(sum(self.eigenvalues))).astype(float)).real
self.cumulative_var = np.cumsum(self.proportion_variance)
def rank(self,n):
#this approximates the original matrix by using n eigenvectors corresponding to the biggest eigenvalues
eigenvalues = self.eigenvalues
eigenvectors = self.eigenvectors
indices = eigenvalues.argsort()[::-1][:n]
Q = eigenvectors[:,indices]
if self.column == False:
return Q.dot(Q.T.dot(self.X_shifted)).astype(float) + self.mumat
else:
return self.X_shifted.dot(Q).dot(Q.T).astype(float) + self.mumat
def project(self,n):
#this projects the data onto a lower dimension
eigenvalues = self.eigenvalues
eigenvectors = self.eigenvectors
indices = eigenvalues.argsort()[::-1][:n]
Q = eigenvectors[:,indices]
return self.X.dot(Q).astype(float) | 44.5 | 115 | 0.652982 |
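# Usage sketch (not part of the original module; the random matrix is purely
# illustrative and assumes tools.compute_covariance is importable):
#   X = np.random.randn(100, 5)
#   pca = PCA(X, column=True)
#   print(pca.cumulative_var)   # cumulative variance explained by the PCs
#   X_2d = pca.project(2)       # (100, 2) projection onto the top two PCs
#   X_hat = pca.rank(2)         # rank-2 reconstruction of X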
73d8f9cf56a331fc21e62d5878b7fb370c830212 | 672 | py | Python | LC/253.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | 2 | 2018-02-24T17:20:02.000Z | 2018-02-24T17:25:43.000Z | LC/253.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | LC/253.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | # Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def minMeetingRooms(self, intervals):
"""
:type intervals: List[Interval]
:rtype: int
"""
l=[]
for x in intervals:
l.append((x.start, 's'))
l.append((x.end, 'e'))
l=sorted(l, key=lambda x: x[0]-0.1*(x[1]=='e'))
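        # Sweep line: each interval contributes a start ('s') and an end ('e')
        # event. The -0.1 offset makes an end sort before a start at the same
        # time, so a room freed at time t can serve a meeting starting at t.
        # Below, `room` counts concurrent meetings; its peak is the answer.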
res=0
room=0
for x in l:
if x[1]=='s':
room+=1
res=max(res, room)
else:
room-=1
return res | 24 | 55 | 0.425595 |
73d910c5e2758a0d08a69f6aef30ab94d53c0d18 | 1,605 | py | Python | python/wordcount.py | KasperD/script-fun | 919c8cfca65ce2177213145e6c553c1f4fa33b63 | [
"MIT"
] | null | null | null | python/wordcount.py | KasperD/script-fun | 919c8cfca65ce2177213145e6c553c1f4fa33b63 | [
"MIT"
] | null | null | null | python/wordcount.py | KasperD/script-fun | 919c8cfca65ce2177213145e6c553c1f4fa33b63 | [
"MIT"
] | null | null | null | #!/usr/bin/python
def main():
'''
Main function
'''
book = open("1984.txt", 'r') # Open the texfile as read-only
bookarray = {} # Create empty directory
for line in book:
if line != '\n': # If the line is only a newline then don't bother
line = line.replace("\n", "") #replace new lines
for each in line.split(" "):
# remove all special characters that show up and may be attached to a word
                # * I know I can do better but this is prettier and easier to read *
each = each.replace(".", "")
each = each.replace(";", "")
each = each.replace("?", "")
each = each.replace(";", "")
each = each.replace(",", "")
each = each.replace("'", "")
each = each.replace("!", "")
each = each.replace("-", "")
each = each.replace("\"", "")
each = each.replace(":", "")
each = each.lower() #Force lowercase for all words
if each in bookarray:
# create counter variable and update word count number
wordCounter = int(bookarray[each]) + 1
bookarray.update({each: wordCounter})
else:
# Create brand new word entry
bookarray[each] = 1
# Output all the fun from above to get word list with count
for each in bookarray:
print each, bookarray[each]
'''
Main function call
'''
if __name__ == '__main__':
main()
| 35.666667 | 90 | 0.490343 |
73d916975e494b7766544665b071d3e7a0fed6bc | 24,175 | py | Python | h5py/_hl/group.py | jnsebgosselin/h5py | f525d1c180f6b4bc59726faf833fd50a26d57d83 | [
"BSD-3-Clause"
] | 1 | 2020-08-29T20:40:05.000Z | 2020-08-29T20:40:05.000Z | h5py/_hl/group.py | jnsebgosselin/h5py | f525d1c180f6b4bc59726faf833fd50a26d57d83 | [
"BSD-3-Clause"
] | null | null | null | h5py/_hl/group.py | jnsebgosselin/h5py | f525d1c180f6b4bc59726faf833fd50a26d57d83 | [
"BSD-3-Clause"
] | null | null | null | # This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Implements support for high-level access to HDF5 groups.
"""
import posixpath as pp
import numpy
from .compat import filename_decode, filename_encode
from .. import h5, h5g, h5i, h5o, h5r, h5t, h5l, h5p, h5s, h5d
from . import base
from .base import HLObject, MutableMappingHDF5, phil, with_phil
from . import dataset
from . import datatype
from .vds import vds_support
class Group(HLObject, MutableMappingHDF5):
""" Represents an HDF5 group.
"""
def __init__(self, bind):
""" Create a new Group object by binding to a low-level GroupID.
"""
with phil:
if not isinstance(bind, h5g.GroupID):
raise ValueError("%s is not a GroupID" % bind)
super(Group, self).__init__(bind)
_gcpl_crt_order = h5p.create(h5p.GROUP_CREATE)
_gcpl_crt_order.set_link_creation_order(
h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED)
_gcpl_crt_order.set_attr_creation_order(
h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED)
def create_group(self, name, track_order=None):
""" Create and return a new subgroup.
Name may be absolute or relative. Fails if the target name already
exists.
track_order
Track dataset/group/attribute creation order under this group
if True. If None use global default h5.get_config().track_order.
"""
if track_order is None:
track_order = h5.get_config().track_order
with phil:
name, lcpl = self._e(name, lcpl=True)
gcpl = Group._gcpl_crt_order if track_order else None
gid = h5g.create(self.id, name, lcpl=lcpl, gcpl=gcpl)
return Group(gid)
def create_dataset(self, name, shape=None, dtype=None, data=None, **kwds):
""" Create a new HDF5 dataset
name
Name of the dataset (absolute or relative). Provide None to make
an anonymous dataset.
shape
Dataset shape. Use "()" for scalar datasets. Required if "data"
isn't provided.
dtype
Numpy dtype or string. If omitted, dtype('f') will be used.
Required if "data" isn't provided; otherwise, overrides data
array's dtype.
data
Provide data to initialize the dataset. If used, you can omit
shape and dtype arguments.
Keyword-only arguments:
chunks
(Tuple or int) Chunk shape, or True to enable auto-chunking. Integers can
be used for 1D shape.
maxshape
(Tuple or int) Make the dataset resizable up to this shape. Use None for
axes you want to be unlimited. Integers can be used for 1D shape.
compression
(String or int) Compression strategy. Legal values are 'gzip',
'szip', 'lzf'. If an integer in range(10), this indicates gzip
compression level. Otherwise, an integer indicates the number of a
dynamically loaded compression filter.
compression_opts
Compression settings. This is an integer for gzip, 2-tuple for
szip, etc. If specifying a dynamically loaded compression filter
number, this must be a tuple of values.
scaleoffset
(Integer) Enable scale/offset filter for (usually) lossy
compression of integer or floating-point data. For integer
data, the value of scaleoffset is the number of bits to
retain (pass 0 to let HDF5 determine the minimum number of
bits necessary for lossless compression). For floating point
data, scaleoffset is the number of digits after the decimal
place to retain; stored values thus have absolute error
less than 0.5*10**(-scaleoffset).
shuffle
(T/F) Enable shuffle filter.
fletcher32
(T/F) Enable fletcher32 error detection. Not permitted in
conjunction with the scale/offset filter.
fillvalue
(Scalar) Use this value for uninitialized parts of the dataset.
track_times
(T/F) Enable dataset creation timestamps.
track_order
(T/F) Track attribute creation order if True. If omitted use
global default h5.get_config().track_order.
external
(Iterable of tuples) Sets the external storage property, thus
designating that the dataset will be stored in one or more
non-HDF5 files external to the HDF5 file. Adds each tuple
of (name, offset, size) to the dataset's list of external files.
Each name must be a str, bytes, or os.PathLike; each offset and
size, an integer. If only a name is given instead of an iterable
of tuples, it is equivalent to [(name, 0, h5py.h5f.UNLIMITED)].
"""
if 'track_order' not in kwds:
kwds['track_order'] = h5.get_config().track_order
with phil:
group = self
if name:
if '/' in name:
h5objects = [obj for obj in name.split('/') if len(obj)]
name = h5objects[-1]
h5objects = h5objects[:-1]
for new_group in h5objects:
group = group.get(new_group) or group.create_group(new_group)
name = self._e(name)
dsid = dataset.make_new_dset(group, shape, dtype, data, name, **kwds)
dset = dataset.Dataset(dsid)
return dset
if vds_support:
def create_virtual_dataset(self, name, layout, fillvalue=None):
"""Create a new virtual dataset in this group.
See virtual datasets in the docs for more information.
name
(str) Name of the new dataset
layout
(VirtualLayout) Defines the sources for the virtual dataset
fillvalue
The value to use where there is no data.
"""
from .vds import VDSmap
# Encode filenames and dataset names appropriately.
sources = []
for vspace, file_name, dset_name, src_space in layout.sources:
if file_name == self.file.filename:
# use relative path if the source dataset is in the same
# file, in order to keep the virtual dataset valid in case
# the file is renamed.
file_name = '.'
sources.append(VDSmap(vspace, filename_encode(file_name),
self._e(dset_name), src_space))
with phil:
group = self
if name:
if '/' in name:
h5objects = [obj for obj in name.split('/') if len(obj)]
name = h5objects[-1]
h5objects = h5objects[:-1]
for new_group in h5objects:
group = group.get(new_group) or group.create_group(new_group)
name = self._e(name)
dsid = dataset.make_new_virtual_dset(group, layout.shape,
sources=sources, dtype=layout.dtype, name=name,
maxshape=layout.maxshape, fillvalue=fillvalue)
dset = dataset.Dataset(dsid)
return dset
def require_dataset(self, name, shape, dtype, exact=False, **kwds):
""" Open a dataset, creating it if it doesn't exist.
If keyword "exact" is False (default), an existing dataset must have
the same shape and a conversion-compatible dtype to be returned. If
True, the shape and dtype must match exactly.
Other dataset keywords (see create_dataset) may be provided, but are
only used if a new dataset is to be created.
Raises TypeError if an incompatible object already exists, or if the
shape or dtype don't match according to the above rules.
"""
with phil:
if not name in self:
return self.create_dataset(name, *(shape, dtype), **kwds)
if isinstance(shape, int):
shape = (shape,)
dset = self[name]
if not isinstance(dset, dataset.Dataset):
raise TypeError("Incompatible object (%s) already exists" % dset.__class__.__name__)
if not shape == dset.shape:
raise TypeError("Shapes do not match (existing %s vs new %s)" % (dset.shape, shape))
if exact:
if not dtype == dset.dtype:
raise TypeError("Datatypes do not exactly match (existing %s vs new %s)" % (dset.dtype, dtype))
elif not numpy.can_cast(dtype, dset.dtype):
raise TypeError("Datatypes cannot be safely cast (existing %s vs new %s)" % (dset.dtype, dtype))
return dset
def create_dataset_like(self, name, other, **kwupdate):
""" Create a dataset similar to `other`.
name
Name of the dataset (absolute or relative). Provide None to make
an anonymous dataset.
other
The dataset which the new dataset should mimic. All properties, such
as shape, dtype, chunking, ... will be taken from it, but no data
or attributes are being copied.
Any dataset keywords (see create_dataset) may be provided, including
shape and dtype, in which case the provided values take precedence over
those from `other`.
"""
for k in ('shape', 'dtype', 'chunks', 'compression',
'compression_opts', 'scaleoffset', 'shuffle', 'fletcher32',
'fillvalue'):
kwupdate.setdefault(k, getattr(other, k))
# TODO: more elegant way to pass these (dcpl to create_dataset?)
dcpl = other.id.get_create_plist()
kwupdate.setdefault('track_times', dcpl.get_obj_track_times())
kwupdate.setdefault('track_order', dcpl.get_attr_creation_order() > 0)
# Special case: the maxshape property always exists, but if we pass it
# to create_dataset, the new dataset will automatically get chunked
# layout. So we copy it only if it is different from shape.
if other.maxshape != other.shape:
kwupdate.setdefault('maxshape', other.maxshape)
return self.create_dataset(name, **kwupdate)
def require_group(self, name):
# TODO: support kwargs like require_dataset
"""Return a group, creating it if it doesn't exist.
TypeError is raised if something with that name already exists that
isn't a group.
"""
with phil:
if not name in self:
return self.create_group(name)
grp = self[name]
if not isinstance(grp, Group):
raise TypeError("Incompatible object (%s) already exists" % grp.__class__.__name__)
return grp
@with_phil
def __getitem__(self, name):
""" Open an object in the file """
if isinstance(name, h5r.Reference):
oid = h5r.dereference(name, self.id)
if oid is None:
raise ValueError("Invalid HDF5 object reference")
else:
oid = h5o.open(self.id, self._e(name), lapl=self._lapl)
otype = h5i.get_type(oid)
if otype == h5i.GROUP:
return Group(oid)
elif otype == h5i.DATASET:
return dataset.Dataset(oid, readonly=(self.file.mode == 'r'))
elif otype == h5i.DATATYPE:
return datatype.Datatype(oid)
else:
raise TypeError("Unknown object type")
def get(self, name, default=None, getclass=False, getlink=False):
""" Retrieve an item or other information.
"name" given only:
Return the item, or "default" if it doesn't exist
"getclass" is True:
Return the class of object (Group, Dataset, etc.), or "default"
if nothing with that name exists
"getlink" is True:
Return HardLink, SoftLink or ExternalLink instances. Return
"default" if nothing with that name exists.
"getlink" and "getclass" are True:
Return HardLink, SoftLink and ExternalLink classes. Return
"default" if nothing with that name exists.
Example:
>>> cls = group.get('foo', getclass=True)
>>> if cls == SoftLink:
"""
# pylint: disable=arguments-differ
with phil:
if not (getclass or getlink):
try:
return self[name]
except KeyError:
return default
if not name in self:
return default
elif getclass and not getlink:
typecode = h5o.get_info(self.id, self._e(name)).type
try:
return {h5o.TYPE_GROUP: Group,
h5o.TYPE_DATASET: dataset.Dataset,
h5o.TYPE_NAMED_DATATYPE: datatype.Datatype}[typecode]
except KeyError:
raise TypeError("Unknown object type")
elif getlink:
typecode = self.id.links.get_info(self._e(name)).type
if typecode == h5l.TYPE_SOFT:
if getclass:
return SoftLink
linkbytes = self.id.links.get_val(self._e(name))
return SoftLink(self._d(linkbytes))
elif typecode == h5l.TYPE_EXTERNAL:
if getclass:
return ExternalLink
filebytes, linkbytes = self.id.links.get_val(self._e(name))
return ExternalLink(
filename_decode(filebytes), self._d(linkbytes)
)
elif typecode == h5l.TYPE_HARD:
return HardLink if getclass else HardLink()
else:
raise TypeError("Unknown link type")
def __setitem__(self, name, obj):
""" Add an object to the group. The name must not already be in use.
The action taken depends on the type of object assigned:
Named HDF5 object (Dataset, Group, Datatype)
A hard link is created at "name" which points to the
given object.
SoftLink or ExternalLink
Create the corresponding link.
Numpy ndarray
The array is converted to a dataset object, with default
settings (contiguous storage, etc.).
Numpy dtype
Commit a copy of the datatype as a named datatype in the file.
Anything else
Attempt to convert it to an ndarray and store it. Scalar
values are stored as scalar datasets. Raise ValueError if we
can't understand the resulting array dtype.
"""
do_link = False
with phil:
name, lcpl = self._e(name, lcpl=True)
if isinstance(obj, HLObject):
h5o.link(obj.id, self.id, name, lcpl=lcpl, lapl=self._lapl)
elif isinstance(obj, SoftLink):
self.id.links.create_soft(name, self._e(obj.path),
lcpl=lcpl, lapl=self._lapl)
elif isinstance(obj, ExternalLink):
do_link = True
elif isinstance(obj, numpy.dtype):
htype = h5t.py_create(obj, logical=True)
htype.commit(self.id, name, lcpl=lcpl)
else:
ds = self.create_dataset(None, data=obj)
h5o.link(ds.id, self.id, name, lcpl=lcpl)
if do_link:
fn = filename_encode(obj.filename)
with phil:
self.id.links.create_external(name, fn, self._e(obj.path),
lcpl=lcpl, lapl=self._lapl)
@with_phil
def __delitem__(self, name):
""" Delete (unlink) an item from this group. """
self.id.unlink(self._e(name))
@with_phil
def __len__(self):
""" Number of members attached to this group """
return self.id.get_num_objs()
@with_phil
def __iter__(self):
""" Iterate over member names """
for x in self.id.__iter__():
yield self._d(x)
@with_phil
def __contains__(self, name):
""" Test if a member name exists """
return self._e(name) in self.id
def copy(self, source, dest, name=None,
shallow=False, expand_soft=False, expand_external=False,
expand_refs=False, without_attrs=False):
"""Copy an object or group.
The source can be a path, Group, Dataset, or Datatype object. The
destination can be either a path or a Group object. The source and
destinations need not be in the same file.
If the source is a Group object, all objects contained in that group
will be copied recursively.
When the destination is a Group object, by default the target will
be created in that group with its current name (basename of obj.name).
You can override that by setting "name" to a string.
There are various options which all default to "False":
- shallow: copy only immediate members of a group.
- expand_soft: expand soft links into new objects.
- expand_external: expand external links into new objects.
- expand_refs: copy objects that are pointed to by references.
- without_attrs: copy object without copying attributes.
Example:
>>> f = File('myfile.hdf5')
>>> f.listnames()
['MyGroup']
>>> f.copy('MyGroup', 'MyCopy')
>>> f.listnames()
['MyGroup', 'MyCopy']
"""
with phil:
if isinstance(source, HLObject):
source_path = '.'
else:
# Interpret source as a path relative to this group
source_path = source
source = self
if isinstance(dest, Group):
if name is not None:
dest_path = name
else:
# copy source into dest group: dest_name/source_name
dest_path = pp.basename(h5i.get_name(source[source_path].id))
elif isinstance(dest, HLObject):
raise TypeError("Destination must be path or Group object")
else:
# Interpret destination as a path relative to this group
dest_path = dest
dest = self
flags = 0
if shallow:
flags |= h5o.COPY_SHALLOW_HIERARCHY_FLAG
if expand_soft:
flags |= h5o.COPY_EXPAND_SOFT_LINK_FLAG
if expand_external:
flags |= h5o.COPY_EXPAND_EXT_LINK_FLAG
if expand_refs:
flags |= h5o.COPY_EXPAND_REFERENCE_FLAG
if without_attrs:
flags |= h5o.COPY_WITHOUT_ATTR_FLAG
if flags:
copypl = h5p.create(h5p.OBJECT_COPY)
copypl.set_copy_object(flags)
else:
copypl = None
h5o.copy(source.id, self._e(source_path), dest.id, self._e(dest_path),
copypl, base.dlcpl)
def move(self, source, dest):
""" Move a link to a new location in the file.
If "source" is a hard link, this effectively renames the object. If
"source" is a soft or external link, the link itself is moved, with its
value unmodified.
"""
with phil:
if source == dest:
return
self.id.links.move(self._e(source), self.id, self._e(dest),
lapl=self._lapl, lcpl=self._lcpl)
def visit(self, func):
""" Recursively visit all names in this group and subgroups (HDF5 1.8).
You supply a callable (function, method or callable object); it
will be called exactly once for each link in this group and every
group below it. Your callable must conform to the signature:
func(<member name>) => <None or return value>
Returning None continues iteration, returning anything else stops
and immediately returns that value from the visit method. No
particular order of iteration within groups is guaranteed.
Example:
>>> # List the entire contents of the file
>>> f = File("foo.hdf5")
>>> list_of_names = []
>>> f.visit(list_of_names.append)
"""
with phil:
def proxy(name):
""" Call the function with the text name, not bytes """
return func(self._d(name))
return h5o.visit(self.id, proxy)
def visititems(self, func):
""" Recursively visit names and objects in this group (HDF5 1.8).
You supply a callable (function, method or callable object); it
will be called exactly once for each link in this group and every
group below it. Your callable must conform to the signature:
func(<member name>, <object>) => <None or return value>
Returning None continues iteration, returning anything else stops
and immediately returns that value from the visit method. No
particular order of iteration within groups is guaranteed.
Example:
# Get a list of all datasets in the file
>>> mylist = []
>>> def func(name, obj):
... if isinstance(obj, Dataset):
... mylist.append(name)
...
>>> f = File('foo.hdf5')
>>> f.visititems(func)
"""
with phil:
def proxy(name):
""" Use the text name of the object, not bytes """
name = self._d(name)
return func(name, self[name])
return h5o.visit(self.id, proxy)
@with_phil
def __repr__(self):
if not self:
r = u"<Closed HDF5 group>"
else:
namestr = (
'"%s"' % self.name
) if self.name is not None else u"(anonymous)"
r = '<HDF5 group %s (%d members)>' % (namestr, len(self))
return r
class HardLink(object):
"""
Represents a hard link in an HDF5 file. Provided only so that
Group.get works in a sensible way. Has no other function.
"""
pass
class SoftLink(object):
"""
Represents a symbolic ("soft") link in an HDF5 file. The path
may be absolute or relative. No checking is performed to ensure
that the target actually exists.
"""
@property
def path(self):
""" Soft link value. Not guaranteed to be a valid path. """
return self._path
def __init__(self, path):
self._path = str(path)
def __repr__(self):
return '<SoftLink to "%s">' % self.path
class ExternalLink(object):
"""
Represents an HDF5 external link. Paths may be absolute or relative.
No checking is performed to ensure either the target or file exists.
"""
@property
def path(self):
""" Soft link path, i.e. the part inside the HDF5 file. """
return self._path
@property
def filename(self):
""" Path to the external HDF5 file in the filesystem. """
return self._filename
def __init__(self, filename, path):
self._filename = filename_decode(filename_encode(filename))
self._path = path
def __repr__(self):
return '<ExternalLink to "%s" in file "%s"' % (self.path,
self.filename)
| 36.408133 | 115 | 0.575305 |
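# A minimal, hedged usage sketch for the Group API defined above (file and
# dataset names are illustrative, not taken from the original source):
#
#   import h5py
#   with h5py.File('example.h5', 'w') as f:
#       grp = f.create_group('raw')                          # Group.create_group
#       dset = grp.create_dataset('signal', shape=(1000,), dtype='f4',
#                                 chunks=True, compression='gzip')
#       grp['alias'] = h5py.SoftLink('/raw/signal')          # __setitem__ with a SoftLink
#       same = grp.require_dataset('signal', (1000,), 'f4')  # returns the existing dataset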
73d94b4b368f931299cdfcee36fdfb1db1588774 | 2,883 | py | Python | tests/test__init__.py | tommilligan/OverTheWire | 8862562fd1cd58b5e6c0f22f2bbff21459e770ec | [
"Apache-2.0"
] | null | null | null | tests/test__init__.py | tommilligan/OverTheWire | 8862562fd1cd58b5e6c0f22f2bbff21459e770ec | [
"Apache-2.0"
] | null | null | null | tests/test__init__.py | tommilligan/OverTheWire | 8862562fd1cd58b5e6c0f22f2bbff21459e770ec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import argparse
import unittest
import OverTheWire
class TestOTWLevelSetup(unittest.TestCase):
def test_otw_level_setup_known_int(self):
level = OverTheWire.OTWLevel("bandit", 0)
self.assertEqual(level.levelNumber, 0)
self.assertEqual(level.levelName, "bandit0")
self.assertEqual(level.configFile, "connections.yml")
def test_otw_level_setup_known_str(self):
level = OverTheWire.OTWLevel("bandit", "0", configFile="hovercraft.yml")
self.assertEqual(level.levelNumber, 0)
self.assertEqual(level.levelName, "bandit0")
self.assertEqual(level.configFile, "hovercraft.yml")
class TestOTWLevelNormal(unittest.TestCase):
def setUp(self):
self.level = OverTheWire.OTWLevel("bandit", 0)
self.level.savePassword("eggs")
def test_otw_level_savePassword(self):
self.level.savePassword("spam")
def test_otw_level_loadPassword(self):
password = self.level.loadPassword()
self.assertEqual(password, "eggs")
def test_otw_level_config(self):
config = self.level.config()
self.assertEqual(config["domain"], "bandit.labs.overthewire.org")
self.assertEqual(config["port"], 2220)
def test_otw_level_connectionCommand(self):
command = self.level.connectionCommand()
self.assertEqual(command, "sshpass -p eggs ssh -o StrictHostKeyChecking=no -p 2220 bandit0@bandit.labs.overthewire.org")
def test_otw_level_startCommand(self):
command = self.level.startCommand()
self.assertEqual(command, "sshpass -p eggs ssh -o StrictHostKeyChecking=no -p 2220 bandit0@bandit.labs.overthewire.org")
def tearDown(self):
self.level.savePassword("bandit0")
class TestOTWLevelWeb(unittest.TestCase):
def setUp(self):
self.level = OverTheWire.OTWLevel("natas", 0)
self.level.savePassword("eggs")
def test_otw_level_browseCommand(self):
command = self.level.browseCommand()
self.assertEqual(command, "xdg-open http://natas0:eggs@natas0.natas.labs.overthewire.org/")
def test_otw_level_startCommand(self):
command = self.level.startCommand()
self.assertEqual(command, "xdg-open http://natas0:eggs@natas0.natas.labs.overthewire.org/")
def tearDown(self):
self.level.savePassword("natas0")
class TestOTWLevelEdge(unittest.TestCase):
def test_otw_level_loadPassword_not_exist(self):
level = OverTheWire.OTWLevel("bandit", -1)
with self.assertRaises(OverTheWire.OTWException):
password = level.loadPassword()
def test_otw_level_savePassword_permissions_error(self):
level = OverTheWire.OTWLevel("bandit", 0)
level.passwordFile = '/no-permissions-here'
with self.assertRaises(OverTheWire.OTWException):
level.savePassword("spam")
| 37.934211 | 128 | 0.700659 |
73d95b1ff529388e44d116b0be3f84fe9ebb77df | 1,067 | py | Python | soco/__init__.py | oyvindmal/SocoWebService | 3cb8f4c08109d07dced4b09621acaf5ac41c732b | [
"MIT"
] | 2 | 2015-08-08T22:26:39.000Z | 2015-08-16T13:59:04.000Z | soco/__init__.py | silky/SoCo | 6f318df953ebd11d472bba6c85c9b58840d2fed3 | [
"MIT"
] | null | null | null | soco/__init__.py | silky/SoCo | 6f318df953ebd11d472bba6c85c9b58840d2fed3 | [
"MIT"
] | 1 | 2021-07-18T03:19:07.000Z | 2021-07-18T03:19:07.000Z | # -*- coding: utf-8 -*-
""" SoCo (Sonos Controller) is a simple library to control Sonos speakers """
# There is no need for all strings here to be unicode, and Py2 cannot import
# modules with unicode names
# https://github.com/SoCo/SoCo/issues/98
# from __future__ import unicode_literals
# Will be parsed by setup.py to determine package metadata
__author__ = 'The SoCo-Team <python-soco@googlegroups.com>'
__version__ = '0.8'
__website__ = 'https://github.com/SoCo/SoCo'
__license__ = 'MIT License'
from .core import discover, SoCo, SonosDiscovery
from .exceptions import SoCoException, UnknownSoCoException
# You really should not `import *` - it is poor practice
# but if you do, here is what you get:
__all__ = [
'discover',
'SonosDiscovery',
'SoCo',
'SoCoException',
'UnknownSoCoException',
]
# http://docs.python.org/2/howto/logging.html#library-config
# Avoids spurious error messages if no logger is configured by the user
import logging
from .compat import NullHandler
logging.getLogger(__name__).addHandler(NullHandler())
| 29.638889 | 77 | 0.740394 |
73d97f9863238ebd21b595902fc7f34f8f632455 | 8,242 | py | Python | python-code/opengl-learning/pyglet-gl/14_batch_groups.py | juxiangwu/image-processing | c644ef3386973b2b983c6b6b08f15dc8d52cd39f | [
"Apache-2.0"
] | 13 | 2018-09-07T02:29:07.000Z | 2021-06-18T08:40:09.000Z | python-code/opengl-learning/pyglet-gl/14_batch_groups.py | juxiangwu/image-processing | c644ef3386973b2b983c6b6b08f15dc8d52cd39f | [
"Apache-2.0"
] | null | null | null | python-code/opengl-learning/pyglet-gl/14_batch_groups.py | juxiangwu/image-processing | c644ef3386973b2b983c6b6b08f15dc8d52cd39f | [
"Apache-2.0"
] | 4 | 2019-06-20T00:09:39.000Z | 2021-07-15T10:14:36.000Z | #coding:utf-8
from pyglet.gl import *
import numpy as np
import OpenGL.GL.shaders
import ctypes
import pyrr
import time
from math import sin
from pyrr import Vector3, matrix44, Matrix44
import time
import numpy
class ObjLoader:
def __init__(self):
self.vert_coords = []
self.text_coords = []
self.norm_coords = []
self.vertex_index = []
self.texture_index = []
self.normal_index = []
self.model_vertices = []
self.model_textures = []
self.model_normals = []
def load_model(self, file):
for line in open(file, 'r'):
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'v':
self.vert_coords.append(values[1:4])
if values[0] == 'vt':
self.text_coords.append(values[1:3])
if values[0] == 'vn':
self.norm_coords.append(values[1:4])
if values[0] == 'f':
face_i = []
text_i = []
norm_i = []
for v in values[1:4]:
w = v.split('/')
face_i.append(int(w[0])-1)
text_i.append(int(w[1])-1)
norm_i.append(int(w[2])-1)
self.vertex_index.append(face_i)
self.texture_index.append(text_i)
self.normal_index.append(norm_i)
self.vertex_index = [y for x in self.vertex_index for y in x]
self.texture_index = [y for x in self.texture_index for y in x]
self.normal_index = [y for x in self.normal_index for y in x]
for i in self.vertex_index:
self.model_vertices.extend(map(float, self.vert_coords[i]))
for i in self.texture_index:
self.model_textures.extend(map(float, self.text_coords[i]))
for i in self.normal_index:
self.model_normals.extend(map(float, self.norm_coords[i]))
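# Note added for clarity (not in the original file): load_model above assumes
# triangulated faces in the fully specified "f v/vt/vn v/vt/vn v/vt/vn" form,
# e.g. a line such as
#
#   f 1/1/1 2/3/2 3/4/2
#
# where every face vertex carries a 1-based position, texture-coordinate and
# normal index (hence the "- 1" when they are stored). Faces written as
# "f v1 v2 v3" or "f v1//vn1 ..." would break the w[1]/w[2] lookups.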
def load_shader(shader_file):
with open(shader_file) as f:
shader_source = f.read()
return str.encode(shader_source)
def compile_shader(vs, fs):
vert_shader = load_shader(vs)
frag_shader = load_shader(fs)
vertex_buff = ctypes.create_string_buffer(vert_shader)
c_vertex = ctypes.cast(ctypes.pointer(ctypes.pointer(vertex_buff)), ctypes.POINTER(ctypes.POINTER(GLchar)))
vertex_shader = glCreateShader(GL_VERTEX_SHADER)
glShaderSource(vertex_shader, 1, c_vertex, None)
glCompileShader(vertex_shader)
fragment_buff = ctypes.create_string_buffer(frag_shader)
c_fragment = ctypes.cast(ctypes.pointer(ctypes.pointer(fragment_buff)), ctypes.POINTER(ctypes.POINTER(GLchar)))
fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)
glShaderSource(fragment_shader, 1, c_fragment, None)
glCompileShader(fragment_shader)
shader = glCreateProgram()
glAttachShader(shader, vertex_shader)
glAttachShader(shader, fragment_shader)
glLinkProgram(shader)
return shader
main_batch = pyglet.graphics.Batch()
class Shader:
model_loc = None
@staticmethod
def init():
shader = compile_shader("datas/glsl/video_13_vert.glsl", "datas/glsl/video_13_frag.glsl")
glUseProgram(shader)
view = matrix44.create_from_translation(Vector3([0.0, 0.0, -2.0])).flatten().astype("float32")
projection = matrix44.create_perspective_projection_matrix(45.0, 1280 / 720, 0.1, 100.0).flatten().astype("float32")
c_view = numpy.ctypeslib.as_ctypes(view)
c_projection = numpy.ctypeslib.as_ctypes(projection)
view_loc = glGetUniformLocation(shader, b"view")
proj_loc = glGetUniformLocation(shader, b"projection")
Shader.model_loc = glGetUniformLocation(shader, b"model")
glUniformMatrix4fv(view_loc, 1, GL_FALSE, c_view)
glUniformMatrix4fv(proj_loc, 1, GL_FALSE, c_projection)
class Monkey:
def __init__(self):
mesh = ObjLoader()
mesh.load_model("datas/models/opengl/monkey.obj")
num_verts = len(mesh.model_vertices) // 3
group = pyglet.graphics.Group()
group.set_state = self.state
self.verts = main_batch.add(num_verts, GL_TRIANGLES, group, ('v3f', mesh.model_vertices),
('t2f', mesh.model_textures))
self.model = matrix44.create_from_translation(Vector3([-2.0, 0.0, -4.0])).flatten().astype("float32")
self.c_model = numpy.ctypeslib.as_ctypes(self.model)
# region texture settings
self.texture = GLuint(0)
glGenTextures(1, self.texture)
glBindTexture(GL_TEXTURE_2D, self.texture)
# set the texture wrapping
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
# set the texture filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
image = pyglet.image.load('datas/models/opengl/monkey.jpg')
image_data = image.get_data('RGB', image.pitch)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image.width, image.height, 0, GL_RGB, GL_UNSIGNED_BYTE, image_data)
# endregion
def state(self):
# vertices
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, self.verts.vertices)
# textures
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, self.verts.tex_coords)
glBindTexture(GL_TEXTURE_2D, self.texture)
glUniformMatrix4fv(Shader.model_loc, 1, GL_FALSE, self.c_model)
class Cube:
def __init__(self):
mesh = ObjLoader()
mesh.load_model("datas/models/opengl/cube.obj")
num_verts = len(mesh.model_vertices) // 3
group = pyglet.graphics.Group()
group.set_state = self.state
self.verts = main_batch.add(num_verts, GL_TRIANGLES, group, ('v3f', mesh.model_vertices),
('t2f', mesh.model_textures))
self.model = matrix44.create_from_translation(Vector3([2.0, 0.0, -4.0])).flatten().astype("float32")
self.c_model = numpy.ctypeslib.as_ctypes(self.model)
# region texture settings
self.texture = GLuint(0)
glGenTextures(1, self.texture)
glBindTexture(GL_TEXTURE_2D, self.texture)
# set the texture wrapping
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
# set the texture filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
image = pyglet.image.load('datas/models/opengl/cube.jpg')
image_data = image.get_data('RGB', image.pitch)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image.width, image.height, 0, GL_RGB, GL_UNSIGNED_BYTE, image_data)
# endregion
def state(self):
# vertices
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, self.verts.vertices)
# textures
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, self.verts.tex_coords)
glBindTexture(GL_TEXTURE_2D, self.texture)
glUniformMatrix4fv(Shader.model_loc, 1, GL_FALSE, self.c_model)
class MyWindow(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_minimum_size(400, 300)
glClearColor(0.2, 0.3, 0.2, 1.0)
glEnable(GL_DEPTH_TEST)
Shader.init()
self.cube = Cube()
self.monkey = Monkey()
def on_draw(self):
self.clear()
main_batch.draw()
def on_resize(self, width, height):
glViewport(0, 0, width, height)
def update(self, dt):
pass
if __name__ == "__main__":
window = MyWindow(800, 600, "My Pyglet Window", resizable=True)
pyglet.clock.schedule_interval(window.update, 1/60.0)
pyglet.app.run() | 34.923729 | 124 | 0.643169 |
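# Note added for clarity (not in the original file): pyglet calls a Group's
# set_state() just before drawing the vertex lists added under that group,
# and unset_state() afterwards. Re-assigning `group.set_state = self.state`
# therefore lets each model bind its own texture, vertex/texture pointers and
# model matrix while everything is still rendered through the single
# `main_batch.draw()` call in MyWindow.on_draw().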
73d9972f37ab33f8212b22ae61e0c9d5b3821aaa | 166 | py | Python | src/lexico/lexical_error.py | thrnkk/compiler | 15e1ec1848ef432c1dfdf8b1f9ae79641bd253ca | [
"MIT"
] | 1 | 2021-10-05T13:12:09.000Z | 2021-10-05T13:12:09.000Z | src/lexico/lexical_error.py | thrnkk/compiler | 15e1ec1848ef432c1dfdf8b1f9ae79641bd253ca | [
"MIT"
] | null | null | null | src/lexico/lexical_error.py | thrnkk/compiler | 15e1ec1848ef432c1dfdf8b1f9ae79641bd253ca | [
"MIT"
] | null | null | null | from .analysis_error import AnalysisError
class LexicalError(AnalysisError):
def __init__(self, msg, position = None):
super().__init__(msg, position)
| 20.75 | 45 | 0.728916 |
73d99ec8d205a102b0f0a557b5bff3a5294995a4 | 402 | py | Python | tests/test_mantle/mothball/register/reg4rs.py | splhack/loam | 10b08bd622b7cfd63eabaec4729f6238e4521b30 | [
"MIT"
] | 14 | 2017-10-08T09:16:10.000Z | 2021-11-27T19:12:24.000Z | tests/test_mantle/mothball/register/reg4rs.py | splhack/loam | 10b08bd622b7cfd63eabaec4729f6238e4521b30 | [
"MIT"
] | 7 | 2018-04-12T21:33:49.000Z | 2018-08-21T22:14:20.000Z | tests/test_mantle/mothball/register/reg4rs.py | splhack/loam | 10b08bd622b7cfd63eabaec4729f6238e4521b30 | [
"MIT"
] | 3 | 2018-07-24T04:55:02.000Z | 2019-12-30T08:12:39.000Z | import magma as m
from mantle import *
from loam.boards.papilioone import PapilioOne
from loam.shields.megawing import MegaWing
megawing = MegaWing(PapilioOne)
megawing.Clock.on()
megawing.Switch.on(6)
megawing.LED.on(4)
main = megawing.main()
I = main.SWITCH[0:4]
RESET = main.SWITCH[4]
SET = main.SWITCH[5]
O = main.LED
reg = Register(4,r=True,s=True)
m.wire( reg(I, RESET=RESET, SET=SET), O )
| 18.272727 | 45 | 0.731343 |
73d9a378cdd5d6aeb3e3a18cfc8c07a0e3d79057 | 4,106 | py | Python | alipay/aop/api/request/AlipayEbppInvoiceEnterpriseconsumeConsumeBatchqueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayEbppInvoiceEnterpriseconsumeConsumeBatchqueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayEbppInvoiceEnterpriseconsumeConsumeBatchqueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayEbppInvoiceEnterpriseconsumeConsumeBatchqueryModel import AlipayEbppInvoiceEnterpriseconsumeConsumeBatchqueryModel
class AlipayEbppInvoiceEnterpriseconsumeConsumeBatchqueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayEbppInvoiceEnterpriseconsumeConsumeBatchqueryModel):
self._biz_content = value
else:
self._biz_content = AlipayEbppInvoiceEnterpriseconsumeConsumeBatchqueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.ebpp.invoice.enterpriseconsume.consume.batchquery'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 28.317241 | 148 | 0.656113 |
73d9b8383b995374d5684a4d3ffb9aee0d89d611 | 6,807 | py | Python | tests/docker_compose/test_docker_compose.py | srbhr/jina | 29fec50f84c86dc23400afc11508669cb631ee15 | [
"Apache-2.0"
] | null | null | null | tests/docker_compose/test_docker_compose.py | srbhr/jina | 29fec50f84c86dc23400afc11508669cb631ee15 | [
"Apache-2.0"
] | null | null | null | tests/docker_compose/test_docker_compose.py | srbhr/jina | 29fec50f84c86dc23400afc11508669cb631ee15 | [
"Apache-2.0"
] | null | null | null | # kind version has to be bumped to v0.11.1 since pytest-kind is just using v0.10.0 which does not work on ubuntu in ci
import pytest
import os
import time
from jina import Flow, Document
class DockerComposeFlow:
def __init__(self, dump_path):
self.dump_path = dump_path
def __enter__(self):
os.system(
f"docker-compose -f {self.dump_path} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
def __exit__(self, exc_type, exc_val, exc_tb):
os.system(
f"docker-compose -f {self.dump_path} --project-directory . down --remove-orphans"
)
async def run_test(flow, endpoint, num_docs=10, request_size=10):
# start port forwarding
from jina.clients import Client
client_kwargs = dict(
host='localhost',
port=flow.port_expose,
asyncio=True,
)
client_kwargs.update(flow._common_kwargs)
client = Client(**client_kwargs)
client.show_progress = True
responses = []
async for resp in client.post(
endpoint,
inputs=[Document() for _ in range(num_docs)],
return_results=True,
request_size=request_size,
):
responses.append(resp)
return responses
@pytest.fixture()
def flow_with_sharding(docker_images, polling):
flow = Flow(name='test-flow-with-sharding', port_expose=9090, protocol='http').add(
name='test_executor',
shards=2,
replicas=2,
uses=f'docker://{docker_images[0]}',
uses_after=f'docker://{docker_images[1]}',
polling=polling,
)
return flow
@pytest.fixture
def flow_configmap(docker_images):
flow = Flow(name='k8s-flow-configmap', port_expose=9090, protocol='http').add(
name='test_executor',
uses=f'docker://{docker_images[0]}',
env={'k1': 'v1', 'k2': 'v2'},
)
return flow
@pytest.fixture
def flow_with_needs(docker_images):
flow = (
Flow(
name='test-flow-with-needs',
port_expose=9090,
protocol='http',
)
.add(
name='segmenter',
uses=f'docker://{docker_images[0]}',
)
.add(
name='textencoder',
uses=f'docker://{docker_images[0]}',
needs='segmenter',
)
.add(
name='imageencoder',
uses=f'docker://{docker_images[0]}',
needs='segmenter',
)
.add(
name='merger',
uses_before=f'docker://{docker_images[1]}',
needs=['imageencoder', 'textencoder'],
)
)
return flow
@pytest.mark.asyncio
@pytest.mark.timeout(3600)
@pytest.mark.parametrize(
'docker_images',
[['test-executor', 'executor-merger', 'jinaai/jina']],
indirect=True,
)
async def test_flow_with_needs(logger, flow_with_needs, tmpdir, docker_images):
dump_path = os.path.join(str(tmpdir), 'docker-compose.yml')
flow_with_needs.to_docker_compose_yaml(dump_path, 'default')
with DockerComposeFlow(dump_path):
resp = await run_test(
flow=flow_with_needs,
endpoint='/debug',
)
expected_traversed_executors = {
'segmenter',
'imageencoder',
'textencoder',
}
docs = resp[0].docs
assert len(docs) == 10
for doc in docs:
assert set(doc.tags['traversed-executors']) == expected_traversed_executors
@pytest.mark.timeout(3600)
@pytest.mark.asyncio
@pytest.mark.parametrize(
'docker_images',
[['test-executor', 'executor-merger', 'jinaai/jina']],
indirect=True,
)
@pytest.mark.parametrize('polling', ['ANY', 'ALL'])
async def test_flow_with_sharding(flow_with_sharding, polling, tmpdir):
dump_path = os.path.join(str(tmpdir), 'docker-compose.yml')
flow_with_sharding.to_docker_compose_yaml(dump_path)
with DockerComposeFlow(dump_path):
resp = await run_test(
flow=flow_with_sharding, endpoint='/debug', num_docs=10, request_size=1
)
assert len(resp) == 10
docs = resp[0].docs
for r in resp[1:]:
docs.extend(r.docs)
assert len(docs) == 10
runtimes_to_visit = {
'test_executor-0/rep-0',
'test_executor-1/rep-0',
'test_executor-0/rep-1',
'test_executor-1/rep-1',
}
for doc in docs:
if polling == 'ALL':
assert len(set(doc.tags['traversed-executors'])) == 2
assert set(doc.tags['shard_id']) == {0, 1}
assert doc.tags['parallel'] == [2, 2]
assert doc.tags['shards'] == [2, 2]
for executor in doc.tags['traversed-executors']:
if executor in runtimes_to_visit:
runtimes_to_visit.remove(executor)
else:
assert len(set(doc.tags['traversed-executors'])) == 1
assert len(set(doc.tags['shard_id'])) == 1
assert 0 in set(doc.tags['shard_id']) or 1 in set(doc.tags['shard_id'])
assert doc.tags['parallel'] == [2]
assert doc.tags['shards'] == [2]
for executor in doc.tags['traversed-executors']:
if executor in runtimes_to_visit:
runtimes_to_visit.remove(executor)
assert len(runtimes_to_visit) == 0
@pytest.mark.timeout(3600)
@pytest.mark.asyncio
@pytest.mark.parametrize(
'docker_images', [['test-executor', 'jinaai/jina']], indirect=True
)
async def test_flow_with_configmap(flow_configmap, docker_images, tmpdir):
dump_path = os.path.join(str(tmpdir), 'docker-compose.yml')
flow_configmap.to_docker_compose_yaml(dump_path)
with DockerComposeFlow(dump_path):
resp = await run_test(
flow=flow_configmap,
endpoint='/env',
)
docs = resp[0].docs
assert len(docs) == 10
for doc in docs:
assert doc.tags['k1'] == 'v1'
assert doc.tags['k2'] == 'v2'
assert doc.tags['env'] == {'k1': 'v1', 'k2': 'v2'}
@pytest.mark.asyncio
@pytest.mark.timeout(3600)
@pytest.mark.parametrize(
'docker_images',
[['test-executor', 'jinaai/jina']],
indirect=True,
)
async def test_flow_with_workspace(logger, docker_images, tmpdir):
flow = Flow(name='k8s_flow-with_workspace', port_expose=9090, protocol='http').add(
name='test_executor',
uses=f'docker://{docker_images[0]}',
workspace='/shared',
)
dump_path = os.path.join(str(tmpdir), 'docker-compose.yml')
flow.to_docker_compose_yaml(dump_path)
with DockerComposeFlow(dump_path):
resp = await run_test(
flow=flow,
endpoint='/workspace',
)
docs = resp[0].docs
assert len(docs) == 10
for doc in docs:
assert doc.tags['workspace'] == '/shared/TestExecutor/0'
| 29.214592 | 118 | 0.603643 |
73d9bee8e07f1bdb72793e539ea09a4ec7114b75 | 979 | py | Python | neatsociety/__init__.py | machinebrains/neat-society | 395c092083aa62ac8ad2a93a9afde659ecbd85fd | [
"BSD-3-Clause"
] | 2 | 2018-03-03T16:30:09.000Z | 2022-01-02T17:02:22.000Z | neatsociety/__init__.py | machinebrains/neat-society | 395c092083aa62ac8ad2a93a9afde659ecbd85fd | [
"BSD-3-Clause"
] | null | null | null | neatsociety/__init__.py | machinebrains/neat-society | 395c092083aa62ac8ad2a93a9afde659ecbd85fd | [
"BSD-3-Clause"
] | null | null | null | from neatsociety import activations
activation_functions = activations.ActivationFunctionSet()
activation_functions.add('sigmoid', activations.sigmoid_activation)
activation_functions.add('tanh', activations.tanh_activation)
activation_functions.add('sin', activations.sin_activation)
activation_functions.add('gauss', activations.gauss_activation)
activation_functions.add('relu', activations.relu_activation)
activation_functions.add('identity', activations.identity_activation)
activation_functions.add('clamped', activations.clamped_activation)
activation_functions.add('inv', activations.inv_activation)
activation_functions.add('log', activations.log_activation)
activation_functions.add('exp', activations.exp_activation)
activation_functions.add('abs', activations.abs_activation)
activation_functions.add('hat', activations.hat_activation)
activation_functions.add('square', activations.square_activation)
activation_functions.add('cube', activations.cube_activation)
| 51.526316 | 69 | 0.858018 |
73d9c033f4bc2d45ffc4f5f4a9c667a4a940b928 | 7,654 | py | Python | lsstlc.py | ywx649999311/lsst_cadence | fe08fdedb8f45fab33effd40eff43626d81ef10a | [
"MIT"
] | null | null | null | lsstlc.py | ywx649999311/lsst_cadence | fe08fdedb8f45fab33effd40eff43626d81ef10a | [
"MIT"
] | null | null | null | lsstlc.py | ywx649999311/lsst_cadence | fe08fdedb8f45fab33effd40eff43626d81ef10a | [
"MIT"
] | null | null | null | """Module including function lc2file and two sub-classes class lsstlc and class extLC."""
import numpy as np
import kali
from astropy import stats
import os
def lc2file(file_dir, lc, full=False, timescales=None):
"""Save light curve to a .npz file.
Args:
        file_dir(str): directory in which to save the .npz file
lc: Kali light curve object
full(bool): Full mock LC or not
timescales(list): CARMA coefficients in timescale format
"""
# Check out if full LC is saved, if so require more input!
if full:
if timescales is None:
            raise Exception('Full LC needs CARMA timescales!')
elif not isinstance(timescales, list):
raise Exception('Timescales must contained in a list!')
lc_id = 'c{}{}'.format(lc.pSim, lc.qSim)
for i in range(len(timescales)):
lc_id += '_{:.2f}'.format(timescales[i])
meta = [lc.pSim, lc.qSim, lc.fracNoiseToSignal, lc.fracIntrinsicVar]
if 'mock_t' in lc.__dict__:
np.savez(os.path.join(file_dir, lc.name), t=lc.t, x=lc.x, y=lc.y, yerr=lc.yerr, mask=lc.mask, meta=meta, mock_t=lc.mock_t)
return lc.name
else:
np.savez(os.path.join(file_dir, lc_id), t=lc.t, x=lc.x, y=lc.y, yerr=lc.yerr, mask=lc.mask, meta=meta)
return lc_id
class lsstlc(kali.lc.lc):
"""A subclass of Kali's lc class.
This class down sample the mock lc with given dates. More flexible plotting is also available.
"""
def __init__(self, ra, dec, obsTimes, mockLC, min_sep=None, band='a', fix_dt=False, **kwargs):
"""Initiation method.
Args:
ra(float): Right ascension
dec(float): Declination
obsTimes(ndarray): A numpy array of the observing dates in seconds
mockLC: Mock lightcuve simulated using Kali
min_sep(float): Min intra-night seperation (in hours) for LSST observations of the particular
point on the sky
band(str): Observing band, defaults to 'a' stands for all bands.
fixed_dt:(bool): Whether the full LC is sampled every 30 sec.
"""
self._ra, self._dec = ra, dec
if fix_dt:
self.min_sep = 30.0
elif min_sep is None:
            raise Exception('Minimum separation is either fixed at 30 sec or must be provided (in hours)!')
else:
self.min_sep = min_sep*3600 # hours to seconds (floor to avoid dim error)
self.obsTimes = obsTimes
self.mockLC = mockLC
name = '{}_{}_{}_c{}{}'.format(ra, dec, band, mockLC.pSim, mockLC.qSim)
kali.lc.lc.__init__(self, name=name, band=band, pSim=self.mockLC.pSim, qSim=self.mockLC.qSim)
def read(self, name=None, band=None, path=None, **kwargs):
"""Method will be called first during object initiation.
Args:
name(str, optional): Name of the light curve
band(str, optional): Observing band
path(str, optional): Not used in the context, just to match the orgianl function format
"""
self.name = name
self.band = band
self.xunit = kwargs.get('xunit', r'$t$')
self.yunit = kwargs.get('yunit', r'$F$')
opSim_index = np.around(self.obsTimes/self.min_sep).astype('int')
mark = np.full((self.mockLC.t.shape[0],), False, dtype='bool')
mark[opSim_index] = True
self.mock_t = self.mockLC.t[np.where(mark)] # mock_t is time from mock LC
self.x = self.mockLC.x[np.where(mark)]
self.y = self.mockLC.y[np.where(mark)]
self.yerr = self.mockLC.yerr[np.where(mark)]
self.mask = self.mockLC.mask[np.where(mark)]
self._numCadences = self.mock_t.shape[0]
self.startT = self.mock_t[0]
self.t = self.mock_t - self.startT # get t for new LC
def write(self, name=None, band=None, pwd=None, **kwargs):
"""Not implemented, but required to complet the class."""
pass
def plot_x_y_err(self):
""""Return the entries for plotting.
Returns:
x(ndarray): An array storing the observation timestamps in days
y(ndarray): An array storing the intrinsic flux of the AGN
err_x(ndarray): same as x
err_y(ndarray): An array storing the observed flux (with noise) of the AGN
err_yerr(ndarray): An array storing the error bar magnitude
"""
x = self.mock_t
y = self.x - np.mean(self.x) + np.mean(self.y[np.where(self.mask == 1.0)[0]])
err_x = self.mock_t[np.where(self.mask == 1.0)[0]]
err_y = self.y[np.where(self.mask == 1.0)[0]]
err_yerr = self.yerr[np.where(self.mask == 1.0)[0]]
return x, y, err_x, err_y, err_yerr
def periodogram_sb(self, nterms=1):
"""Single-band periodogram."""
if (hasattr(self, '_periodogramfreqs_sb') and
hasattr(self, '_periodogram_sb') and
hasattr(self, '_periodogramerr_sb')):
return self._periodogramfreqs_sb, self._periodogram_sb, self._periodogramerr_sb
else:
# if self.numCadences > 50:
# model = gatspy.periodic.LombScargleFast()
# else:
# model = gatspy.periodic.LombScargle()
ls = stats.LombScargle(self.t, self.y, self.yerr, nterms=nterms)
f, psd = ls.autopower(method='fast', normalization='psd', maximum_frequency=1/self.mindt)
self._periodogramfreqs_sb = np.require(np.array(f), requirements=['F', 'A', 'W', 'O', 'E'])
self._periodogram_sb = np.require(np.array(psd), requirements=['F', 'A', 'W', 'O', 'E'])
self._periodogramerr_sb = np.require(np.array(self._periodogram_sb.shape[0]*[0.0]), requirements=['F', 'A', 'W', 'O', 'E'])
for i in range(self._periodogram_sb.shape[0]):
if self._periodogram_sb[i] <= 0.0:
self._periodogram_sb[i] = np.nan
return self._periodogramfreqs_sb, self._periodogram_sb, self._periodogramerr_sb
class extLC(kali.lc.lc):
"""subclass of kali.lc to readin LC saved to file."""
# Class variable are used to update LC model parameters
pSim = 0
qSim = 0
fracIntrinsicVar = 0.15
fracNoiseToSignal = 0.001
def __init__(self, file_path):
"""Init function."""
path = file_path
base = os.path.basename(file_path)
name = os.path.splitext(base)[0]
band = ''
kali.lc.lc.__init__(self, path=path, name=name, band=band)
self._pSim = extLC.pSim
self._qSim = extLC.qSim
self._fracIntrinsicVar = extLC.fracIntrinsicVar
self._fracNoiseToSignal = extLC.fracNoiseToSignal
def read(self, path, name=None, band=None, **kwargs):
"""Overwrite the default read function."""
lc_data = np.load(path)
if 'mock_t' in lc_data.files:
self.mock_t = lc_data['mock_t']
self.t = lc_data['t']
self.x = lc_data['x']
self.y = lc_data['y']
self.yerr = lc_data['yerr']
self.mask = lc_data['mask']
meta = lc_data['meta']
self.startT = self.t[0]
self._numCadences = self.t.shape[0]
self.name = name
self.band = band
self.xunit = kwargs.get('xunit', r'$t$')
self.yunit = kwargs.get('yunit', r'$F$')
extLC.pSim = int(meta[0])
extLC.qSim = int(meta[1])
extLC.fracNoiseToSignal = float(meta[2])
extLC.fracIntrinsicVar = float(meta[3])
def write(self, name=None, band=None, pwd=None, **kwargs):
"""Not implemented, but required to complet the class."""
pass
| 39.05102 | 135 | 0.602691 |
73d9ee265cfe6e744a45b6b3e480b85ade49e699 | 1,035 | py | Python | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/ina260_averaging.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 47 | 2021-02-15T23:02:36.000Z | 2022-03-04T21:30:03.000Z | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/ina260_averaging.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 8 | 2019-03-09T22:29:46.000Z | 2021-09-27T16:45:25.000Z | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/ina260_averaging.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 14 | 2021-02-20T17:40:56.000Z | 2022-01-01T19:53:38.000Z | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
from adafruit_ina260 import INA260, AveragingCount
i2c = board.I2C()
ina260 = INA260(i2c)
# Raise the averaging count to a larger number to smooth out the results
ina260.averaging_count = AveragingCount.COUNT_4
while True:
print("Current (average count 4): %.2f" % (ina260.current))
print("Voltage (average count 4): %.2f" % (ina260.voltage))
print("Power (average count 4): %.2f" % (ina260.power))
time.sleep(1)
# The difference can be seen most clearly using a serial plotter. Comment out the above
# and then switch between uncommenting *one* of the two below to compare
# ina260.averaging_count = AveragingCount.COUNT_1
# while True:
# print("%.2f, %.2f, %.2f"%(ina260.current, ina260.voltage, ina260.power))
# time.sleep(.5)
# ina260.averaging_count = AveragingCount.COUNT_4
# while True:
# print("%.2f, %.2f, %.2f"%(ina260.current, ina260.voltage, ina260.power))
# time.sleep(.5)
| 32.34375 | 78 | 0.71401 |
73da2301016bd0db72b8d57f1da8b9e5e59a04d8 | 5,463 | py | Python | tests/cupy_tests/random_tests/test_permutations.py | andy6975/cupy | 34b388e4a4fe7c59092b4d4c9c96b2f307e49e46 | [
"MIT"
] | 1 | 2019-12-01T09:08:14.000Z | 2019-12-01T09:08:14.000Z | tests/cupy_tests/random_tests/test_permutations.py | hephaex/cupy | 5cf50a93bbdebe825337ed7996c464e84b1495ba | [
"MIT"
] | 1 | 2019-08-05T09:36:13.000Z | 2019-08-06T12:03:01.000Z | tests/cupy_tests/random_tests/test_permutations.py | hephaex/cupy | 5cf50a93bbdebe825337ed7996c464e84b1495ba | [
"MIT"
] | 1 | 2022-03-24T13:19:55.000Z | 2022-03-24T13:19:55.000Z | import unittest
import cupy
from cupy import testing
from cupy.testing import condition
import numpy
@testing.parameterize(
{'seed': None},
{'seed': 0},
)
@testing.gpu
class TestPermutations(unittest.TestCase):
def _xp_random(self, xp):
if self.seed is None:
return xp.random
else:
return xp.random.RandomState(seed=self.seed)
# Test ranks
@testing.numpy_cupy_raises()
def test_permutation_zero_dim(self, xp):
xp_random = self._xp_random(xp)
a = testing.shaped_random((), xp)
xp_random.permutation(a)
# Test same values
@testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True)
def test_permutation_sort_1dim(self, dtype):
cupy_random = self._xp_random(cupy)
a = cupy.arange(10, dtype=dtype)
b = cupy.copy(a)
c = cupy_random.permutation(a)
testing.assert_allclose(a, b)
testing.assert_allclose(b, cupy.sort(c))
@testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True)
def test_permutation_sort_ndim(self, dtype):
cupy_random = self._xp_random(cupy)
a = cupy.arange(15, dtype=dtype).reshape(5, 3)
b = cupy.copy(a)
c = cupy_random.permutation(a)
testing.assert_allclose(a, b)
testing.assert_allclose(b, cupy.sort(c, axis=0))
# Test seed
@testing.for_all_dtypes()
def test_permutation_seed1(self, dtype):
a = testing.shaped_random((10,), cupy, dtype)
b = cupy.copy(a)
cupy_random = self._xp_random(cupy)
if self.seed is None:
cupy_random.seed(0)
pa = cupy_random.permutation(a)
cupy_random = self._xp_random(cupy)
if self.seed is None:
cupy_random.seed(0)
pb = cupy_random.permutation(b)
testing.assert_allclose(pa, pb)
@testing.gpu
class TestShuffle(unittest.TestCase):
# Test ranks
@testing.numpy_cupy_raises()
def test_shuffle_zero_dim(self, xp):
a = testing.shaped_random((), xp)
xp.random.shuffle(a)
# Test same values
@testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True)
def test_shuffle_sort_1dim(self, dtype):
a = cupy.arange(10, dtype=dtype)
b = cupy.copy(a)
cupy.random.shuffle(a)
testing.assert_allclose(cupy.sort(a), b)
@testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True)
def test_shuffle_sort_ndim(self, dtype):
a = cupy.arange(15, dtype=dtype).reshape(5, 3)
b = cupy.copy(a)
cupy.random.shuffle(a)
testing.assert_allclose(cupy.sort(a, axis=0), b)
# Test seed
@testing.for_all_dtypes()
def test_shuffle_seed1(self, dtype):
a = testing.shaped_random((10,), cupy, dtype)
b = cupy.copy(a)
cupy.random.seed(0)
cupy.random.shuffle(a)
cupy.random.seed(0)
cupy.random.shuffle(b)
testing.assert_allclose(a, b)
@testing.parameterize(*(testing.product({
'num': [0, 1, 100, 1000, 10000, 100000],
})))
@testing.gpu
class TestPermutationSoundness(unittest.TestCase):
def setUp(self):
a = cupy.random.permutation(self.num)
self.a = a.get()
# Test soundness
@condition.repeat(3)
def test_permutation_soundness(self):
assert(numpy.sort(self.a) == numpy.arange(self.num)).all()
@testing.parameterize(*(testing.product({
'offset': [0, 17, 34, 51],
'gap': [1, 2, 3, 5, 7],
'mask': [1, 2, 4, 8, 16, 32, 64, 128],
})))
@testing.gpu
class TestPermutationRandomness(unittest.TestCase):
num = 256
def setUp(self):
a = cupy.random.permutation(self.num)
self.a = a.get()
self.num_half = int(self.num / 2)
    # Simple bit-proportion test.
    # This test checks, in a rough way, that the permutation looks random.
    # The intuition: if you build a sub-array by regularly extracting half of
    # the elements from the permuted array, that sub-array should also look
    # random, so at every bit position the counts of 0s and 1s over the
    # sub-array's elements should come out roughly equal when the length of
    # the original array is 2^N.
    # Note that this is not an established method for checking randomness.
    # TODO(anaruse): implement randomness check using some established methods.
@condition.repeat_with_success_at_least(5, 3)
def test_permutation_randomness(self):
if self.mask > self.num_half:
return
index = numpy.arange(self.num_half)
index = (index * self.gap + self.offset) % self.num
samples = self.a[index]
ret = (samples & self.mask > 0)
count = numpy.count_nonzero(ret) # expectation: self.num_half / 2
if count > self.num_half - count:
count = self.num_half - count
prob_le_count = self._calc_probability(count)
if prob_le_count < 0.001:
            self.fail('prob_le_count=%f is too small' % prob_le_count)
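    # A sketch of the math implemented below: under a uniformly random
    # permutation, `count` follows a hypergeometric distribution, and
    # _calc_probability computes its lower tail
    #   P(X <= count) = sum_{i=0}^{count} C(num_half, i)**2 / C(num, num_half)
    # using C(num_half, i) * C(num_half, num_half - i) == C(num_half, i)**2.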
def _calc_probability(self, count):
comb_all = self._comb(self.num, self.num_half)
comb_le_count = 0
for i in range(count + 1):
tmp = self._comb(self.num_half, i)
comb_i = tmp * tmp
comb_le_count += comb_i
prob = comb_le_count / comb_all
return prob
def _comb(self, N, k):
val = numpy.float64(1)
for i in range(k):
val *= (N - i) / (k - i)
return val
| 30.016484 | 79 | 0.632802 |
73da40a5de5134c811652902db08da1803660cd3 | 127,925 | py | Python | google/appengine/ext/ndb/model.py | luduvigo/app-blog-code | 2de41573955f8dfb5f6d964d3cae2bbee6db2f9a | [
"Apache-2.0"
] | null | null | null | google/appengine/ext/ndb/model.py | luduvigo/app-blog-code | 2de41573955f8dfb5f6d964d3cae2bbee6db2f9a | [
"Apache-2.0"
] | null | null | null | google/appengine/ext/ndb/model.py | luduvigo/app-blog-code | 2de41573955f8dfb5f6d964d3cae2bbee6db2f9a | [
"Apache-2.0"
] | null | null | null | """Model and Property classes and associated stuff.
A model class represents the structure of entities stored in the
datastore. Applications define model classes to indicate the
structure of their entities, then instantiate those model classes
to create entities.
All model classes must inherit (directly or indirectly) from Model.
Through the magic of metaclasses, straightforward assignments in the
model class definition can be used to declare the model's structure:
class Person(Model):
name = StringProperty()
age = IntegerProperty()
We can now create a Person entity and write it to the datastore:
p = Person(name='Arthur Dent', age=42)
k = p.put()
The return value from put() is a Key (see the documentation for
ndb/key.py), which can be used to retrieve the same entity later:
p2 = k.get()
p2 == p # Returns True
To update an entity, simply change its attributes and write it back
(note that this doesn't change the key):
p2.name = 'Arthur Philip Dent'
p2.put()
We can also delete an entity (by using the key):
k.delete()
The property definitions in the class body tell the system the names
and the types of the fields to be stored in the datastore, whether
they must be indexed, their default value, and more.
Many different Property types exist. Most are indexed by default, the
exceptions indicated in the list below:
- StringProperty: a short text string, limited to 500 bytes
- TextProperty: an unlimited text string; unindexed
- BlobProperty: an unlimited byte string; unindexed
- IntegerProperty: a 64-bit signed integer
- FloatProperty: a double precision floating point number
- BooleanProperty: a bool value
- DateTimeProperty: a datetime object. Note: App Engine always uses
UTC as the timezone
- DateProperty: a date object
- TimeProperty: a time object
- GeoPtProperty: a geographical location, i.e. (latitude, longitude)
- KeyProperty: a datastore Key value, optionally constrained to
referring to a specific kind
- UserProperty: a User object (for backwards compatibility only)
- StructuredProperty: a field that is itself structured like an
entity; see below for more details
- LocalStructuredProperty: like StructuredProperty but the on-disk
representation is an opaque blob; unindexed
- ComputedProperty: a property whose value is computed from other
properties by a user-defined function. The property value is
written to the datastore so that it can be used in queries, but the
value from the datastore is not used when the entity is read back
- GenericProperty: a property whose type is not constrained; mostly
used by the Expando class (see below) but also usable explicitly
- JsonProperty: a property whose value is any object that can be
serialized using JSON; the value written to the datastore is a JSON
representation of that object
- PickleProperty: a property whose value is any object that can be
serialized using Python's pickle protocol; the value written to the
datastore is the pickled representation of that object, using the
highest available pickle protocol
Most Property classes have similar constructor signatures. They
accept several optional keyword arguments:
- name=<string>: the name used to store the property value in the
datastore. Unlike the following options, this may also be given as
a positional argument
- indexed=<bool>: indicates whether the property should be indexed
(allowing queries on this property's value)
- repeated=<bool>: indicates that this property can have multiple
values in the same entity.
- required=<bool>: indicates that this property must be given a value
- default=<value>: a default value if no explicit value is given
- choices=<list of values>: a list or tuple of allowable values
- validator=<function>: a general-purpose validation function. It
will be called with two arguments (prop, value) and should either
return the validated value or raise an exception. It is also
allowed for the function to modify the value, but calling it again
on the modified value should not modify the value further. (For
example: a validator that returns value.strip() or value.lower() is
fine, but one that returns value + '$' is not.)
- verbose_name=<value>: A human readable name for this property. This
human readable name can be used for html form labels.
The repeated, required and default options are mutually exclusive: a
repeated property cannot be required nor can it specify a default
value (the default is always an empty list and an empty list is always
an allowed value), and a required property cannot have a default.
Some property types have additional arguments. Some property types
do not support all options.
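For example, a model combining several of these options could look
like this (the model and its property names are purely illustrative):
  def normalize(prop, value):
    return value.strip().lower()
  class Article(Model):
    title = StringProperty(required=True)
    tags = StringProperty(repeated=True)
    status = StringProperty(choices=['draft', 'published'], default='draft')
    slug = StringProperty(validator=normalize)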
Repeated properties are always represented as Python lists; if there
is only one value, the list has only one element. When a new list is
assigned to a repeated property, all elements of the list are
validated. Since it is also possible to mutate lists in place,
repeated properties are re-validated before they are written to the
datastore.
No validation happens when an entity is read from the datastore;
however property values read that have the wrong type (e.g. a string
value for an IntegerProperty) are ignored.
For non-repeated properties, None is always a possible value, and no
validation is called when the value is set to None. However for
required properties, writing the entity to the datastore requires
the value to be something other than None (and valid).
The StructuredProperty is different from most other properties; it
lets you define a sub-structure for your entities. The substructure
itself is defined using a model class, and the attribute value is an
instance of that model class. However it is not stored in the
datastore as a separate entity; instead, its attribute values are
included in the parent entity using a naming convention (the name of
the structured attribute followed by a dot followed by the name of the
subattribute). For example:
class Address(Model):
street = StringProperty()
city = StringProperty()
class Person(Model):
name = StringProperty()
address = StructuredProperty(Address)
p = Person(name='Harry Potter',
address=Address(street='4 Privet Drive',
city='Little Whinging'))
  p.put()
This would write a single 'Person' entity with three attributes (as
you could verify using the Datastore Viewer in the Admin Console):
name = 'Harry Potter'
address.street = '4 Privet Drive'
address.city = 'Little Whinging'
Structured property types can be nested arbitrarily deep, but in a
hierarchy of nested structured property types, only one level can have
the repeated flag set. It is fine to have multiple structured
properties referencing the same model class.
It is also fine to use the same model class both as a top-level entity
class and as for a structured property; however queries for the model
class will only return the top-level entities.
The LocalStructuredProperty works similar to StructuredProperty on the
Python side. For example:
class Address(Model):
street = StringProperty()
city = StringProperty()
class Person(Model):
name = StringProperty()
address = LocalStructuredProperty(Address)
p = Person(name='Harry Potter',
address=Address(street='4 Privet Drive',
city='Little Whinging'))
  p.put()
However the data written to the datastore is different; it writes a
'Person' entity with a 'name' attribute as before and a single
'address' attribute whose value is a blob which encodes the Address
value (using the standard "protocol buffer" encoding).
Sometimes the set of properties is not known ahead of time. In such
cases you can use the Expando class. This is a Model subclass that
creates properties on the fly, both upon assignment and when loading
an entity from the datastore. For example:
class SuperPerson(Expando):
name = StringProperty()
superpower = StringProperty()
razorgirl = SuperPerson(name='Molly Millions',
superpower='bionic eyes, razorblade hands',
rasta_name='Steppin\' Razor',
alt_name='Sally Shears')
elastigirl = SuperPerson(name='Helen Parr',
superpower='stretchable body')
elastigirl.max_stretch = 30 # Meters
You can inspect the properties of an expando instance using the
_properties attribute:
>>> print razorgirl._properties.keys()
['rasta_name', 'name', 'superpower', 'alt_name']
>>> print elastigirl._properties
{'max_stretch': GenericProperty('max_stretch'),
'name': StringProperty('name'),
'superpower': StringProperty('superpower')}
Note: this property exists for plain Model instances too; it is just
not as interesting for those.
The Model class offers basic query support. You can create a Query
object by calling the query() class method. Iterating over a Query
object returns the entities matching the query one at a time.
Query objects are fully described in the docstring for query.py, but
there is one handy shortcut that is only available through
Model.query(): positional arguments are interpreted as filter
expressions which are combined through an AND operator. For example:
Person.query(Person.name == 'Harry Potter', Person.age >= 11)
is equivalent to:
Person.query().filter(Person.name == 'Harry Potter', Person.age >= 11)
Keyword arguments passed to .query() are passed along to the Query()
constructor.
It is possible to query for field values of structured properties. For
example:
qry = Person.query(Person.address.city == 'London')
A number of top-level functions also live in this module:
- transaction() runs a function inside a transaction
- get_multi() reads multiple entities at once
- put_multi() writes multiple entities at once
- delete_multi() deletes multiple entities at once
All these have a corresponding *_async() variant as well.
The *_multi_async() functions return a list of Futures.
And finally these (without async variants):
- in_transaction() tests whether you are currently running in a transaction
- @transactional decorates functions that should be run in a transaction
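For example, the functions above could be combined as follows (the
update_score function and its score property are purely illustrative):
  @transactional
  def update_score(key, delta):
    ent = key.get()
    ent.score += delta
    ent.put()
  keys = put_multi([Person(name='A'), Person(name='B')])
  people = get_multi(keys)
  delete_multi(keys)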
There are many other interesting features. For example, Model
subclasses may define pre-call and post-call hooks for most operations
(get, put, delete, allocate_ids), and Property classes may be
subclassed to suit various needs. Documentation for writing a
Property subclass is in the docstring for the Property class.
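For instance, a pre-put hook could be sketched as follows (the hook
method name is assumed here from the Model hook convention; the model
itself is illustrative):
  class Account(Model):
    balance = IntegerProperty(default=0)
    def _pre_put_hook(self):
      # Runs just before this entity is written to the datastore.
      assert self.balance >= 0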
"""
__author__ = 'guido@google.com (Guido van Rossum)'
import copy
import cPickle as pickle
import datetime
import zlib
from .google_imports import datastore_errors
from .google_imports import datastore_types
from .google_imports import users
from .google_imports import datastore_query
from .google_imports import datastore_rpc
from .google_imports import entity_pb
from . import utils
# NOTE: 'key' is a common local variable name.
from . import key as key_module
Key = key_module.Key # For export.
# NOTE: Property and Error classes are added later.
__all__ = ['Key', 'BlobKey', 'GeoPt', 'Rollback',
'Index', 'IndexState', 'IndexProperty',
'ModelAdapter', 'ModelAttribute',
'ModelKey', 'MetaModel', 'Model', 'Expando',
'transaction', 'transaction_async',
'in_transaction', 'transactional', 'non_transactional',
'get_multi', 'get_multi_async',
'put_multi', 'put_multi_async',
'delete_multi', 'delete_multi_async',
'get_indexes', 'get_indexes_async',
'make_connection',
]
BlobKey = datastore_types.BlobKey
GeoPt = datastore_types.GeoPt
Rollback = datastore_errors.Rollback
class KindError(datastore_errors.BadValueError):
"""Raised when an implementation for a kind can't be found.
Also raised when the Kind is not an 8-bit string.
"""
class BadProjectionError(datastore_errors.Error):
"""Raised when a property name used as a projection is invalid."""
class UnprojectedPropertyError(datastore_errors.Error):
"""Raised when getting a property value that's not in the projection."""
class ReadonlyPropertyError(datastore_errors.Error):
"""Raised when attempting to set a property value that is read-only."""
class ComputedPropertyError(ReadonlyPropertyError):
"""Raised when attempting to set a value to a computed property."""
# Various imported limits.
_MAX_LONG = key_module._MAX_LONG
_MAX_STRING_LENGTH = datastore_types._MAX_STRING_LENGTH
# Map index directions to human-readable strings.
_DIR_MAP = {
entity_pb.Index_Property.ASCENDING: 'asc',
entity_pb.Index_Property.DESCENDING: 'desc',
}
# Map index states to human-readable strings.
_STATE_MAP = {
entity_pb.CompositeIndex.ERROR: 'error',
entity_pb.CompositeIndex.DELETED: 'deleting',
entity_pb.CompositeIndex.READ_WRITE: 'serving',
entity_pb.CompositeIndex.WRITE_ONLY: 'building',
}
class _NotEqualMixin(object):
"""Mix-in class that implements __ne__ in terms of __eq__."""
def __ne__(self, other):
"""Implement self != other as not(self == other)."""
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
return not eq
class IndexProperty(_NotEqualMixin):
"""Immutable object representing a single property in an index."""
@utils.positional(1)
def __new__(cls, name, direction):
"""Constructor."""
obj = object.__new__(cls)
obj.__name = name
obj.__direction = direction
return obj
@property
def name(self):
"""The property name being indexed, a string."""
return self.__name
@property
def direction(self):
"""The direction in the index for this property, 'asc' or 'desc'."""
return self.__direction
def __repr__(self):
"""Return a string representation."""
return '%s(name=%r, direction=%r)' % (self.__class__.__name__,
self.name,
self.direction)
def __eq__(self, other):
"""Compare two index properties for equality."""
if not isinstance(other, IndexProperty):
return NotImplemented
return self.name == other.name and self.direction == other.direction
def __hash__(self):
return hash((self.name, self.direction))
class Index(_NotEqualMixin):
"""Immutable object representing an index."""
@utils.positional(1)
def __new__(cls, kind, properties, ancestor):
"""Constructor."""
obj = object.__new__(cls)
obj.__kind = kind
obj.__properties = properties
obj.__ancestor = ancestor
return obj
@property
def kind(self):
"""The kind being indexed, a string."""
return self.__kind
@property
def properties(self):
"""A list of PropertyIndex objects giving the properties being indexed."""
return self.__properties
@property
def ancestor(self):
"""Whether this is an ancestor index, a bool."""
return self.__ancestor
def __repr__(self):
"""Return a string representation."""
parts = []
parts.append('kind=%r' % self.kind)
parts.append('properties=%r' % self.properties)
parts.append('ancestor=%s' % self.ancestor)
return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
def __eq__(self, other):
"""Compare two indexes."""
if not isinstance(other, Index):
return NotImplemented
return (self.kind == other.kind and
self.properties == other.properties and
self.ancestor == other.ancestor)
def __hash__(self):
return hash((self.kind, self.properties, self.ancestor))
class IndexState(_NotEqualMixin):
"""Immutable object representing and index and its state."""
@utils.positional(1)
def __new__(cls, definition, state, id):
"""Constructor."""
obj = object.__new__(cls)
obj.__definition = definition
obj.__state = state
obj.__id = id
return obj
@property
def definition(self):
"""An Index object describing the index."""
return self.__definition
@property
def state(self):
"""The index state, a string.
Possible values are 'error', 'deleting', 'serving' or 'building'.
"""
return self.__state
@property
def id(self):
"""The index ID, an integer."""
return self.__id
def __repr__(self):
"""Return a string representation."""
parts = []
parts.append('definition=%r' % self.definition)
parts.append('state=%r' % self.state)
parts.append('id=%d' % self.id)
return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
def __eq__(self, other):
"""Compare two index states."""
if not isinstance(other, IndexState):
return NotImplemented
return (self.definition == other.definition and
self.state == other.state and
self.id == other.id)
def __hash__(self):
return hash((self.definition, self.state, self.id))
class ModelAdapter(datastore_rpc.AbstractAdapter):
"""Conversions between 'our' Key and Model classes and protobufs.
This is needed to construct a Connection object, which in turn is
needed to construct a Context object.
See the base class docstring for more info about the signatures.
"""
def __init__(self, default_model=None):
"""Constructor.
Args:
default_model: If an implementation for the kind cannot be found, use
this model class. If none is specified, an exception will be thrown
(default).
"""
self.default_model = default_model
self.want_pbs = 0
# Make this a context manager to request setting _orig_pb.
# Used in query.py by _MultiQuery.run_to_queue().
def __enter__(self):
self.want_pbs += 1
def __exit__(self, *unused_args):
self.want_pbs -= 1
def pb_to_key(self, pb):
return Key(reference=pb)
def key_to_pb(self, key):
return key.reference()
def pb_to_entity(self, pb):
key = None
kind = None
if pb.key().path().element_size():
key = Key(reference=pb.key())
kind = key.kind()
modelclass = Model._kind_map.get(kind, self.default_model)
if modelclass is None:
raise KindError(
"No model class found for kind '%s'. Did you forget to import it?" %
kind)
entity = modelclass._from_pb(pb, key=key, set_key=False)
if self.want_pbs:
entity._orig_pb = pb
return entity
def entity_to_pb(self, ent):
pb = ent._to_pb()
return pb
def pb_to_index(self, pb):
index_def = pb.definition()
properties = [IndexProperty(name=prop.name(),
direction=_DIR_MAP[prop.direction()])
for prop in index_def.property_list()]
index = Index(kind=index_def.entity_type(),
properties=properties,
ancestor=bool(index_def.ancestor()),
)
index_state = IndexState(definition=index,
state=_STATE_MAP[pb.state()],
id=pb.id(),
)
return index_state
def make_connection(config=None, default_model=None):
"""Create a new Connection object with the right adapter.
Optionally you can pass in a datastore_rpc.Configuration object.
"""
return datastore_rpc.Connection(
adapter=ModelAdapter(default_model),
config=config)
class ModelAttribute(object):
"""A Base class signifying the presence of a _fix_up() method."""
def _fix_up(self, cls, code_name):
pass
class _BaseValue(_NotEqualMixin):
"""A marker object wrapping a 'base type' value.
This is used to be able to tell whether ent._values[name] is a
user value (i.e. of a type that the Python code understands) or a
  base value (i.e. of a type that serialization understands).
User values are unwrapped; base values are wrapped in a
_BaseValue instance.
"""
__slots__ = ['b_val']
def __init__(self, b_val):
"""Constructor. Argument is the base value to be wrapped."""
assert b_val is not None
assert not isinstance(b_val, list), repr(b_val)
self.b_val = b_val
def __repr__(self):
return '_BaseValue(%r)' % (self.b_val,)
def __eq__(self, other):
if not isinstance(other, _BaseValue):
return NotImplemented
return self.b_val == other.b_val
def __hash__(self):
raise TypeError('_BaseValue is not immutable')
class Property(ModelAttribute):
"""A class describing a typed, persisted attribute of a datastore entity.
Not to be confused with Python's 'property' built-in.
This is just a base class; there are specific subclasses that
describe Properties of various types (and GenericProperty which
describes a dynamically typed Property).
All special Property attributes, even those considered 'public',
have names starting with an underscore, because StructuredProperty
uses the non-underscore attribute namespace to refer to nested
Property names; this is essential for specifying queries on
subproperties (see the module docstring).
The Property class and its predefined subclasses allow easy
subclassing using composable (or stackable) validation and
conversion APIs. These require some terminology definitions:
- A 'user value' is a value such as would be set and accessed by the
application code using standard attributes on the entity.
- A 'base value' is a value such as would be serialized to
and deserialized from the datastore.
The values stored in ent._values[name] and accessed by
_store_value() and _retrieve_value() can be either user values or
base values. To retrieve user values, use
_get_user_value(). To retrieve base values, use
_get_base_value(). In particular, _get_value() calls
_get_user_value(), and _serialize() effectively calls
_get_base_value().
To store a user value, just call _store_value(). To store a
base value, wrap the value in a _BaseValue() and then
call _store_value().
A Property subclass that wants to implement a specific
transformation between user values and serialiazble values should
implement two methods, _to_base_type() and _from_base_type().
These should *NOT* call their super() method; super calls are taken
care of by _call_to_base_type() and _call_from_base_type().
This is what is meant by composable (or stackable) APIs.
The API supports 'stacking' classes with ever more sophisticated
user<-->base conversions: the user-->base conversion
goes from more sophisticated to less sophisticated, while the
base-->user conversion goes from less sophisticated to more
sophisticated. For example, see the relationship between
BlobProperty, TextProperty and StringProperty.
In addition to _to_base_type() and _from_base_type(), the
_validate() method is also a composable API.
The validation API distinguishes between 'lax' and 'strict' user
values. The set of lax values is a superset of the set of strict
values. The _validate() method takes a lax value and if necessary
converts it to a strict value. This means that when setting the
property value, lax values are accepted, while when getting the
property value, only strict values will be returned. If no
conversion is needed, _validate() may return None. If the argument
is outside the set of accepted lax values, _validate() should raise
an exception, preferably TypeError or
datastore_errors.BadValueError.
Example/boilerplate:
def _validate(self, value):
'Lax user value to strict user value.'
if not isinstance(value, <top type>):
raise TypeError(...) # Or datastore_errors.BadValueError(...).
def _to_base_type(self, value):
'(Strict) user value to base value.'
if isinstance(value, <user type>):
return <base type>(value)
def _from_base_type(self, value):
'base value to (strict) user value.'
if not isinstance(value, <base type>):
return <user type>(value)
Things that _validate(), _to_base_type() and _from_base_type()
do *not* need to handle:
- None: They will not be called with None (and if they return None,
this means that the value does not need conversion).
- Repeated values: The infrastructure (_get_user_value() and
_get_base_value()) takes care of calling
_from_base_type() or _to_base_type() for each list item in a
repeated value.
- Wrapping values in _BaseValue(): The wrapping and unwrapping is
taken care of by the infrastructure that calls the composable APIs.
- Comparisons: The comparison operations call _to_base_type() on
their operand.
- Distinguishing between user and base values: the
infrastructure guarantees that _from_base_type() will be called
with an (unwrapped) base value, and that
_to_base_type() will be called with a user value.
- Returning the original value: if any of these return None, the
    original value is kept. (Returning a different value not equal to
None will substitute the different value.)
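  As a purely illustrative sketch of this pattern, a property that
  accepts datetime.date user values and stores them as the
  datetime.datetime base values understood by DateTimeProperty could
  be written as:
  class DateOnlyProperty(DateTimeProperty):
    def _validate(self, value):
      if not isinstance(value, datetime.date):
        raise TypeError('Expected a date, got %r' % (value,))
    def _to_base_type(self, value):
      return datetime.datetime(value.year, value.month, value.day)
    def _from_base_type(self, value):
      return value.date()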
"""
# TODO: Separate 'simple' properties from base Property class
_code_name = None
_name = None
_indexed = True
_repeated = False
_required = False
_default = None
_choices = None
_validator = None
_verbose_name = None
__creation_counter_global = 0
_attributes = ['_name', '_indexed', '_repeated', '_required', '_default',
'_choices', '_validator', '_verbose_name']
_positional = 1 # Only name is a positional argument.
@utils.positional(1 + _positional) # Add 1 for self.
def __init__(self, name=None, indexed=None, repeated=None,
required=None, default=None, choices=None, validator=None,
verbose_name=None):
"""Constructor. For arguments see the module docstring."""
if name is not None:
if isinstance(name, unicode):
name = name.encode('utf-8')
if not isinstance(name, str):
raise TypeError('Name %r is not a string' % (name,))
if '.' in name:
raise ValueError('Name %r cannot contain period characters' % (name,))
self._name = name
if indexed is not None:
self._indexed = indexed
if repeated is not None:
self._repeated = repeated
if required is not None:
self._required = required
if default is not None:
# TODO: Call _validate() on default?
self._default = default
if verbose_name is not None:
self._verbose_name = verbose_name
if (bool(self._repeated) +
bool(self._required) +
(self._default is not None)) > 1:
      raise ValueError('repeated, required and default are mutually exclusive.')
if choices is not None:
if not isinstance(choices, (list, tuple, set, frozenset)):
raise TypeError('choices must be a list, tuple or set; received %r' %
choices)
# TODO: Call _validate() on each choice?
self._choices = frozenset(choices)
if validator is not None:
# The validator is called as follows:
# value = validator(prop, value)
# It should return the value to be used, or raise an exception.
# It should be idempotent, i.e. calling it a second time should
# not further modify the value. So a validator that returns e.g.
# value.lower() or value.strip() is fine, but one that returns
# value + '$' is not.
if not hasattr(validator, '__call__'):
raise TypeError('validator must be callable or None; received %r' %
validator)
self._validator = validator
# Keep a unique creation counter.
Property.__creation_counter_global += 1
self._creation_counter = Property.__creation_counter_global
def __repr__(self):
"""Return a compact unambiguous string representation of a property."""
args = []
cls = self.__class__
for i, attr in enumerate(self._attributes):
val = getattr(self, attr)
if val is not getattr(cls, attr):
if isinstance(val, type):
s = val.__name__
else:
s = repr(val)
if i >= cls._positional:
if attr.startswith('_'):
attr = attr[1:]
s = '%s=%s' % (attr, s)
args.append(s)
s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
return s
def _datastore_type(self, value):
"""Internal hook used by property filters.
Sometimes the low-level query interface needs a specific data type
in order for the right filter to be constructed. See _comparison().
"""
return value
def _comparison(self, op, value):
"""Internal helper for comparison operators.
Args:
op: The operator ('=', '<' etc.).
Returns:
A FilterNode instance representing the requested comparison.
"""
# NOTE: This is also used by query.gql().
if not self._indexed:
raise datastore_errors.BadFilterError(
'Cannot query for unindexed property %s' % self._name)
from .query import FilterNode # Import late to avoid circular imports.
if value is not None:
value = self._do_validate(value)
value = self._call_to_base_type(value)
value = self._datastore_type(value)
return FilterNode(self._name, op, value)
# Comparison operators on Property instances don't compare the
# properties; instead they return FilterNode instances that can be
# used in queries. See the module docstrings above and in query.py
# for details on how these can be used.
def __eq__(self, value):
"""Return a FilterNode instance representing the '=' comparison."""
return self._comparison('=', value)
def __ne__(self, value):
"""Return a FilterNode instance representing the '!=' comparison."""
return self._comparison('!=', value)
def __lt__(self, value):
"""Return a FilterNode instance representing the '<' comparison."""
return self._comparison('<', value)
def __le__(self, value):
"""Return a FilterNode instance representing the '<=' comparison."""
return self._comparison('<=', value)
def __gt__(self, value):
"""Return a FilterNode instance representing the '>' comparison."""
return self._comparison('>', value)
def __ge__(self, value):
"""Return a FilterNode instance representing the '>=' comparison."""
return self._comparison('>=', value)
def _IN(self, value):
"""Comparison operator for the 'in' comparison operator.
The Python 'in' operator cannot be overloaded in the way we want
to, so we define a method. For example:
Employee.query(Employee.rank.IN([4, 5, 6]))
Note that the method is called ._IN() but may normally be invoked
as .IN(); ._IN() is provided for the case you have a
StructuredProperty with a model that has a Property named IN.
"""
if not self._indexed:
raise datastore_errors.BadFilterError(
'Cannot query for unindexed property %s' % self._name)
from .query import FilterNode # Import late to avoid circular imports.
if not isinstance(value, (list, tuple, set, frozenset)):
raise datastore_errors.BadArgumentError(
'Expected list, tuple or set, got %r' % (value,))
values = []
for val in value:
if val is not None:
val = self._do_validate(val)
val = self._call_to_base_type(val)
val = self._datastore_type(val)
values.append(val)
return FilterNode(self._name, 'in', values)
IN = _IN
def __neg__(self):
"""Return a descending sort order on this Property.
For example:
Employee.query().order(-Employee.rank)
"""
return datastore_query.PropertyOrder(
self._name, datastore_query.PropertyOrder.DESCENDING)
def __pos__(self):
"""Return an ascending sort order on this Property.
Note that this is redundant but provided for consistency with
__neg__. For example, the following two are equivalent:
Employee.query().order(+Employee.rank)
Employee.query().order(Employee.rank)
"""
return datastore_query.PropertyOrder(self._name)
def _do_validate(self, value):
"""Call all validations on the value.
This calls the most derived _validate() method(s), then the custom
validator function, and then checks the choices. It returns the
value, possibly modified in an idempotent way, or raises an
exception.
Note that this does not call all composable _validate() methods.
It only calls _validate() methods up to but not including the
first _to_base_type() method, when the MRO is traversed looking
for _validate() and _to_base_type() methods. (IOW if a class
defines both _validate() and _to_base_type(), its _validate()
is called and then the search is aborted.)
Note that for a repeated Property this function should be called
for each item in the list, not for the list as a whole.
"""
if isinstance(value, _BaseValue):
return value
value = self._call_shallow_validation(value)
if self._validator is not None:
newvalue = self._validator(self, value)
if newvalue is not None:
value = newvalue
if self._choices is not None:
if value not in self._choices:
raise datastore_errors.BadValueError(
'Value %r for property %s is not an allowed choice' %
(value, self._name))
return value
def _fix_up(self, cls, code_name):
"""Internal helper called to tell the property its name.
This is called by _fix_up_properties() which is called by
MetaModel when finishing the construction of a Model subclass.
The name passed in is the name of the class attribute to which the
Property is assigned (a.k.a. the code name). Note that this means
that each Property instance must be assigned to (at most) one
class attribute. E.g. to declare three strings, you must call
StringProperty() three times, you cannot write
foo = bar = baz = StringProperty()
"""
self._code_name = code_name
if self._name is None:
self._name = code_name
def _store_value(self, entity, value):
"""Internal helper to store a value in an entity for a Property.
This assumes validation has already taken place. For a repeated
Property the value should be a list.
"""
entity._values[self._name] = value
def _set_value(self, entity, value):
"""Internal helper to set a value in an entity for a Property.
This performs validation first. For a repeated Property the value
should be a list.
"""
if entity._projection:
raise ReadonlyPropertyError(
'You cannot set property values of a projection entity')
if self._repeated:
if not isinstance(value, (list, tuple, set, frozenset)):
raise datastore_errors.BadValueError('Expected list or tuple, got %r' %
(value,))
value = [self._do_validate(v) for v in value]
else:
if value is not None:
value = self._do_validate(value)
self._store_value(entity, value)
def _has_value(self, entity, unused_rest=None):
"""Internal helper to ask if the entity has a value for this Property."""
return self._name in entity._values
def _retrieve_value(self, entity, default=None):
"""Internal helper to retrieve the value for this Property from an entity.
This returns None if no value is set, or the default argument if
given. For a repeated Property this returns a list if a value is
set, otherwise None. No additional transformations are applied.
"""
return entity._values.get(self._name, default)
def _get_user_value(self, entity):
"""Return the user value for this property of the given entity.
This implies removing the _BaseValue() wrapper if present, and
if it is, calling all _from_base_type() methods, in the reverse
method resolution order of the property's class. It also handles
default values and repeated properties.
"""
return self._apply_to_values(entity, self._opt_call_from_base_type)
def _get_base_value(self, entity):
"""Return the base value for this property of the given entity.
This implies calling all _to_base_type() methods, in the method
resolution order of the property's class, and adding a
_BaseValue() wrapper, if one is not already present. (If one
is present, no work is done.) It also handles default values and
repeated properties.
"""
return self._apply_to_values(entity, self._opt_call_to_base_type)
# TODO: Invent a shorter name for this.
def _get_base_value_unwrapped_as_list(self, entity):
"""Like _get_base_value(), but always returns a list.
Returns:
A new list of unwrapped base values. For an unrepeated
property, if the value is missing or None, returns [None]; for a
repeated property, if the original value is missing or None or
empty, returns [].
"""
wrapped = self._get_base_value(entity)
if self._repeated:
if wrapped is None:
return []
assert isinstance(wrapped, list)
return [w.b_val for w in wrapped]
else:
if wrapped is None:
return [None]
assert isinstance(wrapped, _BaseValue)
return [wrapped.b_val]
def _opt_call_from_base_type(self, value):
"""Call _from_base_type() if necessary.
If the value is a _BaseValue instance, unwrap it and call all
_from_base_type() methods. Otherwise, return the value
unchanged.
"""
if isinstance(value, _BaseValue):
value = self._call_from_base_type(value.b_val)
return value
def _value_to_repr(self, value):
"""Turn a value (base or not) into its repr().
This exists so that property classes can override it separately.
"""
# Manually apply _from_base_type() so as not to have a side
# effect on what's contained in the entity. Printing a value
# should not change it!
val = self._opt_call_from_base_type(value)
return repr(val)
def _opt_call_to_base_type(self, value):
"""Call _to_base_type() if necessary.
If the value is a _BaseValue instance, return it unchanged.
Otherwise, call all _validate() and _to_base_type() methods and
wrap it in a _BaseValue instance.
"""
if not isinstance(value, _BaseValue):
value = _BaseValue(self._call_to_base_type(value))
return value
def _call_from_base_type(self, value):
"""Call all _from_base_type() methods on the value.
This calls the methods in the reverse method resolution order of
the property's class.
"""
methods = self._find_methods('_from_base_type', reverse=True)
call = self._apply_list(methods)
return call(value)
def _call_to_base_type(self, value):
"""Call all _validate() and _to_base_type() methods on the value.
This calls the methods in the method resolution order of the
property's class.
"""
methods = self._find_methods('_validate', '_to_base_type')
call = self._apply_list(methods)
return call(value)
def _call_shallow_validation(self, value):
"""Call the initial set of _validate() methods.
This is similar to _call_to_base_type() except it only calls
those _validate() methods that can be called without needing to
call _to_base_type().
An example: suppose the class hierarchy is A -> B -> C ->
Property, and suppose A defines _validate() only, but B and C
define _validate() and _to_base_type(). The full list of
methods called by _call_to_base_type() is:
A._validate()
B._validate()
B._to_base_type()
C._validate()
C._to_base_type()
This method will call A._validate() and B._validate() but not the
others.
"""
methods = []
for method in self._find_methods('_validate', '_to_base_type'):
if method.__name__ != '_validate':
break
methods.append(method)
call = self._apply_list(methods)
return call(value)
@classmethod
def _find_methods(cls, *names, **kwds):
"""Compute a list of composable methods.
Because this is a common operation and the class hierarchy is
static, the outcome is cached (assuming that for a particular list
of names the reversed flag is either always on, or always off).
Args:
*names: One or more method names.
reverse: Optional flag, default False; if True, the list is
reversed.
Returns:
A list of callable class method objects.
"""
reverse = kwds.pop('reverse', False)
assert not kwds, repr(kwds)
cache = cls.__dict__.get('_find_methods_cache')
if cache:
hit = cache.get(names)
if hit is not None:
return hit
else:
cls._find_methods_cache = cache = {}
methods = []
for c in cls.__mro__:
for name in names:
method = c.__dict__.get(name)
if method is not None:
methods.append(method)
if reverse:
methods.reverse()
cache[names] = methods
return methods
def _apply_list(self, methods):
"""Return a single callable that applies a list of methods to a value.
If a method returns None, the last value is kept; if it returns
some other value, that replaces the last value. Exceptions are
not caught.
"""
def call(value):
for method in methods:
newvalue = method(self, value)
if newvalue is not None:
value = newvalue
return value
return call
def _apply_to_values(self, entity, function):
"""Apply a function to the property value/values of a given entity.
This retrieves the property value, applies the function, and then
stores the value back. For a repeated property, the function is
applied separately to each of the values in the list. The
resulting value or list of values is both stored back in the
entity and returned from this method.
"""
value = self._retrieve_value(entity, self._default)
if self._repeated:
if value is None:
value = []
self._store_value(entity, value)
else:
value[:] = map(function, value)
else:
if value is not None:
newvalue = function(value)
if newvalue is not None and newvalue is not value:
self._store_value(entity, newvalue)
value = newvalue
return value
def _get_value(self, entity):
"""Internal helper to get the value for this Property from an entity.
For a repeated Property this initializes the value to an empty
list if it is not set.
"""
if entity._projection:
if self._name not in entity._projection:
raise UnprojectedPropertyError(
'Property %s is not in the projection' % (self._name,))
return self._get_user_value(entity)
def _delete_value(self, entity):
"""Internal helper to delete the value for this Property from an entity.
Note that if no value exists this is a no-op; deleted values will
not be serialized but requesting their value will return None (or
an empty list in the case of a repeated Property).
"""
if self._name in entity._values:
del entity._values[self._name]
def _is_initialized(self, entity):
"""Internal helper to ask if the entity has a value for this Property.
This returns False if a value is stored but it is None.
"""
return not self._required or (self._has_value(entity) and
self._get_value(entity) is not None)
def __get__(self, entity, unused_cls=None):
"""Descriptor protocol: get the value from the entity."""
if entity is None:
return self # __get__ called on class
return self._get_value(entity)
def __set__(self, entity, value):
"""Descriptor protocol: set the value on the entity."""
self._set_value(entity, value)
def __delete__(self, entity):
"""Descriptor protocol: delete the value from the entity."""
self._delete_value(entity)
def _serialize(self, entity, pb, prefix='', parent_repeated=False):
"""Internal helper to serialize this property to a protocol buffer.
Subclasses may override this method.
Args:
entity: The entity, a Model (subclass) instance.
pb: The protocol buffer, an EntityProto instance.
prefix: Optional name prefix used for StructuredProperty
(if present, must end in '.').
parent_repeated: True if the parent (or an earlier ancestor)
is a repeated Property.
"""
values = self._get_base_value_unwrapped_as_list(entity)
for val in values:
if self._indexed:
p = pb.add_property()
else:
p = pb.add_raw_property()
p.set_name(prefix + self._name)
p.set_multiple(self._repeated or parent_repeated)
v = p.mutable_value()
if val is not None:
self._db_set_value(v, p, val)
def _deserialize(self, entity, p, unused_depth=1):
"""Internal helper to deserialize this property from a protocol buffer.
Subclasses may override this method.
Args:
entity: The entity, a Model (subclass) instance.
p: A Property Message object (a protocol buffer).
depth: Optional nesting depth, default 1 (unused here, but used
by some subclasses that override this method).
"""
v = p.value()
val = self._db_get_value(v, p)
if val is not None:
val = _BaseValue(val)
if self._repeated:
if self._has_value(entity):
value = self._retrieve_value(entity)
assert isinstance(value, list), repr(value)
value.append(val)
else:
value = [val]
else:
value = val
self._store_value(entity, value)
def _prepare_for_put(self, entity):
pass
def _check_projection(self, rest=None):
"""Helper to check whether this property can be used as a projection.
Args:
rest: Optional subproperty to check, of the form 'name1.name2...nameN'.
Raises:
BadProjectionError if this property is not indexed or if a
subproperty is specified. (StructuredProperty overrides this
      method to handle subproperties.)
"""
if not self._indexed:
raise BadProjectionError('Projecting on unindexed property %s' %
self._name)
if rest:
raise BadProjectionError('Projecting on subproperty %s.%s '
'but %s is not a structured property' %
(self._name, rest, self._name))
def _get_for_dict(self, entity):
"""Retrieve the value like _get_value(), processed for _to_dict().
Property subclasses can override this if they want the dictionary
returned by entity._to_dict() to contain a different value. The
main use case is StructuredProperty and LocalStructuredProperty.
"""
return self._get_value(entity)
def _validate_key(value, entity=None):
if not isinstance(value, Key):
# TODO: BadKeyError.
raise datastore_errors.BadValueError('Expected Key, got %r' % value)
if entity and entity.__class__ not in (Model, Expando):
if value.kind() != entity._get_kind():
raise KindError('Expected Key kind to be %s; received %s' %
(entity._get_kind(), value.kind()))
return value
class ModelKey(Property):
"""Special property to store the Model key."""
def __init__(self):
super(ModelKey, self).__init__()
self._name = '__key__'
def _datastore_type(self, value):
return datastore_types.Key(value.urlsafe())
def _comparison(self, op, value):
if value is not None:
return super(ModelKey, self)._comparison(op, value)
raise datastore_errors.BadValueError(
"__key__ filter query can't be compared to None")
# TODO: Support IN().
def _validate(self, value):
return _validate_key(value)
def _set_value(self, entity, value):
"""Setter for key attribute."""
if value is not None:
value = _validate_key(value, entity=entity)
value = entity._validate_key(value)
entity._entity_key = value
def _get_value(self, entity):
"""Getter for key attribute."""
return entity._entity_key
def _delete_value(self, entity):
"""Deleter for key attribute."""
entity._entity_key = None
class BooleanProperty(Property):
"""A Property whose value is a Python bool."""
# TODO: Allow int/long values equal to 0 or 1?
def _validate(self, value):
if not isinstance(value, bool):
raise datastore_errors.BadValueError('Expected bool, got %r' %
(value,))
return value
def _db_set_value(self, v, unused_p, value):
if not isinstance(value, bool):
raise TypeError('BooleanProperty %s can only be set to bool values; '
'received %r' % (self._name, value))
v.set_booleanvalue(value)
def _db_get_value(self, v, unused_p):
if not v.has_booleanvalue():
return None
# The booleanvalue field is an int32, so booleanvalue() returns an
# int, hence the conversion.
return bool(v.booleanvalue())
class IntegerProperty(Property):
"""A Property whose value is a Python int or long (or bool)."""
def _validate(self, value):
if not isinstance(value, (int, long)):
raise datastore_errors.BadValueError('Expected integer, got %r' %
(value,))
return int(value)
def _db_set_value(self, v, unused_p, value):
if not isinstance(value, (bool, int, long)):
raise TypeError('IntegerProperty %s can only be set to integer values; '
'received %r' % (self._name, value))
v.set_int64value(value)
def _db_get_value(self, v, unused_p):
if not v.has_int64value():
return None
return int(v.int64value())
class FloatProperty(Property):
"""A Property whose value is a Python float.
Note: int, long and bool are also allowed.
"""
def _validate(self, value):
if not isinstance(value, (int, long, float)):
raise datastore_errors.BadValueError('Expected float, got %r' %
(value,))
return float(value)
def _db_set_value(self, v, unused_p, value):
if not isinstance(value, (bool, int, long, float)):
raise TypeError('FloatProperty %s can only be set to integer or float '
'values; received %r' % (self._name, value))
v.set_doublevalue(float(value))
def _db_get_value(self, v, unused_p):
if not v.has_doublevalue():
return None
return v.doublevalue()
# A custom 'meaning' for compressed properties.
_MEANING_URI_COMPRESSED = 'ZLIB'
class _CompressedValue(_NotEqualMixin):
"""A marker object wrapping compressed values."""
__slots__ = ['z_val']
def __init__(self, z_val):
"""Constructor. Argument is a string returned by zlib.compress()."""
assert isinstance(z_val, str), repr(z_val)
self.z_val = z_val
def __repr__(self):
return '_CompressedValue(%s)' % repr(self.z_val)
def __eq__(self, other):
if not isinstance(other, _CompressedValue):
return NotImplemented
return self.z_val == other.z_val
def __hash__(self):
raise TypeError('_CompressedValue is not immutable')
class BlobProperty(Property):
"""A Property whose value is a byte string. It may be compressed."""
_indexed = False
_compressed = False
_attributes = Property._attributes + ['_compressed']
@utils.positional(1 + Property._positional)
def __init__(self, name=None, compressed=False, **kwds):
super(BlobProperty, self).__init__(name=name, **kwds)
self._compressed = compressed
if compressed and self._indexed:
# TODO: Allow this, but only allow == and IN comparisons?
raise NotImplementedError('BlobProperty %s cannot be compressed and '
'indexed at the same time.' % self._name)
def _value_to_repr(self, value):
long_repr = super(BlobProperty, self)._value_to_repr(value)
# Note that we may truncate even if the value is shorter than
# _MAX_STRING_LENGTH; e.g. if it contains many \xXX or \uUUUU
# escapes.
if len(long_repr) > _MAX_STRING_LENGTH + 4:
# Truncate, assuming the final character is the closing quote.
long_repr = long_repr[:_MAX_STRING_LENGTH] + '...' + long_repr[-1]
return long_repr
def _validate(self, value):
if not isinstance(value, str):
raise datastore_errors.BadValueError('Expected str, got %r' %
(value,))
if (self._indexed and
not isinstance(self, TextProperty) and
len(value) > _MAX_STRING_LENGTH):
raise datastore_errors.BadValueError(
'Indexed value %s must be at most %d bytes' %
(self._name, _MAX_STRING_LENGTH))
def _to_base_type(self, value):
if self._compressed:
return _CompressedValue(zlib.compress(value))
def _from_base_type(self, value):
if isinstance(value, _CompressedValue):
return zlib.decompress(value.z_val)
def _datastore_type(self, value):
# Since this is only used for queries, and queries imply an
# indexed property, always use ByteString.
return datastore_types.ByteString(value)
def _db_set_value(self, v, p, value):
if isinstance(value, _CompressedValue):
self._db_set_compressed_meaning(p)
value = value.z_val
else:
self._db_set_uncompressed_meaning(p)
v.set_stringvalue(value)
def _db_set_compressed_meaning(self, p):
# Use meaning_uri because setting meaning to something else that is not
# BLOB or BYTESTRING will cause the value to be decoded from utf-8 in
# datastore_types.FromPropertyPb. That would break the compressed string.
p.set_meaning_uri(_MEANING_URI_COMPRESSED)
p.set_meaning(entity_pb.Property.BLOB)
def _db_set_uncompressed_meaning(self, p):
if self._indexed:
p.set_meaning(entity_pb.Property.BYTESTRING)
else:
p.set_meaning(entity_pb.Property.BLOB)
def _db_get_value(self, v, p):
if not v.has_stringvalue():
return None
value = v.stringvalue()
if p.meaning_uri() == _MEANING_URI_COMPRESSED:
value = _CompressedValue(value)
return value
class TextProperty(BlobProperty):
"""An unindexed Property whose value is a text string of unlimited length."""
def _validate(self, value):
if isinstance(value, str):
# Decode from UTF-8 -- if this fails, we can't write it.
try:
value = unicode(value, 'utf-8')
except UnicodeError:
raise datastore_errors.BadValueError('Expected valid UTF-8, got %r' %
(value,))
elif not isinstance(value, unicode):
raise datastore_errors.BadValueError('Expected string, got %r' %
(value,))
if self._indexed and len(value) > _MAX_STRING_LENGTH:
raise datastore_errors.BadValueError(
'Indexed value %s must be at most %d characters' %
(self._name, _MAX_STRING_LENGTH))
def _to_base_type(self, value):
if isinstance(value, unicode):
return value.encode('utf-8')
def _from_base_type(self, value):
if isinstance(value, str):
try:
return unicode(value, 'utf-8')
except UnicodeDecodeError:
# Since older versions of NDB could write non-UTF-8 TEXT
# properties, we can't just reject these. But _validate() now
# rejects these, so you can't write new non-UTF-8 TEXT
# properties.
# TODO: Eventually we should close this hole.
pass
def _db_set_uncompressed_meaning(self, p):
if not self._indexed:
p.set_meaning(entity_pb.Property.TEXT)
class StringProperty(TextProperty):
"""An indexed Property whose value is a text string of limited length."""
_indexed = True
class GeoPtProperty(Property):
"""A Property whose value is a GeoPt."""
def _validate(self, value):
if not isinstance(value, GeoPt):
raise datastore_errors.BadValueError('Expected GeoPt, got %r' %
(value,))
def _db_set_value(self, v, unused_p, value):
if not isinstance(value, GeoPt):
raise TypeError('GeoPtProperty %s can only be set to GeoPt values; '
'received %r' % (self._name, value))
pv = v.mutable_pointvalue()
pv.set_x(value.lat)
pv.set_y(value.lon)
def _db_get_value(self, v, unused_p):
if not v.has_pointvalue():
return None
pv = v.pointvalue()
return GeoPt(pv.x(), pv.y())
def _unpack_user(v):
"""Internal helper to unpack a User value from a protocol buffer."""
uv = v.uservalue()
email = unicode(uv.email().decode('utf-8'))
auth_domain = unicode(uv.auth_domain().decode('utf-8'))
obfuscated_gaiaid = uv.obfuscated_gaiaid().decode('utf-8')
obfuscated_gaiaid = unicode(obfuscated_gaiaid)
federated_identity = None
if uv.has_federated_identity():
federated_identity = unicode(
uv.federated_identity().decode('utf-8'))
value = users.User(email=email,
_auth_domain=auth_domain,
_user_id=obfuscated_gaiaid,
federated_identity=federated_identity)
return value
class PickleProperty(BlobProperty):
"""A Property whose value is any picklable Python object."""
def _to_base_type(self, value):
return pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
def _from_base_type(self, value):
return pickle.loads(value)
class JsonProperty(BlobProperty):
"""A property whose value is any Json-encodable Python object."""
# Use late import so the dependency is optional.
def _to_base_type(self, value):
try:
import json
except ImportError:
import simplejson as json
return json.dumps(value)
def _from_base_type(self, value):
try:
import json
except ImportError:
import simplejson as json
return json.loads(value)
class UserProperty(Property):
"""A Property whose value is a User object.
Note: this exists for backwards compatibility with existing
datastore schemas only; we do not recommend storing User objects
directly in the datastore, but instead recommend storing the
user.user_id() value.
"""
_attributes = Property._attributes + ['_auto_current_user',
'_auto_current_user_add']
_auto_current_user = False
_auto_current_user_add = False
@utils.positional(1 + Property._positional)
def __init__(self, name=None, auto_current_user=False,
auto_current_user_add=False, **kwds):
super(UserProperty, self).__init__(name=name, **kwds)
# TODO: Disallow combining auto_current_user* and default?
if self._repeated:
if auto_current_user:
raise ValueError('UserProperty could use auto_current_user and be '
'repeated, but there would be no point.')
elif auto_current_user_add:
raise ValueError('UserProperty could use auto_current_user_add and be '
'repeated, but there would be no point.')
self._auto_current_user = auto_current_user
self._auto_current_user_add = auto_current_user_add
def _validate(self, value):
if not isinstance(value, users.User):
raise datastore_errors.BadValueError('Expected User, got %r' %
(value,))
def _prepare_for_put(self, entity):
if (self._auto_current_user or
(self._auto_current_user_add and not self._has_value(entity))):
value = users.get_current_user()
if value is not None:
self._store_value(entity, value)
def _db_set_value(self, v, p, value):
datastore_types.PackUser(p.name(), value, v)
def _db_get_value(self, v, unused_p):
if not v.has_uservalue():
return None
return _unpack_user(v)
class KeyProperty(Property):
"""A Property whose value is a Key object.
Optional keyword argument: kind=<kind>, to require that keys
assigned to this property always have the indicated kind. May be a
string or a Model subclass.
"""
_attributes = Property._attributes + ['_kind']
_kind = None
@utils.positional(2 + Property._positional)
def __init__(self, *args, **kwds):
# Support several positional signatures:
# () => name=None, kind from kwds
# (None) => name=None, kind from kwds
# (name) => name=arg 0, kind from kwds
# (kind) => name=None, kind=arg 0
# (name, kind) => name=arg 0, kind=arg 1
# (kind, name) => name=arg 1, kind=arg 0
# The positional kind must be a Model subclass; it cannot be a string.
name = kind = None
for arg in args:
if isinstance(arg, basestring):
if name is not None:
raise TypeError('You can only specify one name')
name = arg
elif isinstance(arg, type) and issubclass(arg, Model):
if kind is not None:
raise TypeError('You can only specify one kind')
kind = arg
elif arg is not None:
raise TypeError('Unexpected positional argument: %r' % (arg,))
if name is None:
name = kwds.pop('name', None)
elif 'name' in kwds:
raise TypeError('You can only specify name once')
if kind is None:
kind = kwds.pop('kind', None)
elif 'kind' in kwds:
raise TypeError('You can only specify kind once')
if kind is not None:
if isinstance(kind, type) and issubclass(kind, Model):
kind = kind._get_kind()
if isinstance(kind, unicode):
kind = kind.encode('utf-8')
if not isinstance(kind, str):
raise TypeError('kind must be a Model class or a string')
super(KeyProperty, self).__init__(name, **kwds)
self._kind = kind
def _datastore_type(self, value):
return datastore_types.Key(value.urlsafe())
def _validate(self, value):
if not isinstance(value, Key):
raise datastore_errors.BadValueError('Expected Key, got %r' % (value,))
# Reject incomplete keys.
if not value.id():
raise datastore_errors.BadValueError('Expected complete Key, got %r' %
(value,))
if self._kind is not None:
if value.kind() != self._kind:
raise datastore_errors.BadValueError(
'Expected Key with kind=%r, got %r' % (self._kind, value))
def _db_set_value(self, v, unused_p, value):
if not isinstance(value, Key):
raise TypeError('KeyProperty %s can only be set to Key values; '
'received %r' % (self._name, value))
# See datastore_types.PackKey
ref = value.reference()
rv = v.mutable_referencevalue() # A Reference
rv.set_app(ref.app())
if ref.has_name_space():
rv.set_name_space(ref.name_space())
for elem in ref.path().element_list():
rv.add_pathelement().CopyFrom(elem)
def _db_get_value(self, v, unused_p):
if not v.has_referencevalue():
return None
ref = entity_pb.Reference()
rv = v.referencevalue()
if rv.has_app():
ref.set_app(rv.app())
if rv.has_name_space():
ref.set_name_space(rv.name_space())
path = ref.mutable_path()
for elem in rv.pathelement_list():
path.add_element().CopyFrom(elem)
return Key(reference=ref)
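# Illustrative sketch, not part of the library: KeyProperty with the optional
# kind= restriction described above. The model names are hypothetical; the
# kind may be given as a Model subclass or as a string.
#   class Author(Model):
#     pass
#   class Book(Model):
#     author_key = KeyProperty(kind=Author)              # only 'Author' keys
#     sequels = KeyProperty(kind='Book', repeated=True)  # kind as a string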
class BlobKeyProperty(Property):
"""A Property whose value is a BlobKey object."""
def _validate(self, value):
if not isinstance(value, datastore_types.BlobKey):
raise datastore_errors.BadValueError('Expected BlobKey, got %r' %
(value,))
def _db_set_value(self, v, p, value):
if not isinstance(value, datastore_types.BlobKey):
raise TypeError('BlobKeyProperty %s can only be set to BlobKey values; '
'received %r' % (self._name, value))
p.set_meaning(entity_pb.Property.BLOBKEY)
v.set_stringvalue(str(value))
def _db_get_value(self, v, unused_p):
if not v.has_stringvalue():
return None
return datastore_types.BlobKey(v.stringvalue())
# The Epoch (a zero POSIX timestamp).
_EPOCH = datetime.datetime.utcfromtimestamp(0)
class DateTimeProperty(Property):
"""A Property whose value is a datetime object.
Note: Unlike Django, auto_now_add can be overridden by setting the
value before writing the entity. And unlike classic db, auto_now
does not supply a default value. Also unlike classic db, when the
entity is written, the property values are updated to match what
was written. Finally, beware that this also updates the value in
the in-process cache, *and* that auto_now_add may interact weirdly
with transaction retries (a retry of a property with auto_now_add
set will reuse the value that was set on the first try).
"""
_attributes = Property._attributes + ['_auto_now', '_auto_now_add']
_auto_now = False
_auto_now_add = False
@utils.positional(1 + Property._positional)
def __init__(self, name=None, auto_now=False, auto_now_add=False, **kwds):
super(DateTimeProperty, self).__init__(name=name, **kwds)
# TODO: Disallow combining auto_now* and default?
if self._repeated:
if auto_now:
raise ValueError('DateTimeProperty %s could use auto_now and be '
'repeated, but there would be no point.' % self._name)
elif auto_now_add:
raise ValueError('DateTimeProperty %s could use auto_now_add and be '
'repeated, but there would be no point.' % self._name)
self._auto_now = auto_now
self._auto_now_add = auto_now_add
def _validate(self, value):
if not isinstance(value, datetime.datetime):
raise datastore_errors.BadValueError('Expected datetime, got %r' %
(value,))
def _now(self):
return datetime.datetime.now()
def _prepare_for_put(self, entity):
if (self._auto_now or
(self._auto_now_add and not self._has_value(entity))):
value = self._now()
self._store_value(entity, value)
def _db_set_value(self, v, p, value):
if not isinstance(value, datetime.datetime):
raise TypeError('DatetimeProperty %s can only be set to datetime values; '
'received %r' % (self._name, value))
if value.tzinfo is not None:
raise NotImplementedError('DatetimeProperty %s can only support UTC. '
'Please derive a new Property to support '
'alternative timezones.' % self._name)
dt = value - _EPOCH
ival = dt.microseconds + 1000000 * (dt.seconds + 24 * 3600 * dt.days)
v.set_int64value(ival)
p.set_meaning(entity_pb.Property.GD_WHEN)
def _db_get_value(self, v, unused_p):
if not v.has_int64value():
return None
ival = v.int64value()
return _EPOCH + datetime.timedelta(microseconds=ival)
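# Illustrative sketch, not part of the library: auto_now_add stamps an entity
# the first time it is written, auto_now re-stamps it on every put(), as
# described in the DateTimeProperty docstring above. The model below is
# hypothetical.
#   class Article(Model):
#     created = DateTimeProperty(auto_now_add=True)
#     updated = DateTimeProperty(auto_now=True)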
def _date_to_datetime(value):
"""Convert a date to a datetime for datastore storage.
Args:
value: A datetime.date object.
Returns:
A datetime object with time set to 0:00.
"""
if not isinstance(value, datetime.date):
raise TypeError('Cannot convert to datetime expected date value; '
'received %s' % value)
return datetime.datetime(value.year, value.month, value.day)
def _time_to_datetime(value):
"""Convert a time to a datetime for datastore storage.
Args:
value: A datetime.time object.
Returns:
A datetime object with date set to 1970-01-01.
"""
if not isinstance(value, datetime.time):
raise TypeError('Cannot convert to datetime expected time value; '
'received %s' % value)
return datetime.datetime(1970, 1, 1,
value.hour, value.minute, value.second,
value.microsecond)
class DateProperty(DateTimeProperty):
"""A Property whose value is a date object."""
def _validate(self, value):
if not isinstance(value, datetime.date):
raise datastore_errors.BadValueError('Expected date, got %r' %
(value,))
def _to_base_type(self, value):
assert isinstance(value, datetime.date), repr(value)
return _date_to_datetime(value)
def _from_base_type(self, value):
assert isinstance(value, datetime.datetime), repr(value)
return value.date()
def _now(self):
return datetime.date.today()
class TimeProperty(DateTimeProperty):
"""A Property whose value is a time object."""
def _validate(self, value):
if not isinstance(value, datetime.time):
raise datastore_errors.BadValueError('Expected time, got %r' %
(value,))
def _to_base_type(self, value):
assert isinstance(value, datetime.time), repr(value)
return _time_to_datetime(value)
def _from_base_type(self, value):
assert isinstance(value, datetime.datetime), repr(value)
return value.time()
def _now(self):
return datetime.datetime.now().time()
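# Illustrative sketch, not part of the library: DateProperty and TimeProperty
# store their values as datetimes on disk (via _date_to_datetime() and
# _time_to_datetime() above), but user code only ever sees date/time objects.
# The model below is hypothetical.
#   class Appointment(Model):
#     day = DateProperty()
#     start = TimeProperty()
#   appt = Appointment(day=datetime.date(2012, 1, 1),
#                      start=datetime.time(9, 30))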
class _StructuredGetForDictMixin(Property):
"""Mixin class so *StructuredProperty can share _get_for_dict().
The behavior here is that sub-entities are converted to dictionaries
by calling to_dict() on them (also doing the right thing for
repeated properties).
"""
def _get_for_dict(self, entity):
value = self._get_value(entity)
if self._repeated:
value = [v._to_dict() for v in value]
elif value is not None:
value = value._to_dict()
return value
class StructuredProperty(_StructuredGetForDictMixin):
"""A Property whose value is itself an entity.
The values of the sub-entity are indexed and can be queried.
See the module docstring for details.
"""
_modelclass = None
_attributes = ['_modelclass'] + Property._attributes
_positional = 1 + Property._positional # Add modelclass as positional arg.
@utils.positional(1 + _positional)
def __init__(self, modelclass, name=None, **kwds):
super(StructuredProperty, self).__init__(name=name, **kwds)
if self._repeated:
if modelclass._has_repeated:
raise TypeError('This StructuredProperty cannot use repeated=True '
'because its model class (%s) contains repeated '
'properties (directly or indirectly).' %
modelclass.__name__)
self._modelclass = modelclass
def _get_value(self, entity):
"""Override _get_value() to *not* raise UnprojectedPropertyError."""
value = self._get_user_value(entity)
if value is None and entity._projection:
# Invoke super _get_value() to raise the proper exception.
return super(StructuredProperty, self)._get_value(entity)
return value
def __getattr__(self, attrname):
"""Dynamically get a subproperty."""
# Optimistically try to use the dict key.
prop = self._modelclass._properties.get(attrname)
# We're done if we have a hit and _code_name matches.
if prop is None or prop._code_name != attrname:
# Otherwise, use linear search looking for a matching _code_name.
for prop in self._modelclass._properties.values():
if prop._code_name == attrname:
break
else:
        # This is only reached if the loop completed without hitting the break.
prop = None
if prop is None:
raise AttributeError('Model subclass %s has no attribute %s' %
(self._modelclass.__name__, attrname))
prop_copy = copy.copy(prop)
prop_copy._name = self._name + '.' + prop_copy._name
# Cache the outcome, so subsequent requests for the same attribute
# name will get the copied property directly rather than going
# through the above motions all over again.
setattr(self, attrname, prop_copy)
return prop_copy
def _comparison(self, op, value):
if op != '=':
raise datastore_errors.BadFilterError(
'StructuredProperty filter can only use ==')
if not self._indexed:
raise datastore_errors.BadFilterError(
'Cannot query for unindexed StructuredProperty %s' % self._name)
# Import late to avoid circular imports.
from .query import ConjunctionNode, PostFilterNode
from .query import RepeatedStructuredPropertyPredicate
if value is None:
from .query import FilterNode # Import late to avoid circular imports.
return FilterNode(self._name, op, value)
value = self._do_validate(value)
value = self._call_to_base_type(value)
filters = []
match_keys = []
# TODO: Why not just iterate over value._values?
for prop in self._modelclass._properties.itervalues():
vals = prop._get_base_value_unwrapped_as_list(value)
if prop._repeated:
if vals:
raise datastore_errors.BadFilterError(
'Cannot query for non-empty repeated property %s' % prop._name)
continue
assert isinstance(vals, list) and len(vals) == 1, repr(vals)
val = vals[0]
if val is not None:
altprop = getattr(self, prop._code_name)
filt = altprop._comparison(op, val)
filters.append(filt)
match_keys.append(altprop._name)
if not filters:
raise datastore_errors.BadFilterError(
'StructuredProperty filter without any values')
if len(filters) == 1:
return filters[0]
if self._repeated:
pb = value._to_pb(allow_partial=True)
pred = RepeatedStructuredPropertyPredicate(match_keys, pb,
self._name + '.')
filters.append(PostFilterNode(pred))
return ConjunctionNode(*filters)
def _IN(self, value):
if not isinstance(value, (list, tuple, set, frozenset)):
raise datastore_errors.BadArgumentError(
'Expected list, tuple or set, got %r' % (value,))
from .query import DisjunctionNode, FalseNode
# Expand to a series of == filters.
filters = [self._comparison('=', val) for val in value]
if not filters:
# DisjunctionNode doesn't like an empty list of filters.
# Running the query will still fail, but this matches the
# behavior of IN for regular properties.
return FalseNode()
else:
return DisjunctionNode(*filters)
IN = _IN
def _validate(self, value):
if not isinstance(value, self._modelclass):
raise datastore_errors.BadValueError('Expected %s instance, got %r' %
(self._modelclass.__name__, value))
def _has_value(self, entity, rest=None):
# rest: optional list of attribute names to check in addition.
# Basically, prop._has_value(self, ent, ['x', 'y']) is similar to
# (prop._has_value(ent) and
# prop.x._has_value(ent.x) and
# prop.x.y._has_value(ent.x.y))
# assuming prop.x and prop.x.y exist.
# NOTE: This is not particularly efficient if len(rest) > 1,
# but that seems a rare case, so for now I don't care.
ok = super(StructuredProperty, self)._has_value(entity)
if ok and rest:
lst = self._get_base_value_unwrapped_as_list(entity)
if len(lst) != 1:
raise RuntimeError('Failed to retrieve sub-entity of StructuredProperty'
' %s' % self._name)
subent = lst[0]
if subent is None:
return True
subprop = subent._properties.get(rest[0])
if subprop is None:
ok = False
else:
ok = subprop._has_value(subent, rest[1:])
return ok
def _serialize(self, entity, pb, prefix='', parent_repeated=False):
# entity -> pb; pb is an EntityProto message
values = self._get_base_value_unwrapped_as_list(entity)
for value in values:
if value is not None:
# TODO: Avoid re-sorting for repeated values.
for unused_name, prop in sorted(value._properties.iteritems()):
prop._serialize(value, pb, prefix + self._name + '.',
self._repeated or parent_repeated)
else:
# Serialize a single None
super(StructuredProperty, self)._serialize(
entity, pb, prefix=prefix, parent_repeated=parent_repeated)
def _deserialize(self, entity, p, depth=1):
if not self._repeated:
subentity = self._retrieve_value(entity)
if subentity is None:
subentity = self._modelclass()
self._store_value(entity, _BaseValue(subentity))
cls = self._modelclass
if isinstance(subentity, _BaseValue):
# NOTE: It may not be a _BaseValue when we're deserializing a
# repeated structured property.
subentity = subentity.b_val
if not isinstance(subentity, cls):
raise RuntimeError('Cannot deserialize StructuredProperty %s; value '
'retrieved not a %s instance %r' %
(self._name, cls.__name__, subentity))
prop = subentity._get_property_for(p, depth=depth)
if prop is None:
# Special case: kill subentity after all.
self._store_value(entity, None)
return
prop._deserialize(subentity, p, depth + 1)
return
# The repeated case is more complicated.
# TODO: Prove we won't get here for orphans.
name = p.name()
parts = name.split('.')
if len(parts) <= depth:
raise RuntimeError('StructuredProperty %s expected to find properties '
'separated by periods at a depth of %i; received %r' %
(self._name, depth, parts))
next = parts[depth]
rest = parts[depth + 1:]
prop = self._modelclass._properties.get(next)
if prop is None:
raise RuntimeError('Unable to find property %s of StructuredProperty %s.'
% (next, self._name))
values = self._get_base_value_unwrapped_as_list(entity)
# Find the first subentity that doesn't have a value for this
# property yet.
for sub in values:
if not isinstance(sub, self._modelclass):
raise TypeError('sub-entities must be instances of their Model class.')
if not prop._has_value(sub, rest):
subentity = sub
break
else:
# We didn't find one. Add a new one to the underlying list of
# values (the list returned by
# _get_base_value_unwrapped_as_list() is a copy so we
# can't append to it).
subentity = self._modelclass()
values = self._retrieve_value(entity)
values.append(_BaseValue(subentity))
prop._deserialize(subentity, p, depth + 1)
def _prepare_for_put(self, entity):
values = self._get_base_value_unwrapped_as_list(entity)
for value in values:
if value is not None:
value._prepare_for_put()
def _check_projection(self, rest=None):
"""Override for Model._check_projection().
Raises:
BadProjectionError if no subproperty is specified or if something
is wrong with the subproperty.
"""
if not rest:
raise BadProjectionError('Projecting on structured property %s '
'requires a subproperty' %
self._name)
self._modelclass._check_projections([rest])
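# Illustrative sketch, not part of the library: a StructuredProperty whose
# sub-properties can be filtered on through the dynamic subproperty access
# implemented in __getattr__() above. Model and property names are
# hypothetical; StringProperty and the Property comparison operators are
# defined elsewhere in this module.
#   class Address(Model):
#     city = StringProperty()
#   class Contact(Model):
#     name = StringProperty()
#     address = StructuredProperty(Address)
#   qry = Contact.query(Contact.address.city == 'Amsterdam')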
class LocalStructuredProperty(_StructuredGetForDictMixin, BlobProperty):
"""Substructure that is serialized to an opaque blob.
This looks like StructuredProperty on the Python side, but is
written like a BlobProperty in the datastore. It is not indexed
and you cannot query for subproperties. On the other hand, the
on-disk representation is more efficient and can be made even more
efficient by passing compressed=True, which compresses the blob
data using gzip.
"""
_indexed = False
_modelclass = None
_keep_keys = False
_attributes = ['_modelclass'] + BlobProperty._attributes + ['_keep_keys']
_positional = 1 + BlobProperty._positional # Add modelclass as positional.
@utils.positional(1 + _positional)
def __init__(self, modelclass,
name=None, compressed=False, keep_keys=False,
**kwds):
super(LocalStructuredProperty, self).__init__(name=name,
compressed=compressed,
**kwds)
if self._indexed:
raise NotImplementedError('Cannot index LocalStructuredProperty %s.' %
self._name)
self._modelclass = modelclass
self._keep_keys = keep_keys
def _validate(self, value):
if not isinstance(value, self._modelclass):
raise datastore_errors.BadValueError('Expected %s instance, got %r' %
(self._modelclass.__name__, value))
def _to_base_type(self, value):
if isinstance(value, self._modelclass):
pb = value._to_pb(set_key=self._keep_keys)
return pb.SerializePartialToString()
def _from_base_type(self, value):
if not isinstance(value, self._modelclass):
pb = entity_pb.EntityProto()
pb.MergePartialFromString(value)
if not self._keep_keys:
pb.clear_key()
return self._modelclass._from_pb(pb)
def _prepare_for_put(self, entity):
# TODO: Using _get_user_value() here makes it impossible to
# subclass this class and add a _from_base_type(). But using
# _get_base_value() won't work, since that would return
# the serialized (and possibly compressed) serialized blob.
value = self._get_user_value(entity)
if value is not None:
if self._repeated:
for subent in value:
subent._prepare_for_put()
else:
value._prepare_for_put()
def _db_set_uncompressed_meaning(self, p):
p.set_meaning(entity_pb.Property.ENTITY_PROTO)
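# Illustrative sketch, not part of the library: LocalStructuredProperty stores
# its sub-entity as an opaque (optionally gzip-compressed) blob, so unlike
# StructuredProperty it cannot be queried on. Names below are hypothetical.
#   class Preferences(Model):
#     theme = StringProperty()
#   class UserSettings(Model):
#     prefs = LocalStructuredProperty(Preferences, compressed=True)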
class GenericProperty(Property):
"""A Property whose value can be (almost) any basic type.
This is mainly used for Expando and for orphans (values present in
the datastore but not represented in the Model subclass) but can
also be used explicitly for properties with dynamically-typed
values.
This supports compressed=True, which is only effective for str
values (not for unicode), and implies indexed=False.
"""
_compressed = False
_attributes = Property._attributes + ['_compressed']
@utils.positional(1 + Property._positional)
def __init__(self, name=None, compressed=False, **kwds):
if compressed: # Compressed implies unindexed.
kwds.setdefault('indexed', False)
super(GenericProperty, self).__init__(name=name, **kwds)
self._compressed = compressed
if compressed and self._indexed:
# TODO: Allow this, but only allow == and IN comparisons?
raise NotImplementedError('GenericProperty %s cannot be compressed and '
'indexed at the same time.' % self._name)
def _to_base_type(self, value):
if self._compressed and isinstance(value, str):
return _CompressedValue(zlib.compress(value))
def _from_base_type(self, value):
if isinstance(value, _CompressedValue):
return zlib.decompress(value.z_val)
def _validate(self, value):
if (isinstance(value, basestring) and
self._indexed and
len(value) > _MAX_STRING_LENGTH):
raise datastore_errors.BadValueError(
'Indexed value %s must be at most %d bytes' %
(self._name, _MAX_STRING_LENGTH))
def _db_get_value(self, v, p):
# This is awkward but there seems to be no faster way to inspect
# what union member is present. datastore_types.FromPropertyPb(),
# the undisputed authority, has the same series of if-elif blocks.
# (We don't even want to think about multiple members... :-)
if v.has_stringvalue():
sval = v.stringvalue()
meaning = p.meaning()
if meaning == entity_pb.Property.BLOBKEY:
sval = BlobKey(sval)
elif meaning == entity_pb.Property.BLOB:
if p.meaning_uri() == _MEANING_URI_COMPRESSED:
sval = _CompressedValue(sval)
elif meaning == entity_pb.Property.ENTITY_PROTO:
# NOTE: This is only used for uncompressed LocalStructuredProperties.
pb = entity_pb.EntityProto()
pb.MergePartialFromString(sval)
modelclass = Expando
if pb.key().path().element_size():
kind = pb.key().path().element(-1).type()
modelclass = Model._kind_map.get(kind, modelclass)
sval = modelclass._from_pb(pb)
elif meaning != entity_pb.Property.BYTESTRING:
try:
sval.decode('ascii')
# If this passes, don't return unicode.
except UnicodeDecodeError:
try:
sval = unicode(sval.decode('utf-8'))
except UnicodeDecodeError:
pass
return sval
elif v.has_int64value():
ival = v.int64value()
if p.meaning() == entity_pb.Property.GD_WHEN:
return _EPOCH + datetime.timedelta(microseconds=ival)
return ival
elif v.has_booleanvalue():
# The booleanvalue field is an int32, so booleanvalue() returns
# an int, hence the conversion.
return bool(v.booleanvalue())
elif v.has_doublevalue():
return v.doublevalue()
elif v.has_referencevalue():
rv = v.referencevalue()
app = rv.app()
namespace = rv.name_space()
pairs = [(elem.type(), elem.id() or elem.name())
for elem in rv.pathelement_list()]
return Key(pairs=pairs, app=app, namespace=namespace)
elif v.has_pointvalue():
pv = v.pointvalue()
return GeoPt(pv.x(), pv.y())
elif v.has_uservalue():
return _unpack_user(v)
else:
# A missing value implies null.
return None
def _db_set_value(self, v, p, value):
# TODO: use a dict mapping types to functions
if isinstance(value, str):
v.set_stringvalue(value)
# TODO: Set meaning to BLOB or BYTESTRING if it's not UTF-8?
# (Or TEXT if unindexed.)
elif isinstance(value, unicode):
v.set_stringvalue(value.encode('utf8'))
if not self._indexed:
p.set_meaning(entity_pb.Property.TEXT)
elif isinstance(value, bool): # Must test before int!
v.set_booleanvalue(value)
elif isinstance(value, (int, long)):
if not (-_MAX_LONG <= value < _MAX_LONG):
        raise TypeError('Property %s can only accept 64-bit integers; '
                        'received %s' % (self._name, value))
v.set_int64value(value)
elif isinstance(value, float):
v.set_doublevalue(value)
elif isinstance(value, Key):
# See datastore_types.PackKey
ref = value.reference()
rv = v.mutable_referencevalue() # A Reference
rv.set_app(ref.app())
if ref.has_name_space():
rv.set_name_space(ref.name_space())
for elem in ref.path().element_list():
rv.add_pathelement().CopyFrom(elem)
elif isinstance(value, datetime.datetime):
if value.tzinfo is not None:
        raise NotImplementedError('Property %s can only support UTC. '
'Please derive a new Property to support '
'alternative timezones.' % self._name)
dt = value - _EPOCH
ival = dt.microseconds + 1000000 * (dt.seconds + 24 * 3600 * dt.days)
v.set_int64value(ival)
p.set_meaning(entity_pb.Property.GD_WHEN)
elif isinstance(value, GeoPt):
pv = v.mutable_pointvalue()
pv.set_x(value.lat)
pv.set_y(value.lon)
elif isinstance(value, users.User):
datastore_types.PackUser(p.name(), value, v)
elif isinstance(value, BlobKey):
v.set_stringvalue(str(value))
p.set_meaning(entity_pb.Property.BLOBKEY)
elif isinstance(value, Model):
set_key = value._key is not None
pb = value._to_pb(set_key=set_key)
value = pb.SerializePartialToString()
v.set_stringvalue(value)
p.set_meaning(entity_pb.Property.ENTITY_PROTO)
elif isinstance(value, _CompressedValue):
value = value.z_val
v.set_stringvalue(value)
p.set_meaning_uri(_MEANING_URI_COMPRESSED)
p.set_meaning(entity_pb.Property.BLOB)
else:
raise NotImplementedError('Property %s does not support %s types.' %
(self._name, type(value)))
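# Illustrative sketch, not part of the library: GenericProperty accepts any of
# the basic value types handled by _db_set_value() above, which is also what
# Expando (further below) relies on for dynamically created attributes. The
# model name is hypothetical.
#   class Row(Model):
#     payload = GenericProperty()
#   Row(payload=42)
#   Row(payload=u'some text')
#   Row(payload=datetime.datetime(1970, 1, 2))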
class ComputedProperty(GenericProperty):
"""A Property whose value is determined by a user-supplied function.
Computed properties cannot be set directly, but are instead generated by a
function when required. They are useful to provide fields in the datastore
that can be used for filtering or sorting without having to manually set the
value in code - for example, sorting on the length of a BlobProperty, or
using an equality filter to check if another field is not empty.
ComputedProperty can be declared as a regular property, passing a function as
the first argument, or it can be used as a decorator for the function that
does the calculation.
Example:
>>> class DatastoreFile(Model):
... name = StringProperty()
... name_lower = ComputedProperty(lambda self: self.name.lower())
...
... data = BlobProperty()
...
... @ComputedProperty
... def size(self):
... return len(self.data)
...
... def _compute_hash(self):
... return hashlib.sha1(self.data).hexdigest()
... hash = ComputedProperty(_compute_hash, name='sha1')
"""
def __init__(self, func, name=None, indexed=None, repeated=None):
"""Constructor.
Args:
func: A function that takes one argument, the model instance, and returns
a calculated value.
"""
super(ComputedProperty, self).__init__(name=name, indexed=indexed,
repeated=repeated)
self._func = func
def _set_value(self, entity, value):
raise ComputedPropertyError("Cannot assign to a ComputedProperty")
def _get_value(self, entity):
# About projections and computed properties: if the computed
# property itself is in the projection, don't recompute it; this
# prevents raising UnprojectedPropertyError if one of the
# dependents is not in the projection. However, if the computed
# property is not in the projection, compute it normally -- its
# dependents may all be in the projection, and it may be useful to
# access the computed value without having it in the projection.
# In this case, if any of the dependents is not in the projection,
# accessing it in the computation function will raise
# UnprojectedPropertyError which will just bubble up.
if entity._projection and self._name in entity._projection:
return super(ComputedProperty, self)._get_value(entity)
value = self._func(entity)
self._store_value(entity, value)
return value
def _prepare_for_put(self, entity):
self._get_value(entity) # For its side effects.
class MetaModel(type):
"""Metaclass for Model.
This exists to fix up the properties -- they need to know their name.
This is accomplished by calling the class's _fix_properties() method.
"""
def __init__(cls, name, bases, classdict):
super(MetaModel, cls).__init__(name, bases, classdict)
cls._fix_up_properties()
def __repr__(cls):
props = []
for _, prop in sorted(cls._properties.iteritems()):
props.append('%s=%r' % (prop._code_name, prop))
return '%s<%s>' % (cls.__name__, ', '.join(props))
class Model(_NotEqualMixin):
"""A class describing datastore entities.
Model instances are usually called entities. All model classes
inheriting from Model automatically have MetaModel as their
  metaclass, so that the properties are fixed up properly once the
  class is defined.
Because of this, you cannot use the same Property object to describe
multiple properties -- you must create separate Property objects for
each property. E.g. this does not work:
wrong_prop = StringProperty()
class Wrong(Model):
wrong1 = wrong_prop
wrong2 = wrong_prop
The kind is normally equal to the class name (exclusive of the
module name or any other parent scope). To override the kind,
define a class method named _get_kind(), as follows:
class MyModel(Model):
@classmethod
def _get_kind(cls):
return 'AnotherKind'
"""
__metaclass__ = MetaModel
# Class variables updated by _fix_up_properties()
_properties = None
_has_repeated = False
_kind_map = {} # Dict mapping {kind: Model subclass}
# Defaults for instance variables.
_entity_key = None
_values = None
_projection = () # Tuple of names of projected properties.
# Hardcoded pseudo-property for the key.
_key = ModelKey()
key = _key
def __init__(*args, **kwds):
"""Creates a new instance of this model (a.k.a. an entity).
The new entity must be written to the datastore using an explicit
call to .put().
Keyword Args:
key: Key instance for this model. If key is used, id and parent must
be None.
id: Key id for this model. If id is used, key must be None.
parent: Key instance for the parent model or None for a top-level one.
If parent is used, key must be None.
namespace: Optional namespace.
app: Optional app ID.
**kwds: Keyword arguments mapping to properties of this model.
Note: you cannot define a property named key; the .key attribute
always refers to the entity's key. But you can define properties
named id or parent. Values for the latter cannot be passed
through the constructor, but can be assigned to entity attributes
after the entity has been created.
"""
(self,) = args
get_arg = self.__get_arg
key = get_arg(kwds, 'key')
id = get_arg(kwds, 'id')
app = get_arg(kwds, 'app')
namespace = get_arg(kwds, 'namespace')
parent = get_arg(kwds, 'parent')
projection = get_arg(kwds, 'projection')
if key is not None:
if (id is not None or parent is not None or
app is not None or namespace is not None):
raise datastore_errors.BadArgumentError(
'Model constructor given key= does not accept '
'id=, app=, namespace=, or parent=.')
self._key = _validate_key(key, entity=self)
elif (id is not None or parent is not None or
app is not None or namespace is not None):
self._key = Key(self._get_kind(), id,
parent=parent, app=app, namespace=namespace)
self._values = {}
self._set_attributes(kwds)
# Set the projection last, otherwise it will prevent _set_attributes().
if projection:
self._projection = tuple(projection)
@classmethod
def __get_arg(cls, kwds, kwd):
"""Helper method to parse keywords that may be property names."""
alt_kwd = '_' + kwd
if alt_kwd in kwds:
return kwds.pop(alt_kwd)
if kwd in kwds:
obj = getattr(cls, kwd, None)
if not isinstance(obj, Property) or isinstance(obj, ModelKey):
return kwds.pop(kwd)
return None
def __getstate__(self):
return self._to_pb().Encode()
def __setstate__(self, serialized_pb):
pb = entity_pb.EntityProto(serialized_pb)
self.__init__()
self.__class__._from_pb(pb, set_key=False, ent=self)
def _populate(self, **kwds):
"""Populate an instance from keyword arguments.
Each keyword argument will be used to set a corresponding
    property. Keywords must refer to valid property names. This is
similar to passing keyword arguments to the Model constructor,
except that no provisions for key, id or parent are made.
"""
self._set_attributes(kwds)
populate = _populate
def _set_attributes(self, kwds):
"""Internal helper to set attributes from keyword arguments.
Expando overrides this.
"""
cls = self.__class__
for name, value in kwds.iteritems():
prop = getattr(cls, name) # Raises AttributeError for unknown properties.
if not isinstance(prop, Property):
raise TypeError('Cannot set non-property %s' % name)
prop._set_value(self, value)
def _find_uninitialized(self):
"""Internal helper to find uninitialized properties.
Returns:
A set of property names.
"""
return set(name
for name, prop in self._properties.iteritems()
if not prop._is_initialized(self))
def _check_initialized(self):
"""Internal helper to check for uninitialized properties.
Raises:
BadValueError if it finds any.
"""
baddies = self._find_uninitialized()
if baddies:
raise datastore_errors.BadValueError(
'Entity has uninitialized properties: %s' % ', '.join(baddies))
def __repr__(self):
"""Return an unambiguous string representation of an entity."""
args = []
for prop in self._properties.itervalues():
if prop._has_value(self):
val = prop._retrieve_value(self)
if val is None:
rep = 'None'
elif prop._repeated:
reprs = [prop._value_to_repr(v) for v in val]
if reprs:
reprs[0] = '[' + reprs[0]
reprs[-1] = reprs[-1] + ']'
rep = ', '.join(reprs)
else:
rep = '[]'
else:
rep = prop._value_to_repr(val)
args.append('%s=%s' % (prop._code_name, rep))
args.sort()
if self._key is not None:
args.insert(0, 'key=%r' % self._key)
if self._projection:
args.append('_projection=%r' % (self._projection,))
s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
return s
@classmethod
def _get_kind(cls):
"""Return the kind name for this class.
    This defaults to cls.__name__; users may override this to give a
class a different on-disk name than its class name.
"""
return cls.__name__
@classmethod
def _class_name(cls):
"""A hook for polymodel to override.
For regular models and expandos this is just an alias for
_get_kind(). For PolyModel subclasses, it returns the class name
(as set in the 'class' attribute thereof), whereas _get_kind()
returns the kind (the class name of the root class of a specific
PolyModel hierarchy).
"""
return cls._get_kind()
@classmethod
def _default_filters(cls):
"""Return an iterable of filters that are always to be applied.
This is used by PolyModel to quietly insert a filter for the
current class name.
"""
return ()
@classmethod
def _reset_kind_map(cls):
"""Clear the kind map. Useful for testing."""
# Preserve "system" kinds, like __namespace__
keep = {}
for name, value in cls._kind_map.iteritems():
if name.startswith('__') and name.endswith('__'):
keep[name] = value
cls._kind_map.clear()
cls._kind_map.update(keep)
def _has_complete_key(self):
"""Return whether this entity has a complete key."""
return self._key is not None and self._key.id() is not None
def __hash__(self):
"""Dummy hash function.
Raises:
Always TypeError to emphasize that entities are mutable.
"""
raise TypeError('Model is not immutable')
# TODO: Reject __lt__, __le__, __gt__, __ge__.
def __eq__(self, other):
"""Compare two entities of the same class for equality."""
if other.__class__ is not self.__class__:
return NotImplemented
if self._key != other._key:
# TODO: If one key is None and the other is an explicit
# incomplete key of the simplest form, this should be OK.
return False
return self._equivalent(other)
def _equivalent(self, other):
"""Compare two entities of the same class, excluding keys."""
if other.__class__ is not self.__class__: # TODO: What about subclasses?
raise NotImplementedError('Cannot compare different model classes. '
'%s is not %s' % (self.__class__.__name__,
                                                  other.__class__.__name__))
if self._projection != other._projection:
return False
# It's all about determining inequality early.
if len(self._properties) != len(other._properties):
return False # Can only happen for Expandos.
my_prop_names = set(self._properties.iterkeys())
their_prop_names = set(other._properties.iterkeys())
if my_prop_names != their_prop_names:
return False # Again, only possible for Expandos.
if self._projection:
my_prop_names = set(self._projection)
for name in my_prop_names:
if '.' in name:
name, _ = name.split('.', 1)
my_value = self._properties[name]._get_value(self)
their_value = other._properties[name]._get_value(other)
if my_value != their_value:
return False
return True
def _to_pb(self, pb=None, allow_partial=False, set_key=True):
"""Internal helper to turn an entity into an EntityProto protobuf."""
if not allow_partial:
self._check_initialized()
if pb is None:
pb = entity_pb.EntityProto()
if set_key:
# TODO: Move the key stuff into ModelAdapter.entity_to_pb()?
self._key_to_pb(pb)
for unused_name, prop in sorted(self._properties.iteritems()):
prop._serialize(self, pb)
return pb
def _key_to_pb(self, pb):
"""Internal helper to copy the key into a protobuf."""
key = self._key
if key is None:
pairs = [(self._get_kind(), None)]
ref = key_module._ReferenceFromPairs(pairs, reference=pb.mutable_key())
else:
ref = key.reference()
pb.mutable_key().CopyFrom(ref)
group = pb.mutable_entity_group() # Must initialize this.
# To work around an SDK issue, only set the entity group if the
# full key is complete. TODO: Remove the top test once fixed.
if key is not None and key.id():
elem = ref.path().element(0)
if elem.id() or elem.name():
group.add_element().CopyFrom(elem)
@classmethod
def _from_pb(cls, pb, set_key=True, ent=None, key=None):
"""Internal helper to create an entity from an EntityProto protobuf."""
if not isinstance(pb, entity_pb.EntityProto):
raise TypeError('pb must be a EntityProto; received %r' % pb)
if ent is None:
ent = cls()
# A key passed in overrides a key in the pb.
if key is None and pb.key().path().element_size():
key = Key(reference=pb.key())
# If set_key is not set, skip a trivial incomplete key.
if key is not None and (set_key or key.id() or key.parent()):
ent._key = key
indexed_properties = pb.property_list()
unindexed_properties = pb.raw_property_list()
projection = []
for plist in [indexed_properties, unindexed_properties]:
for p in plist:
if p.meaning() == entity_pb.Property.INDEX_VALUE:
projection.append(p.name())
prop = ent._get_property_for(p, plist is indexed_properties)
prop._deserialize(ent, p)
ent._set_projection(projection)
return ent
def _set_projection(self, projection):
self._projection = tuple(projection)
by_prefix = {}
for propname in projection:
if '.' in propname:
head, tail = propname.split('.', 1)
if head in by_prefix:
by_prefix[head].append(tail)
else:
by_prefix[head] = [tail]
for propname, proj in by_prefix.iteritems():
prop = self._properties.get(propname)
subval = prop._get_base_value_unwrapped_as_list(self)
for item in subval:
item._set_projection(proj)
def _get_property_for(self, p, indexed=True, depth=0):
"""Internal helper to get the Property for a protobuf-level property."""
name = p.name()
parts = name.split('.')
if len(parts) <= depth:
# Apparently there's an unstructured value here.
# Assume it is a None written for a missing value.
# (It could also be that a schema change turned an unstructured
# value into a structured one. In that case, too, it seems
# better to return None than to return an unstructured value,
# since the latter doesn't match the current schema.)
return None
next = parts[depth]
prop = self._properties.get(next)
if prop is None:
prop = self._fake_property(p, next, indexed)
return prop
def _clone_properties(self):
"""Internal helper to clone self._properties if necessary."""
cls = self.__class__
if self._properties is cls._properties:
self._properties = dict(cls._properties)
def _fake_property(self, p, next, indexed=True):
"""Internal helper to create a fake Property."""
self._clone_properties()
if p.name() != next and not p.name().endswith('.' + next):
prop = StructuredProperty(Expando, next)
prop._store_value(self, _BaseValue(Expando()))
else:
compressed = p.meaning_uri() == _MEANING_URI_COMPRESSED
prop = GenericProperty(next,
repeated=p.multiple(),
indexed=indexed,
compressed=compressed)
prop._code_name = next
self._properties[prop._name] = prop
return prop
@utils.positional(1)
def _to_dict(self, include=None, exclude=None):
"""Return a dict containing the entity's property values.
Args:
include: Optional set of property names to include, default all.
exclude: Optional set of property names to skip, default none.
A name contained in both include and exclude is excluded.
"""
if (include is not None and
not isinstance(include, (list, tuple, set, frozenset))):
raise TypeError('include should be a list, tuple or set')
if (exclude is not None and
not isinstance(exclude, (list, tuple, set, frozenset))):
raise TypeError('exclude should be a list, tuple or set')
values = {}
for prop in self._properties.itervalues():
name = prop._code_name
if include is not None and name not in include:
continue
if exclude is not None and name in exclude:
continue
try:
values[name] = prop._get_for_dict(self)
except UnprojectedPropertyError:
pass # Ignore unprojected properties rather than failing.
return values
to_dict = _to_dict
@classmethod
def _fix_up_properties(cls):
"""Fix up the properties by calling their _fix_up() method.
Note: This is called by MetaModel, but may also be called manually
after dynamically updating a model class.
"""
# Verify that _get_kind() returns an 8-bit string.
kind = cls._get_kind()
if not isinstance(kind, basestring):
raise KindError('Class %s defines a _get_kind() method that returns '
'a non-string (%r)' % (cls.__name__, kind))
if not isinstance(kind, str):
try:
kind = kind.encode('ascii') # ASCII contents is okay.
except UnicodeEncodeError:
raise KindError('Class %s defines a _get_kind() method that returns '
'a Unicode string (%r); please encode using utf-8' %
(cls.__name__, kind))
cls._properties = {} # Map of {name: Property}
if cls.__module__ == __name__: # Skip the classes in *this* file.
return
for name in set(dir(cls)):
attr = getattr(cls, name, None)
if isinstance(attr, ModelAttribute) and not isinstance(attr, ModelKey):
if name.startswith('_'):
raise TypeError('ModelAttribute %s cannot begin with an underscore '
'character. _ prefixed attributes are reserved for '
'temporary Model instance values.' % name)
attr._fix_up(cls, name)
if isinstance(attr, Property):
if (attr._repeated or
(isinstance(attr, StructuredProperty) and
attr._modelclass._has_repeated)):
cls._has_repeated = True
cls._properties[attr._name] = attr
cls._update_kind_map()
@classmethod
def _update_kind_map(cls):
"""Update the kind map to include this class."""
cls._kind_map[cls._get_kind()] = cls
def _prepare_for_put(self):
if self._properties:
for prop in self._properties.itervalues():
prop._prepare_for_put(self)
@classmethod
def _check_projections(cls, projections):
"""Helper to check that a list of projections is valid for this class.
Called from query.py.
Args:
projections: List or tuple of projections -- each being a string
giving a property name, possibly containing dots (to address
subproperties of structured properties).
Raises:
BadProjectionError if one of the properties is invalid.
AssertionError if the argument is not a list or tuple of strings.
"""
assert isinstance(projections, (list, tuple)), repr(projections)
for name in projections:
assert isinstance(name, basestring), repr(name)
if '.' in name:
name, rest = name.split('.', 1)
else:
rest = None
prop = cls._properties.get(name)
if prop is None:
cls._unknown_projection(name)
else:
prop._check_projection(rest)
@classmethod
def _unknown_projection(cls, name):
"""Helper to raise an exception for an unknown property name.
This is called by _check_projections(). It is overridden by
Expando, where this is a no-op.
Raises:
BadProjectionError.
"""
raise BadProjectionError('Projecting on unknown property %s' % name)
def _validate_key(self, key):
"""Validation for _key attribute (designed to be overridden).
Args:
key: Proposed Key to use for entity.
Returns:
A valid key.
"""
return key
# Datastore API using the default context.
# These use local import since otherwise they'd be recursive imports.
@classmethod
def _query(cls, *args, **kwds):
"""Create a Query object for this class.
Keyword arguments are passed to the Query() constructor. If
positional arguments are given they are used to apply an initial
filter.
Returns:
A Query object.
"""
# TODO: Disallow non-empty args and filter=.
from .query import Query # Import late to avoid circular imports.
qry = Query(kind=cls._get_kind(), **kwds)
qry = qry.filter(*cls._default_filters())
qry = qry.filter(*args)
return qry
query = _query
@classmethod
def _gql(cls, query_string, *args, **kwds):
"""Run a GQL query."""
from .query import gql # Import late to avoid circular imports.
return gql('SELECT * FROM %s %s' % (cls._class_name(), query_string),
*args, **kwds)
gql = _gql
def _put(self, **ctx_options):
"""Write this entity to the datastore.
If the operation creates or completes a key, the entity's key
attribute is set to the new, complete key.
Returns:
The key for the entity. This is always a complete key.
"""
return self._put_async(**ctx_options).get_result()
put = _put
def _put_async(self, **ctx_options):
"""Write this entity to the datastore.
This is the asynchronous version of Model._put().
"""
if self._projection:
raise datastore_errors.BadRequestError('Cannot put a partial entity')
from . import tasklets
ctx = tasklets.get_context()
self._prepare_for_put()
if self._key is None:
self._key = Key(self._get_kind(), None)
self._pre_put_hook()
fut = ctx.put(self, **ctx_options)
post_hook = self._post_put_hook
if not self._is_default_hook(Model._default_post_put_hook, post_hook):
fut.add_immediate_callback(post_hook, fut)
return fut
put_async = _put_async
@classmethod
def _get_or_insert(*args, **kwds):
"""Transactionally retrieves an existing entity or creates a new one.
Positional Args:
name: Key name to retrieve or create.
Keyword Args:
namespace: Optional namespace.
app: Optional app ID.
parent: Parent entity key, if any.
context_options: ContextOptions object (not keyword args!) or None.
**kwds: Keyword arguments to pass to the constructor of the model class
if an instance for the specified key name does not already exist. If
an instance with the supplied key_name and parent already exists,
these arguments will be discarded.
Returns:
Existing instance of Model class with the specified key name and parent
or a new one that has just been created.
"""
cls, args = args[0], args[1:]
return cls._get_or_insert_async(*args, **kwds).get_result()
get_or_insert = _get_or_insert
@classmethod
def _get_or_insert_async(*args, **kwds):
"""Transactionally retrieves an existing entity or creates a new one.
This is the asynchronous version of Model._get_or_insert().
"""
# NOTE: The signature is really weird here because we want to support
# models with properties named e.g. 'cls' or 'name'.
from . import tasklets
cls, name = args # These must always be positional.
get_arg = cls.__get_arg
app = get_arg(kwds, 'app')
namespace = get_arg(kwds, 'namespace')
parent = get_arg(kwds, 'parent')
context_options = get_arg(kwds, 'context_options')
# (End of super-special argument parsing.)
# TODO: Test the heck out of this, in all sorts of evil scenarios.
if not isinstance(name, basestring):
raise TypeError('name must be a string; received %r' % name)
elif not name:
raise ValueError('name cannot be an empty string.')
key = Key(cls, name, app=app, namespace=namespace, parent=parent)
@tasklets.tasklet
def internal_tasklet():
@tasklets.tasklet
def txn():
ent = yield key.get_async(options=context_options)
if ent is None:
ent = cls(**kwds) # TODO: Use _populate().
ent._key = key
yield ent.put_async(options=context_options)
raise tasklets.Return(ent)
if in_transaction():
# Run txn() in existing transaction.
ent = yield txn()
else:
# Maybe avoid a transaction altogether.
ent = yield key.get_async(options=context_options)
if ent is None:
# Run txn() in new transaction.
ent = yield transaction_async(txn)
raise tasklets.Return(ent)
return internal_tasklet()
get_or_insert_async = _get_or_insert_async
@classmethod
def _allocate_ids(cls, size=None, max=None, parent=None, **ctx_options):
"""Allocates a range of key IDs for this model class.
Args:
size: Number of IDs to allocate. Either size or max can be specified,
not both.
max: Maximum ID to allocate. Either size or max can be specified,
not both.
parent: Parent key for which the IDs will be allocated.
**ctx_options: Context options.
Returns:
A tuple with (start, end) for the allocated range, inclusive.
"""
return cls._allocate_ids_async(size=size, max=max, parent=parent,
**ctx_options).get_result()
allocate_ids = _allocate_ids
@classmethod
def _allocate_ids_async(cls, size=None, max=None, parent=None,
**ctx_options):
"""Allocates a range of key IDs for this model class.
This is the asynchronous version of Model._allocate_ids().
"""
from . import tasklets
ctx = tasklets.get_context()
cls._pre_allocate_ids_hook(size, max, parent)
key = Key(cls._get_kind(), None, parent=parent)
fut = ctx.allocate_ids(key, size=size, max=max, **ctx_options)
post_hook = cls._post_allocate_ids_hook
if not cls._is_default_hook(Model._default_post_allocate_ids_hook,
post_hook):
fut.add_immediate_callback(post_hook, size, max, parent, fut)
return fut
allocate_ids_async = _allocate_ids_async
@classmethod
@utils.positional(3)
def _get_by_id(cls, id, parent=None, **ctx_options):
"""Returns an instance of Model class by ID.
This is really just a shorthand for Key(cls, id, ...).get().
Args:
id: A string or integer key ID.
parent: Optional parent key of the model to get.
namespace: Optional namespace.
app: Optional app ID.
**ctx_options: Context options.
Returns:
A model instance or None if not found.
"""
return cls._get_by_id_async(id, parent=parent, **ctx_options).get_result()
get_by_id = _get_by_id
@classmethod
@utils.positional(3)
def _get_by_id_async(cls, id, parent=None, app=None, namespace=None,
**ctx_options):
"""Returns an instance of Model class by ID (and app, namespace).
This is the asynchronous version of Model._get_by_id().
"""
key = Key(cls._get_kind(), id, parent=parent, app=app, namespace=namespace)
return key.get_async(**ctx_options)
get_by_id_async = _get_by_id_async
# Hooks that wrap around mutations. Most are class methods with
# the notable exception of put, which is an instance method.
# To use these, override them in your model class and call
# super(<myclass>, cls).<hook>(*args).
# Note that the pre-hooks are called before the operation is
# scheduled. The post-hooks are called (by the Future) after the
# operation has completed.
# Do not use or touch the _default_* hooks. These exist for
# internal use only.
@classmethod
def _pre_allocate_ids_hook(cls, size, max, parent):
pass
_default_pre_allocate_ids_hook = _pre_allocate_ids_hook
@classmethod
def _post_allocate_ids_hook(cls, size, max, parent, future):
pass
_default_post_allocate_ids_hook = _post_allocate_ids_hook
@classmethod
def _pre_delete_hook(cls, key):
pass
_default_pre_delete_hook = _pre_delete_hook
@classmethod
def _post_delete_hook(cls, key, future):
pass
_default_post_delete_hook = _post_delete_hook
@classmethod
def _pre_get_hook(cls, key):
pass
_default_pre_get_hook = _pre_get_hook
@classmethod
def _post_get_hook(cls, key, future):
pass
_default_post_get_hook = _post_get_hook
def _pre_put_hook(self):
pass
_default_pre_put_hook = _pre_put_hook
def _post_put_hook(self, future):
pass
_default_post_put_hook = _post_put_hook
@staticmethod
def _is_default_hook(default_hook, hook):
"""Checks whether a specific hook is in its default state.
Args:
default_hook: Callable specified by ndb internally (do not override).
hook: The hook defined by a model class using _post_*_hook.
Raises:
TypeError if either the default hook or the tested hook are not callable.
"""
if not hasattr(default_hook, '__call__'):
raise TypeError('Default hooks for ndb.model.Model must be callable')
if not hasattr(hook, '__call__'):
raise TypeError('Hooks must be callable')
return default_hook.im_func is hook.im_func
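# Illustrative sketch, not part of the library: a minimal Model subclass using
# the instance and class APIs defined above (put(), get_by_id(),
# get_or_insert()). The kind and property names are hypothetical;
# StringProperty is defined elsewhere in this module.
#   class Greeting(Model):
#     message = StringProperty()
#     created = DateTimeProperty(auto_now_add=True)
#   key = Greeting(message='hello').put()     # returns the complete key
#   same = Greeting.get_by_id(key.id())
#   other = Greeting.get_or_insert('welcome', message='hi')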
class Expando(Model):
"""Model subclass to support dynamic Property names and types.
See the module docstring for details.
"""
# Set this to False (in an Expando subclass or entity) to make
# properties default to unindexed.
_default_indexed = True
def _set_attributes(self, kwds):
for name, value in kwds.iteritems():
setattr(self, name, value)
@classmethod
def _unknown_projection(cls, name):
# It is not an error to project on an unknown Expando property.
pass
def __getattr__(self, name):
if name.startswith('_'):
return super(Expando, self).__getattr__(name)
prop = self._properties.get(name)
if prop is None:
return super(Expando, self).__getattribute__(name)
return prop._get_value(self)
def __setattr__(self, name, value):
if (name.startswith('_') or
isinstance(getattr(self.__class__, name, None), (Property, property))):
return super(Expando, self).__setattr__(name, value)
# TODO: Refactor this to share code with _fake_property().
self._clone_properties()
if isinstance(value, Model):
prop = StructuredProperty(Model, name)
else:
repeated = isinstance(value, list)
indexed = self._default_indexed
# TODO: What if it's a list of Model instances?
prop = GenericProperty(name, repeated=repeated, indexed=indexed)
prop._code_name = name
self._properties[name] = prop
prop._set_value(self, value)
def __delattr__(self, name):
if (name.startswith('_') or
isinstance(getattr(self.__class__, name, None), (Property, property))):
return super(Expando, self).__delattr__(name)
prop = self._properties.get(name)
if not isinstance(prop, Property):
raise TypeError('Model properties must be Property instances; not %r' %
prop)
prop._delete_value(self)
if prop in self.__class__._properties:
raise RuntimeError('Property %s still in the list of properties for the '
'base class.' % name)
del self._properties[name]
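# Illustrative sketch, not part of the library: Expando creates properties on
# the fly in __setattr__() above, so plain attribute assignment is enough;
# lists become repeated properties and Model values become structured ones.
# The entity below is hypothetical.
#   e = Expando()
#   e.foo = 1                       # GenericProperty
#   e.tags = ['a', 'b']             # repeated GenericProperty
#   e.child = Expando(bar='baz')    # StructuredProperty(Model)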
@utils.positional(1)
def transaction(callback, **ctx_options):
"""Run a callback in a transaction.
Args:
callback: A function or tasklet to be called.
**ctx_options: Transaction options.
Useful options include:
retries=N: Retry up to N times (i.e. try up to N+1 times)
propagation=<flag>: Determines how an existing transaction should be
propagated, where <flag> can be one of the following:
TransactionOptions.NESTED: Start a nested transaction (this is the
default; but actual nested transactions are not yet implemented,
so effectively you can only use this outside an existing transaction).
TransactionOptions.MANDATORY: A transaction must already be in progress.
TransactionOptions.ALLOWED: If a transaction is in progress, join it.
TransactionOptions.INDEPENDENT: Always start a new parallel transaction.
xg=True: On the High Replication Datastore, enable cross-group
transactions, i.e. allow writing to up to 5 entity groups.
WARNING: Using anything other than NESTED for the propagation flag
can have strange consequences. When using ALLOWED or MANDATORY, if
an exception is raised, the transaction is likely not safe to
commit. When using INDEPENDENT it is not generally safe to return
values read to the caller (as they were not read in the caller's
transaction).
Returns:
Whatever callback() returns.
Raises:
Whatever callback() raises; datastore_errors.TransactionFailedError
if the transaction failed.
Note:
To pass arguments to a callback function, use a lambda, e.g.
def my_callback(key, inc):
...
transaction(lambda: my_callback(Key(...), 1))
"""
fut = transaction_async(callback, **ctx_options)
return fut.get_result()
@utils.positional(1)
def transaction_async(callback, **ctx_options):
"""Run a callback in a transaction.
This is the asynchronous version of transaction().
"""
from . import tasklets
return tasklets.get_context().transaction(callback, **ctx_options)
def in_transaction():
"""Return whether a transaction is currently active."""
from . import tasklets
return tasklets.get_context().in_transaction()
@utils.positional(1)
def transactional(_func=None, **ctx_options):
"""Decorator to make a function automatically run in a transaction.
Args:
_func: Do not use.
**ctx_options: Transaction options (see transaction(), but propagation
      defaults to TransactionOptions.ALLOWED).
This supports two forms:
(1) Vanilla:
@transactional
def callback(arg):
...
(2) With options:
@transactional(retries=1)
def callback(arg):
...
"""
if _func is not None:
# Form (1), vanilla.
if ctx_options:
raise TypeError('@transactional() does not take positional arguments')
# TODO: Avoid recursion, call outer_transactional_wrapper() directly?
return transactional()(_func)
ctx_options.setdefault('propagation',
datastore_rpc.TransactionOptions.ALLOWED)
# Form (2), with options.
def outer_transactional_wrapper(func):
@utils.wrapping(func)
def inner_transactional_wrapper(*args, **kwds):
f = func
if args or kwds:
f = lambda: func(*args, **kwds)
return transaction(f, **ctx_options)
return inner_transactional_wrapper
return outer_transactional_wrapper
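# Illustrative sketch, not part of the library: the two forms of @transactional
# described in the docstring above, applied to a hypothetical counter update.
#   @transactional
#   def bump(key):
#     ent = key.get()
#     ent.count += 1
#     ent.put()
#   @transactional(retries=1, xg=True)
#   def bump_many(keys):
#     for key in keys:
#       bump(key)  # joins the surrounding transaction (propagation=ALLOWED)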
@utils.positional(1)
def non_transactional(_func=None, allow_existing=True):
"""A decorator that ensures a function is run outside a transaction.
If there is an existing transaction (and allow_existing=True), the
existing transaction is paused while the function is executed.
Args:
_func: Do not use.
allow_existing: If false, throw an exception if called from within
a transaction. If true, temporarily re-establish the
previous non-transactional context. Defaults to True.
This supports two forms, similar to transactional().
Returns:
A wrapper for the decorated function that ensures it runs outside a
transaction.
"""
if _func is not None:
# TODO: Avoid recursion, call outer_non_transactional_wrapper() directly?
return non_transactional()(_func)
def outer_non_transactional_wrapper(func):
from . import tasklets
@utils.wrapping(func)
def inner_non_transactional_wrapper(*args, **kwds):
ctx = tasklets.get_context()
if not ctx.in_transaction():
return func(*args, **kwds)
if not allow_existing:
raise datastore_errors.BadRequestError(
'%s cannot be called within a transaction.' % func.__name__)
save_ctx = ctx
while ctx.in_transaction():
ctx = ctx._parent_context
if ctx is None:
raise datastore_errors.BadRequestError(
'Context without non-transactional ancestor')
try:
tasklets.set_context(ctx)
return func(*args, **kwds)
finally:
tasklets.set_context(save_ctx)
return inner_non_transactional_wrapper
return outer_non_transactional_wrapper
def get_multi_async(keys, **ctx_options):
"""Fetches a sequence of keys.
Args:
keys: A sequence of keys.
**ctx_options: Context options.
Returns:
A list of futures.
"""
return [key.get_async(**ctx_options) for key in keys]
def get_multi(keys, **ctx_options):
"""Fetches a sequence of keys.
Args:
keys: A sequence of keys.
**ctx_options: Context options.
Returns:
A list whose items are either a Model instance or None if the key wasn't
found.
"""
return [future.get_result()
for future in get_multi_async(keys, **ctx_options)]
def put_multi_async(entities, **ctx_options):
"""Stores a sequence of Model instances.
Args:
entities: A sequence of Model instances.
**ctx_options: Context options.
Returns:
A list of futures.
"""
return [entity.put_async(**ctx_options) for entity in entities]
def put_multi(entities, **ctx_options):
"""Stores a sequence of Model instances.
Args:
entities: A sequence of Model instances.
**ctx_options: Context options.
Returns:
A list with the stored keys.
"""
return [future.get_result()
for future in put_multi_async(entities, **ctx_options)]
def delete_multi_async(keys, **ctx_options):
"""Deletes a sequence of keys.
Args:
keys: A sequence of keys.
**ctx_options: Context options.
Returns:
A list of futures.
"""
return [key.delete_async(**ctx_options) for key in keys]
def delete_multi(keys, **ctx_options):
"""Deletes a sequence of keys.
Args:
keys: A sequence of keys.
**ctx_options: Context options.
Returns:
A list whose items are all None, one per deleted key.
"""
return [future.get_result()
for future in delete_multi_async(keys, **ctx_options)]
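# Illustrative sketch, not part of the library: the *_multi helpers above fan a
# whole sequence out through the async API in parallel. The entities and keys
# below are hypothetical.
#   keys = put_multi(entities)    # store many entities, returns their keys
#   ents = get_multi(keys)        # fetch them back; None for missing keys
#   delete_multi(keys)            # delete them; returns a list of Nones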
def get_indexes_async(**ctx_options):
"""Get a data structure representing the configured indexes.
Args:
**ctx_options: Context options.
Returns:
A future.
"""
from . import tasklets
ctx = tasklets.get_context()
return ctx.get_indexes(**ctx_options)
def get_indexes(**ctx_options):
"""Get a data structure representing the configured indexes.
Args:
**ctx_options: Context options.
Returns:
A list of Index objects.
"""
return get_indexes_async(**ctx_options).get_result()
# Update __all__ to contain all Property and Exception subclasses.
for _name, _object in globals().items():
if ((_name.endswith('Property') and issubclass(_object, Property)) or
(_name.endswith('Error') and issubclass(_object, Exception))):
__all__.append(_name)
| 34.705643 | 80 | 0.677334 |
73da5a1ba1e03119ae58f0dadb864976ca2235d0 | 4,440 | py | Python | bin/update_docker.py | bbayles/cibuildwheel | af2014c954cdda356d496732c4d5398c61526b8f | [
"BSD-2-Clause"
] | 702 | 2017-03-23T16:30:24.000Z | 2021-05-19T06:34:41.000Z | bin/update_docker.py | bbayles/cibuildwheel | af2014c954cdda356d496732c4d5398c61526b8f | [
"BSD-2-Clause"
] | 594 | 2017-03-20T23:13:42.000Z | 2021-05-19T14:22:19.000Z | bin/update_docker.py | bbayles/cibuildwheel | af2014c954cdda356d496732c4d5398c61526b8f | [
"BSD-2-Clause"
] | 124 | 2017-03-31T14:13:26.000Z | 2021-05-14T19:48:31.000Z | #!/usr/bin/env python3
from __future__ import annotations
import configparser
from pathlib import Path
from typing import NamedTuple
import requests
DIR = Path(__file__).parent.resolve()
RESOURCES = DIR.parent / "cibuildwheel/resources"
class Image(NamedTuple):
manylinux_version: str
platform: str
image_name: str
tag: str | None # Set this to pin the image
images = [
# manylinux1 images
Image("manylinux1", "x86_64", "quay.io/pypa/manylinux1_x86_64", None),
Image("manylinux1", "i686", "quay.io/pypa/manylinux1_i686", None),
# manylinux2010 images
Image("manylinux2010", "x86_64", "quay.io/pypa/manylinux2010_x86_64", None),
Image("manylinux2010", "i686", "quay.io/pypa/manylinux2010_i686", None),
Image("manylinux2010", "pypy_x86_64", "quay.io/pypa/manylinux2010_x86_64", None),
Image("manylinux2010", "pypy_i686", "quay.io/pypa/manylinux2010_i686", None),
# manylinux2014 images
Image("manylinux2014", "x86_64", "quay.io/pypa/manylinux2014_x86_64", None),
Image("manylinux2014", "i686", "quay.io/pypa/manylinux2014_i686", None),
Image("manylinux2014", "aarch64", "quay.io/pypa/manylinux2014_aarch64", None),
Image("manylinux2014", "ppc64le", "quay.io/pypa/manylinux2014_ppc64le", None),
Image("manylinux2014", "s390x", "quay.io/pypa/manylinux2014_s390x", None),
Image("manylinux2014", "pypy_x86_64", "quay.io/pypa/manylinux2014_x86_64", None),
Image("manylinux2014", "pypy_i686", "quay.io/pypa/manylinux2014_i686", None),
Image("manylinux2014", "pypy_aarch64", "quay.io/pypa/manylinux2014_aarch64", None),
# manylinux_2_24 images
Image("manylinux_2_24", "x86_64", "quay.io/pypa/manylinux_2_24_x86_64", None),
Image("manylinux_2_24", "i686", "quay.io/pypa/manylinux_2_24_i686", None),
Image("manylinux_2_24", "aarch64", "quay.io/pypa/manylinux_2_24_aarch64", None),
Image("manylinux_2_24", "ppc64le", "quay.io/pypa/manylinux_2_24_ppc64le", None),
Image("manylinux_2_24", "s390x", "quay.io/pypa/manylinux_2_24_s390x", None),
Image("manylinux_2_24", "pypy_x86_64", "quay.io/pypa/manylinux_2_24_x86_64", None),
Image("manylinux_2_24", "pypy_i686", "quay.io/pypa/manylinux_2_24_i686", None),
Image("manylinux_2_24", "pypy_aarch64", "quay.io/pypa/manylinux_2_24_aarch64", None),
# musllinux_1_1 images
Image("musllinux_1_1", "x86_64", "quay.io/pypa/musllinux_1_1_x86_64", None),
Image("musllinux_1_1", "i686", "quay.io/pypa/musllinux_1_1_i686", None),
Image("musllinux_1_1", "aarch64", "quay.io/pypa/musllinux_1_1_aarch64", None),
Image("musllinux_1_1", "ppc64le", "quay.io/pypa/musllinux_1_1_ppc64le", None),
Image("musllinux_1_1", "s390x", "quay.io/pypa/musllinux_1_1_s390x", None),
]
config = configparser.ConfigParser()
for image in images:
# get the tag name whose digest matches 'latest'
if image.tag is not None:
# image has been pinned, do not update
tag_name = image.tag
elif image.image_name.startswith("quay.io/"):
_, _, repository_name = image.image_name.partition("/")
response = requests.get(
f"https://quay.io/api/v1/repository/{repository_name}?includeTags=true"
)
response.raise_for_status()
repo_info = response.json()
tags_dict = repo_info["tags"]
latest_tag = tags_dict.pop("latest")
# find the tag whose manifest matches 'latest'
tag_name = next(
name
for (name, info) in tags_dict.items()
if info["manifest_digest"] == latest_tag["manifest_digest"]
)
else:
response = requests.get(f"https://hub.docker.com/v2/repositories/{image.image_name}/tags")
response.raise_for_status()
tags = response.json()["results"]
latest_tag = next(tag for tag in tags if tag["name"] == "latest")
# i don't know what it would mean to have multiple images per tag
assert len(latest_tag["images"]) == 1
digest = latest_tag["images"][0]["digest"]
pinned_tag = next(
tag for tag in tags if tag != latest_tag and tag["images"][0]["digest"] == digest
)
tag_name = pinned_tag["name"]
if not config.has_section(image.platform):
config[image.platform] = {}
config[image.platform][image.manylinux_version] = f"{image.image_name}:{tag_name}"
with open(RESOURCES / "pinned_docker_images.cfg", "w") as f:
config.write(f)
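# Minimal sketch of reading the pinned images back (illustrative only; the
# section/option layout matches what the loop above writes):
#
#   pinned = configparser.ConfigParser()
#   pinned.read(RESOURCES / "pinned_docker_images.cfg")
#   image_ref = pinned["x86_64"]["manylinux2014"]
#   # e.g. "quay.io/pypa/manylinux2014_x86_64:<pinned tag>"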
| 43.960396 | 98 | 0.682207 |
73da615449127cd818a34f4ca5d22909faa80bcf | 6,928 | py | Python | asposewordscloud/models/requests/update_form_field_online_request.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 14 | 2018-07-15T17:01:52.000Z | 2018-11-29T06:15:33.000Z | asposewordscloud/models/requests/update_form_field_online_request.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 1 | 2018-09-28T12:59:34.000Z | 2019-10-08T08:42:59.000Z | asposewordscloud/models/requests/update_form_field_online_request.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 2 | 2020-12-21T07:59:17.000Z | 2022-02-16T21:41:25.000Z | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="update_form_field_online_request.py">
# Copyright (c) 2021 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import json
from six.moves.urllib.parse import quote
from asposewordscloud import *
from asposewordscloud.models import *
from asposewordscloud.models.requests import *
from asposewordscloud.models.responses import *
class UpdateFormFieldOnlineRequest(BaseRequestObject):
"""
Request model for update_form_field_online operation.
Initializes a new instance.
:param document The document.
    :param form_field Form field data.
:param index Object index.
:param node_path The path to the node in the document tree.
:param load_encoding Encoding that will be used to load an HTML (or TXT) document if the encoding is not specified in HTML.
:param password Password for opening an encrypted document.
:param dest_file_name Result path of the document after the operation. If this parameter is omitted then result of the operation will be saved as the source document.
:param revision_author Initials of the author to use for revisions.If you set this parameter and then make some changes to the document programmatically, save the document and later open the document in MS Word you will see these changes as revisions.
:param revision_date_time The date and time to use for revisions.
"""
def __init__(self, document, form_field, index, node_path=None, load_encoding=None, password=None, dest_file_name=None, revision_author=None, revision_date_time=None):
self.document = document
self.form_field = form_field
self.index = index
self.node_path = node_path
self.load_encoding = load_encoding
self.password = password
self.dest_file_name = dest_file_name
self.revision_author = revision_author
self.revision_date_time = revision_date_time
def create_http_request(self, api_client):
# verify the required parameter 'document' is set
if self.document is None:
raise ValueError("Missing the required parameter `document` when calling `update_form_field_online`") # noqa: E501
# verify the required parameter 'form_field' is set
if self.form_field is None:
raise ValueError("Missing the required parameter `form_field` when calling `update_form_field_online`") # noqa: E501
# verify the required parameter 'index' is set
if self.index is None:
raise ValueError("Missing the required parameter `index` when calling `update_form_field_online`") # noqa: E501
path = '/v4.0/words/online/put/{nodePath}/formfields/{index}'
path_params = {}
if self.index is not None:
path_params['index'] = self.index # noqa: E501
else:
path_params['index'] = '' # noqa: E501
if self.node_path is not None:
path_params['nodePath'] = self.node_path # noqa: E501
else:
path_params['nodePath'] = '' # noqa: E501
# path parameters
collection_formats = {}
if path_params:
path_params = api_client.sanitize_for_serialization(path_params)
path_params = api_client.parameters_to_tuples(path_params, collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
path = path.replace(
'{%s}' % k,
quote(str(v), safe=api_client.configuration.safe_chars_for_path_param)
)
# remove optional path parameters
path = path.replace('//', '/')
query_params = []
if self.load_encoding is not None:
query_params.append(('loadEncoding', self.load_encoding)) # noqa: E501
if self.password is not None:
query_params.append(('password', self.password)) # noqa: E501
if self.dest_file_name is not None:
query_params.append(('destFileName', self.dest_file_name)) # noqa: E501
if self.revision_author is not None:
query_params.append(('revisionAuthor', self.revision_author)) # noqa: E501
if self.revision_date_time is not None:
query_params.append(('revisionDateTime', self.revision_date_time)) # noqa: E501
header_params = {}
# HTTP header `Content-Type`
header_params['Content-Type'] = api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
form_params = []
if self.document is not None:
form_params.append(['document', self.document, 'file']) # noqa: E501
if self.form_field is not None:
form_params.append(['formField', self.form_field.to_json(), 'string']) # noqa: E501
body_params = None
return {
"method": "PUT",
"path": path,
"query_params": query_params,
"header_params": header_params,
"form_params": form_params,
"body": body_params,
"collection_formats": collection_formats,
"response_type": 'UpdateFormFieldOnlineResponse' # noqa: E501
}
def get_response_type(self):
return 'UpdateFormFieldOnlineResponse' # noqa: E501
def deserialize_response(self, api_client, response):
multipart = self.getparts(response)
return UpdateFormFieldOnlineResponse(
self.deserialize(json.loads(multipart[0].text), FormFieldResponse, api_client),
self.deserialize_file(multipart[1].content, multipart[1].headers, api_client))
| 49.485714 | 255 | 0.665416 |
73da61bd4a04b12c28d6977f84c04dd1270d0ed9 | 16,308 | py | Python | acq4/drivers/SutterMP285/mp285.py | tropp/ACQ4 | 792e05e99cedfc175593d200aeabecd6fa6304ce | [
"MIT"
] | null | null | null | acq4/drivers/SutterMP285/mp285.py | tropp/ACQ4 | 792e05e99cedfc175593d200aeabecd6fa6304ce | [
"MIT"
] | null | null | null | acq4/drivers/SutterMP285/mp285.py | tropp/ACQ4 | 792e05e99cedfc175593d200aeabecd6fa6304ce | [
"MIT"
] | null | null | null | import serial, struct, time, collections, threading
from ..SerialDevice import SerialDevice
ErrorVals = {
0: ('SP Over-run', 'The previous character was not unloaded before the latest was received.'),
1: ('Frame Error', 'A valid stop bit was not received during the appropriate time period.'),
2: ('Buffer Over-run', 'The input buffer is filled and CR has not been received.'),
4: ('Bad Command', 'Input can not be interpreted -- command byte not valid.'),
8: ('Move Interrupted', 'A requested move was interrupted by input on the serial port.'),
16:('Arduino error', 'Error was reported by arduino interface.'),
32:('MP285 Timeout', 'Arduino timed out waiting for response from MP285.'),
64:('Command timeout', 'Arduino timed out waiting for full command from computer.'),
}
class TimeoutError(Exception):
pass
class MP285Error(Exception):
pass
class SutterMP285(SerialDevice):
"""
Class for communicating with Sutter MP-285 via serial port.
Note that this class is NOT thread-safe.
"""
def __init__(self, port, baud=9600):
"""
port: serial COM port (0 => com1)"""
#self.port = port
#self.baud = baud
#self.sp = serial.Serial(int(self.port), baudrate=self.baud, bytesize=serial.EIGHTBITS, timeout=0)
SerialDevice.__init__(self, port=port, baudrate=baud)
self._scale = None
self.moving = False
time.sleep(1.0) ## Give devices a moment to chill after opening the serial line.
self.clearBuffer()
self.setSpeed(777) ## may be required to be sure Sutter is behaving (voodoo...)
self.clearBuffer()
def getPos(self, scaled=True):
"""Get current position reported by controller. Returns a tuple (x,y,z); values given in m."""
## request position
self.write('c\r') # request is directly to Sutter MP285 in this case.
packet = self.read(length=13, timeout=8.0, term='\r')
if len(packet) != 12:
raise Exception("Sutter MP285: getPos: bad position packet: <%s> expected 12, got %d" % (repr(packet), len(packet)))
pos = [packet[:4], packet[4:8], packet[8:]]
pos = [struct.unpack('=l', x)[0] for x in pos]
if not scaled:
return pos
scale = self.scale()
pos = [x*scale for x in pos]
return pos
def getImmediatePos(self, returnButtons=False):
"""This is a non-standard command provided by custom hardware (Arduino controller).
It returns an estimated position even while the ROE is in use.
(if getPos() is called while the ROE is in use, the MP285 will very likely crash.)
"""
# self.readPacket(block=False)
self.write('p') # talks to Arduino only.
packet = self.read(length=13, timeout=5.0, term='\r')
if len(packet) != 12:
raise Exception("Sutter MP285: getImmediatePos: bad position packet: <%s> (%d)" % (repr(packet),len(packet)))
pos = [packet[:4], packet[4:8], packet[8:12]]
pos = [struct.unpack('=l', x)[0] for x in pos]
scale = self.scale()
pos = [x*scale for x in pos]
if returnButtons:
btn = packet[12]
btns = [ord(btn) & x == 0 for x in [1, 4, 16, 64]]
return pos, btns
return pos
def getButtonState(self):
p,b = self.getImmediatePos(returnButtons=True)
return b
def setPos(self, pos, block=True, timeout=10.):
"""Set the position.
Arguments:
pos: tuple (x, y, z) values must be given in meters.
Setting a coordinate to None leaves it unchanged.
block: bool, if true then the function does not return until the move is complete.
"""
scale = self.scale()
if len(pos) < 3:
pos = list(pos) + [None] * (3-len(pos))
if None in pos:
currentPos = self.getPos(scaled=False)
pos = [(pos[i]/scale if pos[i] is not None else currentPos[i]) for i in range(3)]
cmd = 'm' + struct.pack('=3l', int(pos[0]), int(pos[1]), int(pos[2])) + '\r'
self.write(cmd)
self.moving = True
if block:
self.blockWhileMoving(timeout=timeout)
def checkMoving(self):
"""
Return bool whether the stage is currently moving.
"""
if self.sp.inWaiting() > 0:
self.read(length=1, term='\r')
self.moving = False
if not self.moving:
return False
return True
def blockWhileMoving(self, timeout=10.0):
"""
        Blocks until the stage is done moving, or until the timeout expires.
"""
if not self.moving:
return
self.read(length=1, timeout=timeout, term='\r')
self.moving = False
def moveBy(self, pos, block=True, timeout=10.):
"""Move by the specified distance.
Arguments:
pos: tuple (dx, dy, dz) values must be given in meters.
block: bool, if true then the function does not return until the move is complete.
"""
scale = self.scale()
if len(pos) < 3:
pos = list(pos) + [0.0] * (3-len(pos))
currentPos = self.getPos(scaled=False)
pos = [pos[i]/scale + currentPos[i] for i in range(3)]
cmd = 'm' + struct.pack('=3l', int(pos[0]), int(pos[1]), int(pos[2])) + '\r'
self.write(cmd)
if block:
self.blockWhileMoving(timeout=timeout)
def scale(self):
## Scale of position values in msteps/m
## Does this value change during operation?
## Should I be using step_mul for anything?
if self._scale is None:
stat = self.stat()
self._scale = 1e-6 / stat['step_div']
return self._scale
def stop(self):
self.write('\3')
try:
self.readPacket()
except MP285Error as err:
for e in err.args[0]:
if e[0] == 8: ## move interrupted, like we asked for
return
raise
def setSpeed(self, speed, fine=True, timeout=10.):
"""Set the speed of movements used when setPos is called.
Arguments:
speed: integer from 1 to 6550 in coarse mode, 1310 in fine mode.
Note that small numbers can result in imperceptibly slow movement.
fine: bool; True => 50uSteps/step False => 10uSteps/step
"""
v = int(speed)
## arbitrary speed limits.. do these apply to all devices?
maxSpd = 6550
if fine:
maxSpd = 1310
v = max(min(v, maxSpd), 1)
#print "MP285 speed:", v
if fine:
v |= 0x8000
cmd = 'V' + struct.pack('=H', v) + '\r'
self.write(cmd)
self.read(1, term='\r', timeout=timeout)
def stat(self, ):
self.write('s\r')
packet = self.read(33, timeout=5.0, term='\r')
if len(packet) != 32:
raise Exception("Sutter MP285: bad stat packet: '%s'" % repr(packet))
paramNames = ['flags', 'udirx', 'udiry', 'udirz', 'roe_vari', 'uoffset', 'urange', 'pulse',
'uspeed', 'indevice', 'flags2', 'jumpspd', 'highspd', 'dead', 'watch_dog',
'step_div', 'step_mul', 'xspeed', 'version']
vals = struct.unpack('=4B5H2B8H', packet)
params = collections.OrderedDict()
for i,n in enumerate(paramNames):
params[n] = vals[i]
flags = params['flags']
params['setup_num'] = flags & 0xF
params['roe_dir'] = -1 if (flags & 2**4) else 1
params['rel_abs_f'] = 'abs' if (flags & 2**5) else 'rel'
params['mode_f'] = 'cont' if (flags & 2**6) else 'pulse'
params['store_f'] = 'stored' if (flags & 2**7) else 'erased'
flags2 = params['flags2']
params['loop_mode'] = bool(flags2 & 1)
params['learn_mode'] = bool(flags2 & 2)
params['step_mode'] = 50 if (flags2 & 4) else 10
params['sw2_mode'] = bool(flags2 & 8)
params['sw1_mode'] = bool(flags2 & 16)
params['sw3_mode'] = bool(flags2 & 32)
params['sw4_mode'] = bool(flags2 & 64)
params['reverse_it'] = bool(flags2 & 128)
params['resolution'] = 50 if (params['xspeed'] & 2**15) else 10
params['speed'] = params['xspeed'] & 0x7FFF
return params
def setLimits(self, limits):
"""Set position limits on the device which may not be exceeded.
This command is only available when using custom hardware.
limits = [+x, -x, +y, -y, +z, -z]
If a limit is None, it will be ignored.
"""
scale = self.scale()
useLims = [(1 if x is not None else 0) for x in limits]
limits = [(0 if x is None else int(x/scale)) for x in limits]
data = struct.pack("=6l6B", *(limits + useLims))
self.write('l'+data+'\r');
self.readPacket()
def reset(self, hard=False):
"""Reset the controller.
Arguments:
hard: If False, then a soft-reset "r" command is sent
If True, then a hard-reset "R" command is sent (not supported by all hardware)"""
cmd = 'r\r'
if hard:
cmd = 'R\r'
self.write(cmd)
## wait for reset, check for error
s = self.clearBuffer()
if len(s) == 2 and s[1] == '\r':
self.raiseError(s[0])
## clear out anything else in the buffer (reset may generate garbage)
if s == '\x00':
return True ## successful reset
return False
def setOrigin(self):
self.write('o\r')
self.readPacket()
def setAbsolute(self):
self.write('a\r')
self.readPacket()
def setRelative(self):
self.write('b\r')
self.readPacket()
def continueAfterPause(self):
self.write('e\r')
self.readPacket()
def refresh(self):
self.write('n\r')
self.readPacket()
def readPacket(self):
return self.readUntil('\r')
#def clearBuffer(self):
#d = self.readAll()
#time.sleep(0.1)
#d += self.readAll()
#if len(d) > 0:
#print "Sutter MP285: Warning: tossed data ", repr(d)
#return d
#def readAll(self):
### read all bytes waiting in buffer; non-blocking.
#n = self.sp.inWaiting()
#if n > 0:
#return self.sp.read(n)
#return ''
#def write(self, data, timeout=10.0):
#self.blockWhileMoving(timeout=timeout) # If the stage is still moving, wait until it is done before sending another packet.
##self.readAll() ## always empty buffer before sending command
#self.sp.write(data)
#def close(self):
#self.sp.close()
def raiseError(self, errVals):
## errVals should be list of error codes
errors = []
for err in errVals:
hit = False
for k in ErrorVals:
if ord(err) & k:
hit = True
errors.append((k,)+ErrorVals[k])
if not hit:
errors.append((ord(err), "Unknown error code", ""))
raise MP285Error(errors)
#def read(self, length, timeout=5, term=None):
### Read *length* bytes or raise exception on timeout.
### if *term* is given, check that the last byte is *term* and remove it
### from the returned packet.
##self.sp.setTimeout(timeout) #broken!
#packet = self.readWithTimeout(length, timeout)
#if len(packet) < length:
#raise Exception("MP285: Timed out waiting for serial data (received so far: %s)" % repr(packet))
#if term is not None:
#if packet[-len(term):] != term:
#self.clearBuffer()
#raise Exception("MP285: Packet corrupt: %s (len=%d)" % (repr(packet), len(packet)))
#return packet[:-len(term)]
#return packet
#def readWithTimeout(self, nBytes, timeout):
#start = time.time()
#packet = ''
#while len(packet) < nBytes and time.time()-start < timeout:
#packet += self.sp.read(1)
#return packet
#def readPacket(self, expect=0, timeout=5, block=True):
### Read until a carriage return is encountered (or timeout).
### If expect is >0, then try to get a packet of that length, ignoring \r within that data
### if block is False, then return immediately if no data is available.
#start = time.time()
#res = ''
#errors = []
#packets = []
#while True:
#s = self.readAll()
#if not block and len(s) == 0:
#return
#if expect > 0: ## move bytes into result without checking for \r
#nb = expect-len(res)
#res += s[:nb]
#s = s[nb:]
#try:
#while len(s) > 0: ## pull packets out of s one at a time
#res += s[:s.index('\r')]
#s = s[s.index('\r')+1:]
#if len(res) == 1: ## error packet was sent
#errors.append(res)
#else:
#packets.append(res)
#res = ''
#except ValueError: ## partial packet; append and wait for more data
#res += s
#if len(res) > 32: ## no valid packets are longer than 32 bytes; give up
#raise Exception("Got junk data while reading for packet: '%s'" % str(res))
#if len(res) == 0:
#if len(errors) > 0:
#self.raiseError(errors)
#if len(packets) == 1: ## success
#return packets[0]
#if len(packets) > 1:
#raise Exception("Too many packets read.", packets)
##if len(s) > 0:
##if s != '\r' and s[0] != '=':
##print "SutterMP285 Error: '%s'" % s
###print "return:", repr(s)
##break
#time.sleep(0.01)
#if time.time() - start > timeout:
#raise TimeoutError("Timeout while waiting for response. (Data so far: %s)" % repr(res))
if __name__ == '__main__':
s = SutterMP285(port=5, baud=19200) # Arduino baud rate, NOT MP285 baud rate.
#s = SutterMP285(port=2, baud=9600)
def pos():
p = s.getPos()
print "<mp285> x: %0.2fum y: %0.2fum, z: %0.2fum" % (p[0]*1e6, p[1]*1e6, p[2]*1e6)
def ipos():
p = s.getImmediatePos()
print "x: %0.2fum y: %0.2fum, z: %0.2fum" % (p[0]*1e6, p[1]*1e6, p[2]*1e6)
def stat():
st = s.stat()
for k in st:
print "%s:%s%s" % (k, " "*(15-len(k)), str(st[k]))
def monitor():
while True:
pos()
def clock(speed, fine=False, runtime=2.0):
s.setSpeed(6500, fine=False)
s.setPos([-0.01, 0, 0])
pos = s.getPos()
s.setSpeed(speed, fine)
time.clock()
t = time.clock()
dist = runtime*speed*1e-6
s.setPos([pos[0]+dist, pos[1], pos[2]], timeout=runtime*2)
s.setPos(pos, timeout=runtime*2)
dt = 0.5*(time.clock()-t)
print "%d: dt=%0.2gs, dx=%0.2gm, %0.2f mm/s" % (int(speed), dt, dist, dist*1e3/dt)
def saw(dx, dz, zstep=5e-6):
p1 = s.getPos()
z = p1[2]
p1 = p1[:2]
p2 = [p1[0] + dx, p1[1]]
n = int(dz/zstep)
for i in range(n):
print "step:", i
s.setPos(p2)
s.setPos(p1)
if i < n-1:
z += zstep
s.setPos([None,None,z])
ipos()
pos()
| 36.72973 | 132 | 0.51858 |
73da7031688c1e3fef5dc65c06878b68fd1a8c68 | 938 | py | Python | camply/notifications/base_notifications.py | grantland/camply | aec4c6b82380fdc85f254563d8e8de290edc05ad | [
"MIT"
] | 123 | 2021-05-19T04:56:47.000Z | 2022-03-23T19:04:45.000Z | camply/notifications/base_notifications.py | grantland/camply | aec4c6b82380fdc85f254563d8e8de290edc05ad | [
"MIT"
] | 11 | 2021-05-25T20:22:14.000Z | 2022-03-05T16:31:32.000Z | camply/notifications/base_notifications.py | grantland/camply | aec4c6b82380fdc85f254563d8e8de290edc05ad | [
"MIT"
] | 21 | 2021-05-24T05:53:24.000Z | 2022-03-31T02:03:41.000Z | #!/usr/bin/env python3
# Author:: Justin Flannery (mailto:juftin@juftin.com)
"""
Push Notifications Template
"""
from abc import ABC, abstractmethod
import logging
from typing import List
from camply.containers import AvailableCampsite
logger = logging.getLogger(__name__)
class BaseNotifications(ABC):
"""
Base Notifications
"""
@staticmethod
@abstractmethod
def send_message(message: str, **kwargs):
"""
Send a message
Parameters
----------
message: str
Message Text
**kwargs
All kwargs passed to underlying notification method
"""
pass
@staticmethod
@abstractmethod
def send_campsites(campsites: List[AvailableCampsite], **kwargs):
"""
Send a message with a campsite object
Parameters
----------
campsites: List[AvailableCampsite]
"""
pass
| 19.142857 | 69 | 0.60661 |
73da7e533ed955d00173b7290eb88c69f98ecf1b | 13,484 | py | Python | Python_paper/smclomo/.ipynb_checkpoints/smc-checkpoint.py | LaGauffre/SMCCompoMo | 242feab1f1a6f923b682cfb8b033bb9c96317dc3 | [
"MIT"
] | 1 | 2021-06-18T01:42:08.000Z | 2021-06-18T01:42:08.000Z | Python_paper/smclomo/.ipynb_checkpoints/smc-checkpoint.py | LaGauffre/SMCCompoMo | 242feab1f1a6f923b682cfb8b033bb9c96317dc3 | [
"MIT"
] | null | null | null | Python_paper/smclomo/.ipynb_checkpoints/smc-checkpoint.py | LaGauffre/SMCCompoMo | 242feab1f1a6f923b682cfb8b033bb9c96317dc3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 17:21:21 2020
@author: pierr
"""
import pandas as pd
import numpy as np
import scipy.special as sp
import scipy.stats as st
from joblib import Parallel, delayed
from .loss_distribution import logp_wrap, logd_wrap
from .prior_distribution import sim_prior_wrap, logp_prior_wrap
from .temperature import temperature_search, batch_size_search
from .move import Gibbs_move
def smc_likelihood_annealing(X, loss_model, parms_names, popSize, model_prior, a, b, ρ,
c,n_step_max, err, paralell, n_proc, verbose):
"""
Sequential Monte Carlo Sampler of the posterior distribution.
Parameters
----------
X: array
loss data required to evaluate the likelihood
loss_model: string
loss model being fitted
parms_names: array
names of the loss model parameters,
first the shape parameter of the belly distribution
second the tail index
third the threshold between small and large claims
popSize: int
number of particles sampled, size of the cloud
model_prior: string
prior distribution
a, b: arrays
prior distribution parameters
ρ: float
tuning parameter for the target effective sample size
move_type: string
type of moves to choose in ("Metropolis", "Gibbs", "Indpendent)"
c : float
Calibrate the number of steps required so that the probability that
each particle is moved at least once equals c.
n_step_max: int
limits the number of steps
err: float
Temperature threshold
verbose: boolean
Whether to print the steps
Returns
-------
list
A list that provides the posterior sample along with the smc estimator of
the marginal likelihood.
Example
-------
k, α, θ = 1/2, 1/2, 5
X = sim_wei_par(10, k, α, θ)
loss_model, parms_names, popSize, model_prior, a, b, method, ρ, move_type, c, \
n_step_max = "wei-par", ['k','α', 'θ'], 20, "uniform", np.array([0,0,0]),\
np.array([10,10,10]), "likelihood annealing", 1/2, "Gibbs", 0.99, 20
trace, marg_log_lik = smc(X, loss_model, parms_names,
100,
model_prior, a, b,
method, ρ, move_type, c,n_step_max)
trace.mean(), marg_log_lik
"""
log_prob_prior, log_prob, d = logp_prior_wrap(model_prior, a, b), \
logp_wrap(X, loss_model), len(parms_names)
# Generation counter
g = 0
if verbose:
print('Sample generation ' + str(g) + " from the " + str(model_prior) +
" prior distribution")
# Initialisation of the particle cloud
init_cloud = sim_prior_wrap(model_prior, a, b, parms_names, popSize)
init_cloud['logw'] = np.log(np.ones(popSize))
init_cloud['W'] = np.ones(popSize) / popSize
# This initial particle cloud is placed inside a list
clouds = []
clouds.append(init_cloud)
# Temperature sequence either true temperature or proportion of observations
γ_seq = np.array([0])
# We keep on iterating until the temperature reaches 1
while γ_seq[-1] < 1:
g = g + 1
particles = clouds[g-1][parms_names].values
# Updating temperature sequence
γ, logw, W, ESS = temperature_search(particles,
log_prob,ρ * popSize, γ_seq[-1],
err)
γ_seq = np.append(γ_seq, γ)
cloud_cov = np.cov(particles,
bias = False,
aweights = W,
rowvar = False) * 2.38 / np.sqrt(d)
particles_resampled = particles[np.random.choice(popSize,popSize, p = W)]
def move_particle_trial(particle):
trace, acceptance = Gibbs_move(1,np.diag(cloud_cov), log_prob,
log_prob_prior, particle, γ, d)
return(np.append(trace[-1], np.mean(np.any(acceptance[1:]))))
if paralell:
res_trial = np.array(Parallel(n_jobs=n_proc)(delayed(move_particle_trial)(i)
for i in particles_resampled))
else:
res_trial = np.array([move_particle_trial(particle)
for particle in particles_resampled])
particles_trial, acc_trial = res_trial[:,0:d], res_trial[:,-1]
n_steps = int(min(n_step_max,max(2,np.ceil(np.log(1-c) / np.log(1-(np.mean(acc_trial)-1e-6))))))
def move_particle(particle):
trace, acceptance = Gibbs_move(n_steps,np.diag(cloud_cov), log_prob,
log_prob_prior, particle, γ, d)
return(np.append(trace[-1], np.mean(np.any(acceptance[1:]))))
if paralell:
res = np.array(Parallel(n_jobs=n_proc)(delayed(move_particle)(i)
for i in particles_trial))
else:
res = np.array([move_particle(particle) for particle in particles_trial])
particles_moved, acc_rate = res[:,0:d], res[:,-1]
if verbose:
print('Generation: ' + str(g) + " ;temperature: "+str(γ_seq[-1])+
" ;ESS: "+str(ESS)+
" ;steps:" + str(n_steps+1) + " ;particle moved: "+
str(np.mean(acc_rate) * 100) + "%" )
cloud = pd.DataFrame(particles_moved)
cloud.columns = parms_names
# Updating unormalized weights
cloud['logw'] = logw
# Updating normalized weights
cloud['W'] = W
clouds.append(cloud)
marginal_log_likelihood = sum([ sp.logsumexp(cloud['logw'] - np.log(popSize))
for cloud in clouds[1:g+1]])
log_probs = [log_prob(particle) for particle in particles_moved]
DIC = - 2* log_prob(np.mean(particles_moved, axis = 0)) + \
2* (2* np.mean(log_probs) - 2* log_prob(np.mean(particles_moved, axis = 0)))
logd = logd_wrap(particles_moved, loss_model)
logds = np.array([logd(x) for x in X])
WAIC = - 2*(
sum(np.log(np.mean(np.exp(logds), axis = 1))) -
sum(np.var(logds, axis = 1))
)
return(clouds[-1][parms_names], marginal_log_likelihood, DIC, WAIC)
def smc_data_by_batch(X, loss_model, parms_names, popSize, model_prior, a, b, ρ,
c, n_step_max, paralell, n_proc, verbose):
"""
Sequential Monte Carlo Sampler of the posterior distribution.
Parameters
----------
X: array
loss data required to evaluate the likelihood
loss_model: string
loss model being fitted
parms_names: array
names of the loss model parameters,
first the shape parameter of the belly distribution
second the tail index
third the threshold between small and large claims
popSize: int
number of particles sampled, size of the cloud
model_prior: string
prior distribution
a, b: arrays
prior distribution parameters
ρ: float
tuning parameter for the target effective sample size
    paralell: bool
        whether to move the particles in parallel over n_proc worker processes
    n_proc: int
        number of parallel processes used when paralell is True
c : float
Calibrate the number of steps required so that the probability that
each particle is moved at least once equals c.
n_step_max: int
limits the number of steps
verbose: boolean
Whether to print the steps
Returns
-------
list
A list that provides the posterior sample along with the smc estimator of
the marginal likelihood.
Example
-------
k, α, θ = 1/2, 1/2, 5
X = sim_wei_par(10, k, α, θ)
    loss_model, parms_names, popSize = "wei-par", ['k', 'α', 'θ'], 100
    model_prior, a, b = "uniform", np.array([0, 0, 0]), np.array([10, 10, 10])
    ρ, c, n_step_max = 1/2, 0.99, 20
    trace, marg_log_lik, DIC, WAIC = smc_data_by_batch(
        X, loss_model, parms_names, popSize, model_prior, a, b,
        ρ, c, n_step_max, False, 1, True)
    trace.mean(), marg_log_lik
"""
log_prob_prior, d = logp_prior_wrap(model_prior, a, b), len(parms_names)
# Generation counter
g = 0
if verbose:
print('Sample generation ' + str(g) + " from the " + str(model_prior) +
" prior distribution")
# Initialisation of the particle cloud
init_cloud = sim_prior_wrap(model_prior, a, b, parms_names, popSize)
init_cloud['logw'] = np.log(np.ones(popSize))
init_cloud['W'] = np.ones(popSize) / popSize
# This initial particle cloud is placed inside a list
clouds = []
clouds.append(init_cloud)
# sequence of data batch size
n_seq = np.array([0])
# We keep on iterating until the temperature reaches 1
while n_seq[-1] < len(X):
g = g + 1
# Updating temperature sequence
particles = clouds[g-1][parms_names].values
n, logw, W, ESS = batch_size_search(particles, ρ * popSize, n_seq[-1], X, loss_model)
n_seq = np.append(n_seq, n)
cloud_cov = np.cov(particles,
bias = False,
aweights = W,
rowvar = False) * 2.38 / np.sqrt(d)
particles_resampled = particles[np.random.choice(popSize,popSize, p = W)]
log_prob = logp_wrap(X[0:n], loss_model)
def move_particle_trial(particle):
trace, acceptance = Gibbs_move(1,np.diag(cloud_cov), log_prob,
log_prob_prior, particle, 1, d)
return(np.append(trace[-1], np.mean(np.any(acceptance[1:]))))
if paralell:
res_trial = np.array(Parallel(n_jobs=n_proc)(delayed(move_particle_trial)(i)
for i in particles_resampled))
else:
res_trial = np.array([move_particle_trial(particle)
for particle in particles_resampled])
particles_trial, acc_trial = res_trial[:,0:d], res_trial[:,-1]
n_steps = int(min(n_step_max,max(2,np.ceil(np.log(1-c) / np.log(1-(np.mean(acc_trial)-1e-6))))))
def move_particle(particle):
trace, acceptance = Gibbs_move(n_steps,np.diag(cloud_cov), log_prob,
log_prob_prior, particle, 1, d)
return(np.append(trace[-1], np.mean(np.any(acceptance[1:]))))
if paralell:
res = np.array(Parallel(n_jobs=n_proc)(delayed(move_particle)(i)
for i in particles_trial))
else:
res = np.array([move_particle(particle) for particle in particles_trial])
particles_moved, acc_rate = res[:,0:d], res[:,-1]
if verbose:
print('Generation: ' + str(g) + " ;batch size: "+str(n_seq[-1])+
" ;ESS: "+str(ESS)+
" ;steps:" + str(n_steps+1) + " ;particle moved: "+
str(np.mean(acc_rate) * 100) + "%")
# print(np.mean(acc_rate2==0),np.mean(acc_rate2), n_steps)
cloud = pd.DataFrame(particles_moved)
cloud.columns = parms_names
# Updating unormalized weights
cloud['logw'] = logw
# Updating normalized weights
cloud['W'] = W
clouds.append(cloud)
marginal_log_likelihood = sum([ sp.logsumexp(cloud['logw'] - np.log(popSize))
for cloud in clouds[1:g+1]])
# marginal_log_likelihood = sum(np.log(([np.exp(cloud['w'].values).mean()
# for cloud in clouds[1:g+1]])))
log_probs = [log_prob(particle) for particle in particles_moved]
DIC = - 2* log_prob(np.mean(particles_moved, axis = 0)) + \
2* (2* np.mean(log_probs) - 2* log_prob(np.mean(particles_moved, axis = 0)))
logd = logd_wrap(particles_moved, loss_model)
logds= np.array([logd(x) for x in X])
WAIC = - 2*(sum(np.log(np.mean(np.exp(logds), axis = 1))) -
sum(np.var(logds, axis = 1)))
return(clouds[-1][parms_names], marginal_log_likelihood, DIC, WAIC)
def log_marg_bridge_sampling(logp, logp_prior, trace, r_init, err):
th_1 = trace.values
MN_rv = st.multivariate_normal(mean=trace.mean(), cov=trace.cov())
th_2 = MN_rv.rvs(len(th_1))
log_probs_prior_th_1 = np.array([logp_prior(particle) for particle in th_1])
logps_th_1 = np.array([logp(particle) for particle in th_1])
log_probs_prior_th_2 = np.array([logp_prior(particle) for particle in th_2])
logps_th_2 = np.array([logp(particle) for particle in th_2])
eta_1_th_1 = np.exp(logps_th_1+log_probs_prior_th_1)
eta_1_th_2 = np.exp(logps_th_2+log_probs_prior_th_2)
eta_2_th_1 = MN_rv.pdf(th_1)
eta_2_th_2 = MN_rv.pdf(th_2)
r = [r_init]
while True:
r_new = np.sum(eta_1_th_2 / (eta_1_th_2 + r[-1]*eta_2_th_2)) / np.sum(eta_2_th_1 / (eta_1_th_1 + r[-1]*eta_2_th_1))
if abs(np.log(r[-1]) - np.log(r_new)) < err :
r.append(r_new)
break
else:
r.append(r_new)
return(np.log(r[-1]))
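# Minimal usage sketch for the bridge-sampling estimator (assumed inputs):
# `trace` is the posterior sample returned by one of the SMC samplers above and
# logp / logp_prior are the same log-likelihood and log-prior callables used there.
#
#   log_marg = log_marg_bridge_sampling(logp, logp_prior, trace, r_init=1.0, err=1e-8)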
| 40.130952 | 125 | 0.577796 |
73daae0b84aab088147d9a7423bc64f4df502178 | 4,469 | py | Python | optimizers.py | fedetask/reactor-torch | a5f2eeeaa7d8bebb7446a2e8ce996b7ad8913177 | [
"MIT"
] | 2 | 2020-12-19T18:46:18.000Z | 2021-01-18T11:02:11.000Z | optimizers.py | fedetask/reactor-torch | a5f2eeeaa7d8bebb7446a2e8ce996b7ad8913177 | [
"MIT"
] | null | null | null | optimizers.py | fedetask/reactor-torch | a5f2eeeaa7d8bebb7446a2e8ce996b7ad8913177 | [
"MIT"
] | null | null | null | """This module contains shared optimizers.
"""
import math
import torch
import torch.optim as optim
class SharedAdam(optim.Adam):
"""Implements Adam algorithm with shared states, taken from
https://github.com/ikostrikov/pytorch-a3c/
"""
def __init__(self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0):
super(SharedAdam, self).__init__(params, lr, betas, eps, weight_decay)
# Is this really necessary?
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = torch.zeros(1)
state['exp_avg'] = p.data.new().resize_as_(p.data).zero_()
state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'].share_memory_()
state['exp_avg'].share_memory_()
state['exp_avg_sq'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step'].item()
bias_correction2 = 1 - beta2 ** state['step'].item()
step_size = group['lr'] * math.sqrt(
bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
class SharedRMSprop(optim.RMSprop):
"""Implementation of a shared RMSprop, taken from
https://github.com/Kaixhin/ACER/blob/master/optim.py
"""
def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0):
super(SharedRMSprop, self).__init__(params, lr=lr, alpha=alpha,
eps=eps, weight_decay=weight_decay,
momentum=0, centered=False)
# State initialisation (must be done before step, else will not be shared between threads)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = p.data.new().resize_(1).zero_()
state['square_avg'] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'].share_memory_()
state['square_avg'].share_memory_()
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
square_avg = state['square_avg']
alpha = group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
# g = αg + (1 - α)Δθ^2
square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
# θ ← θ - ηΔθ/√(g + ε)
avg = square_avg.sqrt().add_(group['eps'])
p.data.addcdiv_(-group['lr'], grad, avg)
return loss
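# Minimal usage sketch for the shared optimizers (hogwild/A3C-style training;
# MyNet and train_worker are illustrative names, not part of this module):
#
#   import torch.multiprocessing as mp
#
#   shared_model = MyNet()
#   shared_model.share_memory()
#   optimizer = SharedAdam(shared_model.parameters(), lr=1e-3)
#   optimizer.share_memory()
#   workers = [mp.Process(target=train_worker, args=(shared_model, optimizer))
#              for _ in range(4)]
#   for w in workers: w.start()
#   for w in workers: w.join()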
| 34.643411 | 98 | 0.516894 |
73dabac4cbd7cf9cf502ce9223b7a90337ceb891 | 1,040 | py | Python | bballmain/lib/python2.7/site-packages/model_utils/tests/test_fields/test_status_field.py | bjf2015/bballmain | 81130df9e546211e34da6c4377cf0b19ce773f88 | [
"MIT"
] | 1 | 2017-03-05T01:43:57.000Z | 2017-03-05T01:43:57.000Z | bballmain/lib/python2.7/site-packages/model_utils/tests/test_fields/test_status_field.py | bjf2015/bballmain | 81130df9e546211e34da6c4377cf0b19ce773f88 | [
"MIT"
] | null | null | null | bballmain/lib/python2.7/site-packages/model_utils/tests/test_fields/test_status_field.py | bjf2015/bballmain | 81130df9e546211e34da6c4377cf0b19ce773f88 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.test import TestCase
from model_utils.fields import StatusField
from model_utils.tests.models import (
Article, StatusFieldDefaultFilled, StatusFieldDefaultNotFilled,
StatusFieldChoicesName,
)
class StatusFieldTests(TestCase):
def test_status_with_default_filled(self):
instance = StatusFieldDefaultFilled()
self.assertEqual(instance.status, instance.STATUS.yes)
def test_status_with_default_not_filled(self):
instance = StatusFieldDefaultNotFilled()
self.assertEqual(instance.status, instance.STATUS.no)
def test_no_check_for_status(self):
field = StatusField(no_check_for_status=True)
# this model has no STATUS attribute, so checking for it would error
field.prepare_class(Article)
def test_get_status_display(self):
instance = StatusFieldDefaultFilled()
self.assertEqual(instance.get_status_display(), "Yes")
def test_choices_name(self):
StatusFieldChoicesName()
| 31.515152 | 76 | 0.751923 |
73dafb99b43779dc45629587e28429301df550a0 | 6,441 | py | Python | components/collectors.py | mavroskardia/oworld | 840421818b04c0d7242881519f83acf348efbb5e | [
"MIT"
] | null | null | null | components/collectors.py | mavroskardia/oworld | 840421818b04c0d7242881519f83acf348efbb5e | [
"MIT"
] | null | null | null | components/collectors.py | mavroskardia/oworld | 840421818b04c0d7242881519f83acf348efbb5e | [
"MIT"
] | null | null | null | import pygame
import configuration
import constants
import os
import items
import types
import gui
from cStringIO import StringIO
from zipfile import ZipFile
class DialogCollector(object):
"""This class reads the appropriate data directories for actor dialogs. It then stores them by id
(based on the directory name) for reference by npc actors.
"""
def __init__(self, scene, dialogset):
self.scene = scene
self.dialogset = dialogset
self.dialogs = {}
self.otherFuncs = []
self._loadDialogset()
def splitSets(self, namelist):
sets = {}
for name in namelist:
set, item = tuple(os.path.split(os.path.split(name)[0]))
if not set in sets: sets[set] = {}
if not item in sets[set]: sets[set][item] = []
sets[set][item].append(name)
return sets
def _loadDialogset(self):
"""Read the "dialogs" data directory for zip files that contain
dialog.py files. Incorporate that into the collection of dialogs.
"""
z = ZipFile(configuration.dialogs)
dialogs = self.splitSets(z.namelist())
for dialog in dialogs:
for id in dialogs[dialog]:
for dialog_file in dialogs[dialog][id]:
dialog_script = z.read(dialog_file)
speakFunc, otherFuncs = self.getFunctions(z, dialog_file)
self.dialogs[id] = speakFunc
self.otherFuncs += otherFuncs
def getFunctions(self, zipFile, dialog):
dialogStr = zipFile.read(dialog)
# clean up the script
dialogStr = dialogStr.replace('\r', '')
if not dialogStr.endswith('\n'): dialogStr += "\n"
otherFuncs = []
speakFunc = None
locals = {'scene': self.scene, 'engine': self.scene.engine, 'gui': gui}
globals = {}
exec(dialogStr,locals,globals)
for glbl in globals:
if type(globals[glbl]) == types.FunctionType:
if glbl == "speak": speakFunc = globals[glbl]
else: otherFuncs.append(globals[glbl])
return speakFunc, otherFuncs
def runDialog(self, actor):
class otherFuncs: pass
for i in self.otherFuncs: setattr(otherFuncs, i.__name__, staticmethod(i))
self.dialogs[actor.dialogId](otherFuncs, actor, self.scene)
class ItemCollector(object):
"Handles the loading and instancing of items."
def __init__(self, scene, itemset):
self.scene = scene
self.itemset = itemset
self.items = {}
self.itemDescriptions = {}
self._loadItems()
def splitSets(self, namelist):
sets = {}
for name in namelist:
set, item = tuple(os.path.split(os.path.split(name)[0]))
if not set in sets: sets[set] = {}
if not item in sets[set]: sets[set][item] = []
sets[set][item].append(name)
return sets
def _loadItems(self):
"Read the itemset zip file for item zips, creating Item objects along the way."
print "loading itemset: %s" % self.itemset
z = ZipFile(configuration.items)
itemsets = self.splitSets(z.namelist())
for itemset in itemsets[self.itemset]:
item_filenames = itemsets[self.itemset][itemset]
item = items.Item(None, self)
for item_filename in item_filenames:
if item_filename.endswith('.py'):
script = z.read(item_filename)
self._setItemData(item, script)
elif item_filename.endswith('.png'):
item.image = pygame.image.load(StringIO(z.read(item_filename))).convert_alpha()
else:
print 'Skipping unknown filetype "%s"' % item_filename
continue
print "Created item: %s" % item.name
self.items[item.name] = item
z.close()
def _setItemData(self, item, script):
# clean up the script
script = script.replace('\r', '')
if not script.endswith('\n'): script = script+"\n"
l = {'EquipTypes': constants.EquipTypes, 'JobTypes': constants.JobTypes, 'EquipLocation': constants.EquipLocation}
g = {}
exec(script,l,g)
for t in g:
i = t.lower()
if type(g[t]) == types.FunctionType: item.events[t] = g[t]
elif i == "name": item.name = g[t]
elif i == "value": item.fullValue = g[t]
elif i == "equip_type" or i.startswith('type') or i == "equiptype": item.types = g[t]
elif i.startswith('equip_loc') or i.startswith('loc'): item.locations = g[t]
elif i == "jobs" or i == "jobtype" or i == "job" : item.jobs = g[t]
elif i == "attack": item.attack = g[t]
elif i == "attack_bonus" or i == "attackbonus": item.attackBonus = g[t]
elif i == "defense": item.defense = g[t]
elif i == "defense_bonus" or i == "defensebonus": item.defenseBonus = g[t]
elif i == "description" or i == "desc": self.itemDescriptions[item.name] = g[t]
else: print "Skipping %s: %s" % (type(g[t]), i)
def createItem(self, character, itemName):
"""Creates a new copy of the specified item by replicating the "master" version created
when the item files were loaded"""
if itemName not in self.items:
raise Exception, "Invalid item name"
if not character.inventory or not isinstance(character.inventory, items.Inventory):
raise Exception, "Must pass a valid inventory"
ret = items.Item(character.inventory, self)
orig = self.items[itemName]
for attr in orig.__dict__.keys():
ret.__setattr__(attr, orig.__dict__[attr])
ret.inventory = character.inventory
return ret
def getDescription(self, itemName):
"""Gets the singular instance of an item's description. No need to duplicate
big strings unnecessarily"""
if itemName in self.itemDescriptions: return self.itemDescriptions[itemName]
else: return None
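# Minimal sketch of an item definition script as parsed by _setItemData above
# (illustrative names and values; the .py file sits next to the item's .png
# inside the itemset zip):
#
#   name = "Short Sword"
#   value = 25
#   equip_type = EquipTypes.Weapon
#   equip_location = EquipLocation.RightHand
#   jobs = [JobTypes.Fighter]
#   attack = 4
#   description = "A plain but serviceable blade."
#
#   def use(item, character):
#       pass  # any function defined here becomes an entry in item.events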
| 40.509434 | 122 | 0.565595 |
73db1dbd8bdd1a94772d11b2ccb33eac525fa903 | 635 | py | Python | iced/forms.py | sharonmaswai/IceTube | 98d933132ee684aa5f04926f2bcca88cd84bfbb9 | [
"Unlicense"
] | 1 | 2020-01-13T13:49:53.000Z | 2020-01-13T13:49:53.000Z | iced/forms.py | sharonmaswai/IceTube | 98d933132ee684aa5f04926f2bcca88cd84bfbb9 | [
"Unlicense"
] | 8 | 2020-06-05T21:53:54.000Z | 2022-03-11T23:53:42.000Z | iced/forms.py | sharonmaswai/IceTube | 98d933132ee684aa5f04926f2bcca88cd84bfbb9 | [
"Unlicense"
] | 1 | 2022-03-13T18:08:48.000Z | 2022-03-13T18:08:48.000Z | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Profile, Rating
class UserSignUpForm(UserCreationForm):
email = forms.EmailField(max_length=100, help_text='Required')
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2')
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('name','profile_pic', 'specialisation','county')
class RateForm(forms.ModelForm):
class Meta:
model= Rating
fields= ('rating',)
| 25.4 | 67 | 0.670866 |
73db1f723307418015aa8029e49d076917c1534e | 2,952 | py | Python | tests/websocket_test.py | TeamJumpstart/InformatiCup2021 | a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4 | [
"MIT"
] | 10 | 2021-04-18T17:54:02.000Z | 2021-07-26T19:58:41.000Z | tests/websocket_test.py | DiddiZ/InformatiCup2021 | a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4 | [
"MIT"
] | 1 | 2021-04-21T15:13:41.000Z | 2021-04-21T15:13:41.000Z | tests/websocket_test.py | DiddiZ/InformatiCup2021 | a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4 | [
"MIT"
] | 1 | 2021-04-20T09:42:50.000Z | 2021-04-20T09:42:50.000Z | import asyncio
import json
import threading
import unittest
import websockets
from environments import WebsocketEnv
class DummyServer:
def __init__(self, url, port):
self.url = url
self.port = port
self.step_counter = 0
file = open("tests/spe_ed-1603124417603.json", "r")
self.states = json.load(file)
file.close()
# Start new thread from current loop
self.__serving = asyncio.get_event_loop().run_in_executor(None, self.run)
self.__starting = asyncio.get_event_loop().create_future()
asyncio.get_event_loop().run_until_complete(self.__starting)
async def handler(self, websocket, path):
"""Handler"""
for state in self.states:
print("Server: await send state")
await websocket.send(json.dumps(self.states[self.step_counter]))
print("Server: send state", flush=True)
message = await websocket.recv()
print("Server: received command")
assert (json.loads(message)['action'] == "change_nothing")
self.step_counter += 1
if self.step_counter >= len(self.states):
print("counter exceeded")
self.stop()
print("Server: websocket open: ", websocket.open)
async def serve(self):
"""Run server until stopped.
Executed in loop of server thread.
"""
async with websockets.serve(self.handler, self.url, self.port):
print("Server is up")
await self.__stop
print("Server is down")
def run(self): # Executed in new thread
print("Server is starting")
loop = asyncio.new_event_loop() # Create new loop
self.__stop = loop.create_future() # Create stop signal
# Release __init__
self.__starting.get_loop().call_soon_threadsafe(self.__starting.set_result, None)
loop.run_until_complete(self.serve()) # Run serve in loop until stopped
# threading.Thread(target=loop.run_forever).start()
loop.close()
print("Server stopped")
def stop(self):
# Send stop signal in server loop
self.__stop.get_loop().call_soon_threadsafe(self.__stop.set_result, None)
# Wait until server has actually stopped
asyncio.get_event_loop().run_until_complete(self.__serving)
class TestWebsocketEnvironment(unittest.TestCase):
def setUp(self):
self.url = "127.0.0.1"
self.port = 8000
self.key = ""
self.server = DummyServer(self.url, self.port)
def tearDown(self):
print("Stop")
self.server.stop()
# def test_connection(self):
# env = WebsocketEnv(f"ws://{self.url}:{self.port}", self.key)
# obs = env.reset()
# done = False
# while not done:
# action = "change_nothing"
# obs, reward, done, _ = env.step(action)
# print(done)
| 33.545455 | 89 | 0.613144 |
73db380bac8ab06438f3aec72f6175e7a19882be | 12,833 | py | Python | prm/modules/peak_response_mapping.py | FinchZHU/ultra-thin-PRM | 1cdfd1c521f4420164ea55ff8f940fa6d29eb7ac | [
"MIT"
] | null | null | null | prm/modules/peak_response_mapping.py | FinchZHU/ultra-thin-PRM | 1cdfd1c521f4420164ea55ff8f940fa6d29eb7ac | [
"MIT"
] | null | null | null | prm/modules/peak_response_mapping.py | FinchZHU/ultra-thin-PRM | 1cdfd1c521f4420164ea55ff8f940fa6d29eb7ac | [
"MIT"
] | null | null | null | from types import MethodType
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# from scipy.misc import imresize
from PIL import Image
from ..functions import pr_conv2d, peak_stimulation
class PeakResponseMapping(nn.Sequential):
def __init__(self, *args, **kargs):
super(PeakResponseMapping, self).__init__(*args)
self.inferencing = False
# use global average pooling to aggregate responses if peak stimulation is disabled
self.enable_peak_stimulation = kargs.get('enable_peak_stimulation', True)
# return only the class response maps in inference mode if peak backpropagation is disabled
self.enable_peak_backprop = kargs.get('enable_peak_backprop', True)
# window size for peak finding
self.win_size = kargs.get('win_size', 3)
# sub-pixel peak finding
self.sub_pixel_locating_factor = kargs.get('sub_pixel_locating_factor', 1)
# peak filtering
self.filter_type = kargs.get('filter_type', 'median')
if self.filter_type == 'median':
self.peak_filter = self._median_filter
elif self.filter_type == 'mean':
self.peak_filter = self._mean_filter
elif self.filter_type == 'max':
self.peak_filter = self._max_filter
elif isinstance(self.filter_type, (int, float)):
self.peak_filter = lambda x: self.filter_type
else:
self.peak_filter = None
@staticmethod
def _median_filter(input):
batch_size, num_channels, h, w = input.size()
threshold, _ = torch.median(input.view(batch_size, num_channels, h * w), dim=2)
return threshold.contiguous().view(batch_size, num_channels, 1, 1)
@staticmethod
def _mean_filter(input):
batch_size, num_channels, h, w = input.size()
threshold = torch.mean(input.view(batch_size, num_channels, h * w), dim=2)
return threshold.contiguous().view(batch_size, num_channels, 1, 1)
@staticmethod
def _max_filter(input):
batch_size, num_channels, h, w = input.size()
threshold, _ = torch.max(input.view(batch_size, num_channels, h * w), dim=2)
return threshold.contiguous().view(batch_size, num_channels, 1, 1)
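    # The filters above map class response maps of shape (N, C, H, W) to a
    # per-map threshold of shape (N, C, 1, 1); responses below the threshold are
    # ignored when searching for peaks. Minimal sketch (assumed shapes):
    #   x = torch.rand(1, 20, 14, 14)
    #   thr = PeakResponseMapping._median_filter(x)   # -> (1, 20, 1, 1)
    #   keep = x >= thr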
def _patch(self):
for module in self.modules():
if isinstance(module, nn.Conv2d):
module._original_forward = module.forward
module.forward = MethodType(pr_conv2d, module)
def _recover(self):
for module in self.modules():
if isinstance(module, nn.Conv2d) and hasattr(module, '_original_forward'):
module.forward = module._original_forward
def instance_nms(self, instance_list, threshold=0.3, merge_peak_response=True):
selected_instances = []
while len(instance_list) > 0:
instance = instance_list.pop(0)
selected_instances.append(instance)
src_mask = instance[2].astype(bool)
src_peak_response = instance[3]
def iou_filter(x):
dst_mask = x[2].astype(bool)
# IoU
intersection = np.logical_and(src_mask, dst_mask).sum()
union = np.logical_or(src_mask, dst_mask).sum()
iou = intersection / (union + 1e-10)
if iou < threshold:
return x
else:
if merge_peak_response:
nonlocal src_peak_response
src_peak_response += x[3]
return None
instance_list = list(filter(iou_filter, instance_list))
return selected_instances
def instance_seg(self, class_response_maps, peak_list, peak_response_maps, retrieval_cfg):
# cast tensors to numpy array
class_response_maps = class_response_maps.squeeze().cpu().numpy()
peak_list = peak_list.cpu().numpy()
peak_response_maps = peak_response_maps.cpu().numpy()
img_height, img_width = peak_response_maps.shape[1], peak_response_maps.shape[2]
# image size
img_area = img_height * img_width
# segment proposals off-the-shelf
proposals = retrieval_cfg['proposals']
# proposal contour width
contour_width = retrieval_cfg.get('contour_width', 5)
# limit range of proposal size
proposal_size_limit = retrieval_cfg.get('proposal_size_limit', (0.00002, 0.85))
# selected number of proposals
proposal_count = retrieval_cfg.get('proposal_count', 100)
# nms threshold
nms_threshold = retrieval_cfg.get('nms_threshold', 0.3)
# merge peak response during nms
merge_peak_response = retrieval_cfg.get('merge_peak_response', True)
# metric free parameters
param = retrieval_cfg.get('param', None)
# process each peak
instance_list = []
for i in range(len(peak_response_maps)):
class_idx = peak_list[i, 1]
# extract hyper-params
if isinstance(param, tuple):
# shared param
bg_threshold_factor, penalty_factor, balance_factor = param
elif isinstance(param, list):
# independent params between classes
bg_threshold_factor, penalty_factor, balance_factor = param[class_idx]
else:
raise TypeError('Invalid hyper-params "%s".' % param)
# class_response = imresize(class_response_maps[class_idx], (img_height, img_width), interp='bicubic')
            if (len(class_response_maps.shape) == 3):
                # PIL's Image.resize expects a (width, height) tuple
                class_response = np.array(Image.fromarray(class_response_maps[class_idx]).resize((img_width, img_height), Image.BICUBIC))
            elif (len(class_response_maps.shape) == 2):
                class_response = np.array(Image.fromarray(class_response_maps).resize((img_width, img_height), Image.BICUBIC))
bg_response = (class_response < bg_threshold_factor * class_response.mean()).astype(np.float32)
peak_response_map = peak_response_maps[i]
# select proposal
max_val = -np.inf
instance_mask = None
for j in range(min(proposal_count, len(proposals))):
# raw_mask = imresize(proposals[j].astype(int), peak_response_map.shape, interp='nearest')
raw_mask = np.array(Image.fromarray(proposals[j].astype(np.uint8)).resize(peak_response_map.shape, Image.NEAREST))
# get contour of the proposal
contour_mask = cv2.morphologyEx(np.uint8(raw_mask), cv2.MORPH_GRADIENT, np.ones((contour_width, contour_width), np.uint8)).astype(bool)
mask = raw_mask.astype(bool)
# metric
mask_area = mask.sum()
if (mask_area >= proposal_size_limit[1] * img_area) or \
(mask_area < proposal_size_limit[0] * img_area):
continue
else:
val = balance_factor * peak_response_map[mask].sum() + \
peak_response_map[contour_mask].sum() - \
penalty_factor * bg_response[mask].sum()
if val > max_val:
max_val = val
instance_mask = mask
if instance_mask is not None:
instance_list.append((max_val, class_idx, instance_mask, peak_response_map))
instance_list = sorted(instance_list, key=lambda x: x[0], reverse=True)
if nms_threshold is not None:
instance_list = self.instance_nms(sorted(instance_list, key=lambda x: x[0], reverse=True), nms_threshold, merge_peak_response)
return [dict(category=v[1], mask=v[2], prm=v[3]) for v in instance_list]
def forward(self, input, class_threshold=0, peak_threshold=30, retrieval_cfg=None):
assert input.dim() == 4, 'PeakResponseMapping layer only supports batch mode.'
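        # Added clarifying comment: forward() has three behaviours --
        #   * plain train/eval mode: returns only the class-wise `aggregation` scores;
        #   * after inference() without `retrieval_cfg`: also returns the class response maps,
        #     the valid peak list and the peak response maps (or None when no peak clears
        #     `class_threshold` / `peak_threshold`);
        #   * after inference() with `retrieval_cfg`: feeds those cues to instance_seg() and
        #     returns a list of {category, mask, prm} dicts.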
if self.inferencing:
input.requires_grad_()
# classification network forwarding
class_response_maps = super(PeakResponseMapping, self).forward(input)
# print("class_response_maps", class_response_maps)
if self.enable_peak_stimulation:
# sub-pixel peak finding
if self.sub_pixel_locating_factor > 1:
class_response_maps = F.upsample(class_response_maps, scale_factor=self.sub_pixel_locating_factor, mode='bilinear', align_corners=True)
# aggregate responses from informative receptive fields estimated via class peak responses
peak_list, aggregation = peak_stimulation(class_response_maps, win_size=self.win_size, peak_filter=self.peak_filter)
else:
# aggregate responses from all receptive fields
peak_list, aggregation = None, F.adaptive_avg_pool2d(class_response_maps, 1).squeeze(2).squeeze(2)
# print(self.inferencing)
# print("aggregation", aggregation)
# exit()
if self.inferencing:
if not self.enable_peak_backprop:
# extract only class-aware visual cues
return aggregation, class_response_maps
# extract instance-aware visual cues, i.e., peak response maps
assert class_response_maps.size(0) == 1, 'Currently inference mode (with peak backpropagation) only supports one image at a time.'
if peak_list is None:
peak_list = peak_stimulation(class_response_maps, return_aggregation=False, win_size=self.win_size, peak_filter=self.peak_filter)
# print(peak_list)
peak_response_maps = []
valid_peak_list = []
# peak backpropagation
grad_output = class_response_maps.new_empty(class_response_maps.size())
for idx in range(peak_list.size(0)):
# print(aggregation[peak_list[idx, 0], peak_list[idx, 1]])
if aggregation[peak_list[idx, 0], peak_list[idx, 1]] >= class_threshold:
peak_val = class_response_maps[peak_list[idx, 0], peak_list[idx, 1], peak_list[idx, 2], peak_list[idx, 3]]
# print(peak_val)
if peak_val > peak_threshold:
# print('k ')
grad_output.zero_()
# starting from the peak
grad_output[peak_list[idx, 0], peak_list[idx, 1], peak_list[idx, 2], peak_list[idx, 3]] = 1
if input.grad is not None:
input.grad.zero_()
class_response_maps.backward(grad_output, retain_graph=True)
prm = input.grad.detach().sum(1).clone().clamp(min=0)
# print('prm ', prm)
peak_response_maps.append(prm / prm.sum())
valid_peak_list.append(peak_list[idx, :])
# return results
class_response_maps = class_response_maps.detach()
aggregation = aggregation.detach()
if len(peak_response_maps) > 0:
# print('kkkkkkkkkkkkkk')
valid_peak_list = torch.stack(valid_peak_list)
peak_response_maps = torch.cat(peak_response_maps, 0)
if retrieval_cfg is None:
# classification confidence scores, class-aware and instance-aware visual cues
return aggregation, class_response_maps, valid_peak_list, peak_response_maps
else:
# instance segmentation using build-in proposal retriever
return self.instance_seg(class_response_maps, valid_peak_list, peak_response_maps, retrieval_cfg)
else:
return None
else:
# classification confidence scores
return aggregation
def train(self, mode=True):
super(PeakResponseMapping, self).train(mode)
if self.inferencing:
self._recover()
self.inferencing = False
return self
def inference(self):
super(PeakResponseMapping, self).train(False)
self._patch()
self.inferencing = True
return self
def uninference(self):
super(PeakResponseMapping, self).train(True)
self.inferencing = False
return self
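# Hedged usage sketch (added for illustration; not part of the original class). It assumes
# `model` is an already-constructed PeakResponseMapping wrapping a CNN classifier -- the
# backbone/constructor live elsewhere and are not shown here -- with enable_peak_backprop
# left at its default and no retrieval_cfg passed.
def _example_prm_inference(model, image_batch):
    """Illustrative only: run PRM inference on a single-image batch of shape (1, C, H, W)."""
    model = model.inference()      # patch Conv2d forwards for peak backpropagation
    result = model(image_batch)    # float tensor; gradients must be enabled (no torch.no_grad)
    model.train()                  # restores the original Conv2d forwards via _recover()
    if result is None:
        return None                # no peak cleared class_threshold / peak_threshold
    aggregation, class_response_maps, valid_peak_list, peak_response_maps = result
    return aggregation, class_response_maps, valid_peak_list, peak_response_maps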
| 46.496377 | 152 | 0.598691 |
73db3affb1c4728a4c1251d0a72b2606ed424428 | 2,998 | py | Python | server/controller/destroy_vm.py | Zhe-Shen/CloudTides | edf51534f62901dcc2e69cbf3fea76e5652b5d17 | [
"Apache-2.0"
] | 3 | 2019-12-07T17:14:39.000Z | 2022-03-31T06:41:51.000Z | server/controller/destroy_vm.py | Zhe-Shen/CloudTides | edf51534f62901dcc2e69cbf3fea76e5652b5d17 | [
"Apache-2.0"
] | 10 | 2019-12-08T06:37:55.000Z | 2022-02-10T11:41:09.000Z | server/controller/destroy_vm.py | Zhe-Shen/CloudTides | edf51534f62901dcc2e69cbf3fea76e5652b5d17 | [
"Apache-2.0"
] | 9 | 2020-06-28T01:08:35.000Z | 2020-07-26T03:51:47.000Z | '''
Destroy VM in vSphere.
'''
from __future__ import print_function
import atexit
from pyVim import connect
from pyVmomi import vim
import argparse
def get_args():
parser = argparse.ArgumentParser(
description='Arguments for talking to vCenter')
parser.add_argument('-s', '--host',
required=True,
action='store',
help='vSpehre service to connect to')
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='Username to use')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use')
parser.add_argument('-j', '--uuid',
action='store',
help='BIOS UUID of the virtual machine')
parser.add_argument('-d', '--dns',
action='store',
help='DNS name of the virtual machine')
parser.add_argument('-i', '--ip',
help='IP address of the virtual machine')
parser.add_argument('-n', '--name',
help='VM name of the virtual machine')
args = parser.parse_args()
return args
def get_obj(content, vimtype, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
container.Destroy()
return obj
args = get_args()
si = None
try:
si = connect.SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
atexit.register(connect.Disconnect, si)
except:
print("Failed to connect")
exit()
vm = None
if args.name:
vm = get_obj(si.content, [vim.VirtualMachine], args.name)
elif args.uuid:
vm = si.content.searchIndex.FindByUuid(None, args.uuid, True, False)
elif args.dns:
    vm = si.content.searchIndex.FindByDnsName(None, args.dns, True)
elif args.ip:
vm = si.content.searchIndex.FindByIp(None, args.ip, True)
else:
print("Lack identifier of VM.")
exit()
print("Found VM: {0}".format(vm.name))
print("The current power state is: {0}".format(vm.runtime.powerState))
if format(vm.runtime.powerState) == "poweredOn":
print("Attempting to power off {0}".format(vm.name))
task = vm.PowerOffVM_Task()
print("{0}".format(task.info.state))
print("Destroying VM from vSphere.")
task = vm.Destroy_Task()
print("Done.")
| 27.759259 | 72 | 0.538359 |
73db92835649a541758da6e1e48dd36c0b4f92ae | 5,362 | py | Python | tests/test_time_entries_adapter.py | damnit/pymite | 1e9b9bf6aef790af2d8781f9f77c098c54ca0342 | [
"MIT"
] | 4 | 2016-09-25T17:25:41.000Z | 2019-12-28T06:25:10.000Z | tests/test_time_entries_adapter.py | damnit/pymite | 1e9b9bf6aef790af2d8781f9f77c098c54ca0342 | [
"MIT"
] | null | null | null | tests/test_time_entries_adapter.py | damnit/pymite | 1e9b9bf6aef790af2d8781f9f77c098c54ca0342 | [
"MIT"
] | 2 | 2017-09-23T14:55:59.000Z | 2018-10-28T09:18:14.000Z | # -*- coding: utf-8 -*-
# File: test_time_entries_adapter.py
""" time entries adapter test module. """
import urllib.request
import json
from .conftest import (
mock_urlopen, _get_url, _post_url, _delete_url, parse_params
)
from pymite.adapters import TimeEntries
def test_time_entries_setup(libfactory):
""" Test tracker setup. """
factory = libfactory.time_entries_adapter
assert factory is not None
te = TimeEntries(factory.realm, factory.apikey)
assert te.adapter == 'time_entries'
def test_time_entries_at(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
at_data = [
{'time_entry': {}},
{'time_entry': {}}
]
urlopen_at = mock_urlopen(at_data)
monkeypatch.setattr(urllib.request, 'urlopen', urlopen_at)
at = te.query(at='2015-02-02')
assert at == list(map(lambda x: x['time_entry'], at_data))
assert len(at) == len(at_data)
def test_time_entries_at_url(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
monkeypatch.setattr(TimeEntries, '_get', _get_url('time_entry'))
url = te.query(at='2015-02-02')['api']
assert url == 'https://foo.mite.yo.lk/time_entries.json?at=2015-02-02'
def test_time_entries_by_id(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
te_data = {'time_entry': {}}
urlopen_te = mock_urlopen(te_data)
monkeypatch.setattr(urllib.request, 'urlopen', urlopen_te)
time = te.by_id(42)
assert time == te_data['time_entry']
def test_time_entries_by_id_url(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
monkeypatch.setattr(TimeEntries, '_get', _get_url('time_entry'))
url = te.by_id(42)['api']
assert url == 'https://foo.mite.yo.lk/time_entries/42.json'
def test_time_entries_all(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
all_data = [{'time_entry': {}} for _ in range(100)]
urlopen_all = mock_urlopen(all_data)
monkeypatch.setattr(urllib.request, 'urlopen', urlopen_all)
time_entries = te.query()
assert time_entries == list(map(lambda x: x['time_entry'], all_data))
assert len(time_entries) == len(all_data)
def test_time_entries_all_url(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
monkeypatch.setattr(TimeEntries, '_get', _get_url('time_entry'))
url = te.query()['api']
assert url == 'https://foo.mite.yo.lk/time_entries.json'
def test_time_entries_all_limited_url(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
monkeypatch.setattr(TimeEntries, '_get', _get_url('time_entry'))
url = te.query(limit=10)['api']
assert url == 'https://foo.mite.yo.lk/time_entries.json?limit=10'
def test_time_entries_from_to(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
ft_data = [{'time_entry': {}} for _ in range(1000)]
urlopen_ft = mock_urlopen(ft_data)
monkeypatch.setattr(urllib.request, 'urlopen', urlopen_ft)
kws = {'from': '2015-02-02', 'to': '2015-02-14'}
ft = te.query(**kws)
assert ft == list(map(lambda x: x['time_entry'], ft_data))
assert len(ft) == len(ft_data)
def test_time_entries_from_to_url(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
monkeypatch.setattr(TimeEntries, '_get', _get_url('time_entry'))
kws = {'from': '2015-02-02', 'to': '2015-02-14'}
    url = te.query(**kws)['api']
    assert ('https://foo.mite.yo.lk/time_entries.json?' in url)
    assert kws == parse_params(url)
def test_time_entries_from_to_paginated_url(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
monkeypatch.setattr(TimeEntries, '_get', _get_url('time_entry'))
kws = {'from': '2015-02-02', 'to': '2015-02-14', 'page': '4'}
url = te.query(**kws)['api']
assert ('https://foo.mite.yo.lk/time_entries.json?' in url)
assert kws == parse_params(url)
def test_time_entries_delete(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
# ft_data = {'success': 200}
ft_data = b' '
urlopen_ft = mock_urlopen(ft_data, resp_code=200)
monkeypatch.setattr(urllib.request, 'urlopen', urlopen_ft)
ft = te.delete(42)
assert ft == {'success': 200}
def test_time_entries_delete_url(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
monkeypatch.setattr(TimeEntries, '_delete', _delete_url(200))
data = te.delete(1337)
base_url = 'https://foo.mite.yo.lk/time_entries/1337.json'
assert data['api'] == base_url
assert data['code'] == 200
assert data['method'] == 'DELETE'
def test_time_entries_create(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
cr_data = {'time_entry': {}}
urlopen_cr = mock_urlopen(cr_data)
monkeypatch.setattr(urllib.request, 'urlopen', urlopen_cr)
cr = te.create(minutes=42, note='foo', user_id=666)
assert cr == cr_data['time_entry']
def test_time_entries_create_url(monkeypatch, libfactory):
te = libfactory.time_entries_adapter
monkeypatch.setattr(TimeEntries, '_post', _post_url(201))
post_data = {'time_entry': {'minutes': 42, 'user_id': 123, 'note': 'bam'}}
data = te.create(**post_data['time_entry'])
base_url = 'https://foo.mite.yo.lk/time_entries.json'
assert data['api'] == base_url
assert data['code'] == 201
assert data['method'] == 'POST'
assert json.loads(data['data'].decode()) == post_data
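# Hedged usage sketch (added for illustration, not a test): the same adapter calls the mocked
# tests above exercise, run against a real mite account. `realm` and `apikey` are placeholders
# the caller must supply; the argument values simply mirror the ones used in the tests.
def _example_time_entries_usage(realm, apikey):
    te = TimeEntries(realm, apikey)
    entries = te.query(**{'from': '2015-02-02', 'to': '2015-02-14'})
    single = te.by_id(42)
    created = te.create(minutes=42, note='foo', user_id=666)
    te.delete(42)
    return entries, single, created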
# vim: set ft=python ts=4 sw=4 expandtab :
| 33.936709 | 78 | 0.697874 |
73db93e32cfc911467cd93df1e7d19304df98f9a | 38,581 | py | Python | nyoka/tests/testScoreWithAdapaSklearn.py | nimeshgit/nyoka | 43bf049825922213eeb3e6a8f39864f9b75d01d5 | [
"Apache-2.0"
] | null | null | null | nyoka/tests/testScoreWithAdapaSklearn.py | nimeshgit/nyoka | 43bf049825922213eeb3e6a8f39864f9b75d01d5 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:16:45.000Z | 2022-02-10T05:28:52.000Z | nyoka/tests/testScoreWithAdapaSklearn.py | nimeshgit/nyoka | 43bf049825922213eeb3e6a8f39864f9b75d01d5 | [
"Apache-2.0"
] | null | null | null | import sys, os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(BASE_DIR)
import pandas as pd
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler, Imputer, LabelEncoder, LabelBinarizer,\
Binarizer, MinMaxScaler, MaxAbsScaler, RobustScaler
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR, OneClassSVM
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn_pandas import DataFrameMapper
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor, \
RandomForestClassifier, RandomForestRegressor, IsolationForest
from sklearn.linear_model import LinearRegression, LogisticRegression, RidgeClassifier, SGDClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier, MLPRegressor
from nyoka import skl_to_pmml
from nyoka import PMML44 as pml
import unittest
import ast
import numpy
from adapaUtilities import AdapaUtility
from dataUtilities import DataUtility
class TestCases(unittest.TestCase):
@classmethod
def setUpClass(self):
print("******* Unit Test for sklearn *******")
self.data_utility = DataUtility()
self.adapa_utility = AdapaUtility()
def test_01_linear_regression(self):
print("\ntest 01 (linear regression without preprocessing)\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
model = LinearRegression()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test01sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
def test_02_linear_regression_with_scaler(self):
print("\ntest 02 (linear regression with preprocessing)\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
model = LinearRegression()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test02sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
def test_03_logistic_regression_with_scaler(self):
print("\ntest 03 (logistic regression with preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = LogisticRegression()
pipeline_obj = Pipeline([
("mapper", DataFrameMapper([
(["sepal length (cm)", "sepal width (cm)"], MinMaxScaler()),
(["petal length (cm)", "petal width (cm)"], None)
])
),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test03sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_04_logistic_regression_with_scaler(self):
print("\ntest 04 (logistic regression with preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = LogisticRegression()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test04sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_05_logistic_regression(self):
print("\ntest 05 (logistic regression without preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = LogisticRegression()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test05sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_06_logistic_regression(self):
print("\ntest 06 (logistic regression without preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = LogisticRegression()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test06sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_07_ridge_classifier(self):
print("\ntest 07 (Ridge Classifier) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = RidgeClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test07sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = model._predict_proba_lr(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_08_ridge_classifier(self):
print("\ntest 08 (Ridge Classifier) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = RidgeClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test08sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = model._predict_proba_lr(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
@unittest.skip("")
def test_09_sgd_classifier(self):
print("\ntest 09 (SGD Classifier with preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = SGDClassifier(loss="log")
pipeline_obj = Pipeline([
("scaler", StandardScaler()),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test09sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_10_sgd_classifier(self):
print("\ntest 10 (SGD Classifier with preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = SGDClassifier(loss="log")
pipeline_obj = Pipeline([
("scaler", StandardScaler()),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test10sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_11_lda(self):
print("\ntest 11 (LDA with preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = LinearDiscriminantAnalysis()
pipeline_obj = Pipeline([
("scaler", MaxAbsScaler()),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test11sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_12_lda(self):
print("\ntest 12 (LDA with preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = LinearDiscriminantAnalysis()
pipeline_obj = Pipeline([
("scaler", StandardScaler()),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test12sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_13_linearsvc(self):
print("\ntest 13 (LinearSVC with preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = LinearSVC()
pipeline_obj = Pipeline([
("scaler", StandardScaler()),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test13sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.decision_function(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_14_linearsvc(self):
print("\ntest 14 (LinearSVC with preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = LinearSVC()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test14sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = model._predict_proba_lr(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_15_linearsvr(self):
print("\ntest 15 (linear svr without preprocessing)\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
model = LinearSVR()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test15sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
def test_16_linearsvr(self):
print("\ntest 16 (linear svr with preprocessing)\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
model = LinearSVR()
pipeline_obj = Pipeline([
("scaler", MinMaxScaler()),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test16sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
def test_17_decisiontreeclassifier(self):
print("\ntest 17 (decision tree classifier with preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = DecisionTreeClassifier()
pipeline_obj = Pipeline([
("scaler", Binarizer()),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test17sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_18_decisiontreeclassifier(self):
print("\ntest 18 (decision tree classifier with preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = DecisionTreeClassifier()
pipeline_obj = Pipeline([
("scaler", Binarizer()),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test18sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_19_decisiontreeclassifier(self):
print("\ntest 19 (decision tree classifier without preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = DecisionTreeClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test19sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_20_decisiontreeclassifier(self):
print("\ntest 20 (decision tree classifier without preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = DecisionTreeClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test20sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_21_svr(self):
print("\ntest 21 (SVR without preprocessing)\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
model = SVR()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test21sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
def test_22_gaussian_nb(self):
print("\ntest 22 (GaussianNB without preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = GaussianNB()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test22sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_23_gaussian_nb(self):
print("\ntest 23 (GaussianNB without preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = GaussianNB()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test23sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_24_gaussian_nb(self):
print("\ntest 24 (GaussianNB with preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = GaussianNB()
pipeline_obj = Pipeline([
('scaler', StandardScaler()),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test24sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
@unittest.skip("")
def test_25_random_forest_regressor(self):
print("\ntest 25 (random forest regressor without preprocessing)\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
model = RandomForestRegressor()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test25sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
@unittest.skip("")
def test_26_random_forest_classifier(self):
print("\ntest 26 (random forest classifier with preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = RandomForestClassifier()
pipeline_obj = Pipeline([
('scaler',MinMaxScaler()),
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test26sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
@unittest.skip("")
def test_27_random_forest_classifier(self):
print("\ntest 27 (random forest classifier with preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = RandomForestClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test27sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_28_gradient_boosting_classifier(self):
print("\ntest 28 (gradient boosting classifier with preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = GradientBoostingClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test28sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
@unittest.skip("")
def test_29_gradient_boosting_classifier(self):
print("\ntest 29 (gradient boosting classifier with preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = GradientBoostingClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test29sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
@unittest.skip("")
def test_30_gradient_boosting_regressor(self):
print("\ntest 30 (gradient boosting regressor without preprocessing)\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
model = GradientBoostingRegressor()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test30sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
@unittest.skip("")
def test_31_knn_classifier(self):
print("\ntest 31 (knn classifier without preprocessing) [binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = KNeighborsClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test31sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_32_knn_classifier(self):
print("\ntest 32 (knn classifier without preprocessing) [multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = KNeighborsClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test32sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_33_knn_regressor(self):
print("\ntest 33 (knn regressor without preprocessing)\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
model = KNeighborsRegressor()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test33sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
def test_34_kmeans(self):
print("\ntest 34 (kmeans without preprocessing\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = KMeans(n_clusters=2)
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test34sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.transform(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
@unittest.skip("")
def test_35_isolation_forest(self):
print("\ntest 34 (Isolation Forest\n")
detection_map = {
'true': -1,
'false': 1
}
X = numpy.array([
[1,2,3,4],
[2,1,3,4],
[3,2,1,4],
[3,2,4,1],
[4,3,2,1],
[2,4,3,1]
], dtype=numpy.float32)
test_data = numpy.array([[0,4,0,7],[4,0,4,7]])
features = ['a','b','c','d']
model = IsolationForest(n_estimators=40,contamination=0)
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X)
file_name = 'test35sklearn.pmml'
skl_to_pmml(pipeline_obj, features, '', file_name)
model_pred = pipeline_obj.predict(test_data)
model_scores = model.score_samples(test_data)
model_name = self.adapa_utility.upload_to_zserver(file_name)
z_predictions = self.adapa_utility.score_in_zserver(model_name,'nyoka/tests/test_forest.csv','ANOMALY')
cnt = 0
for idx, value in enumerate(z_predictions):
score, is_anomaly = value.split(",")
score = -1 * float(score)
if "{:.6f}".format(score) != "{:.6f}".format(model_scores[idx]) or model_pred[idx] != detection_map[is_anomaly]:
cnt += 1
self.assertEqual(cnt,0)
@unittest.skip("")
def test_36_one_class_svm(self):
print("\ntest 36 (One Class SVM\n")
detection_map = {
'true': -1,
'false': 1
}
df = pd.read_csv("nyoka/tests/train_ocsvm.csv")
df_test = pd.read_csv("nyoka/tests/test_ocsvm.csv")
features = df.columns
model = OneClassSVM(nu=0.1)
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(df)
file_name = 'test36sklearn.pmml'
skl_to_pmml(pipeline_obj, features, '', file_name)
model_pred = pipeline_obj.predict(df_test)
model_scores = pipeline_obj.decision_function(df_test)
model_name = self.adapa_utility.upload_to_zserver(file_name)
z_predictions = self.adapa_utility.score_in_zserver(model_name,'nyoka/tests/test_ocsvm.csv','ANOMALY')
cnt = 0
for idx, value in enumerate(z_predictions):
score, is_anomaly = value.split(",")
score = float(score)
if "{:.6f}".format(score) != "{:.6f}".format(model_scores[idx]) or model_pred[idx] != detection_map[is_anomaly]:
cnt += 1
self.assertEqual(cnt,0)
def test_37_mlp_regressor(self):
print("\ntest 37 (mlp regressor without preprocessing)\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
model = MLPRegressor()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test37sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
def test_38_mlp_classifier(self):
print("\ntest 38 (mlp classifier without preprocessing)[multi-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()
model = MLPClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test38sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_39_mlp_classifier(self):
print("\ntest 39 (mlp classifier without preprocessing)[binary-class]\n")
X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()
model = MLPClassifier()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(X,y)
file_name = 'test39sklearn.pmml'
skl_to_pmml(pipeline_obj, features, target, file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
model_pred = pipeline_obj.predict(X_test)
model_prob = pipeline_obj.predict_proba(X_test)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
@classmethod
def tearDownClass(self):
print("\n******* Finished *******\n")
if __name__ == '__main__':
unittest.main(warnings='ignore') | 47.107448 | 124 | 0.68977 |
73db9a3b2bf1e9880650f38cdf616f5be6981361 | 361 | py | Python | pyinstrument/renderers/__init__.py | sthagen/pyinstrument | 7b52e037bc3b5736037171003ef3318938e081fe | [
"BSD-3-Clause"
] | 1 | 2020-12-13T06:22:51.000Z | 2020-12-13T06:22:51.000Z | pyinstrument/renderers/__init__.py | sthagen/pyinstrument | 7b52e037bc3b5736037171003ef3318938e081fe | [
"BSD-3-Clause"
] | null | null | null | pyinstrument/renderers/__init__.py | sthagen/pyinstrument | 7b52e037bc3b5736037171003ef3318938e081fe | [
"BSD-3-Clause"
] | null | null | null | from pyinstrument.renderers.base import FrameRenderer, Renderer
from pyinstrument.renderers.console import ConsoleRenderer
from pyinstrument.renderers.html import HTMLRenderer
from pyinstrument.renderers.jsonrenderer import JSONRenderer
from pyinstrument.renderers.session import SessionRenderer
from pyinstrument.renderers.speedscope import SpeedscopeRenderer
| 51.571429 | 64 | 0.894737 |
73db9ad1c87b8b1e72575ff5460261ffb670e96b | 3,895 | py | Python | src/api/models/userModel.py | 0x6f736f646f/backend-blog-application | e2f5444def814776df5754aaf56e4d1eec9336d6 | [
"MIT"
] | null | null | null | src/api/models/userModel.py | 0x6f736f646f/backend-blog-application | e2f5444def814776df5754aaf56e4d1eec9336d6 | [
"MIT"
] | 33 | 2020-04-20T23:15:59.000Z | 2020-04-24T22:14:05.000Z | src/api/models/userModel.py | 0x6f736f646f/backend-blog-application | e2f5444def814776df5754aaf56e4d1eec9336d6 | [
"MIT"
] | null | null | null | import datetime
import jwt
from api import app, db, bcrypt
class UserModel(db.Model):
"""
User model for storing user related details
"""
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(255), nullable=False)
photo = db.Column(db.String(255))
email = db.Column(db.String(255), unique=True, nullable=False)
password = db.Column(db.String(255), nullable=False)
bio = db.Column(db.Text())
registered_on = db.Column(db.DateTime, nullable=False)
role = db.Column(db.Boolean, nullable=True)
confirmed = db.Column(db.Boolean, nullable=False, default=False)
confirmed_on = db.Column(db.DateTime, nullable=True)
def __init__(self, post_data):
self.name = post_data.get('name')
self.photo = post_data.get('photo')
self.email = post_data.get('email')
self.password = bcrypt.generate_password_hash(
post_data.get('password'), app.config.get("BCRYPT_LOG_ROUNDS")
).decode('utf-8')
self.bio = post_data.get('bio')
self.registered_on = datetime.datetime.now()
self.role = post_data.get('role')
self.confirmed = False
@staticmethod
def encode_auth_token(user_id):
"""
Generates the auth token
:return string
"""
try:
time_to_leave = datetime.datetime.utcnow() + datetime.timedelta(
days=0, seconds=60)
payload = {
'expiry': time_to_leave.__str__(),
'time_now': datetime.datetime.utcnow().__str__(),
'user': user_id
}
return jwt.encode(payload,
app.config.get("SECRET_KEY"),
algorithm='HS256')
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
"""
Validates the auth token
:param auth_token
:return integer|string
"""
try:
payload = jwt.decode(auth_token,
app.config.get("SECRET_KEY"))
is_blacklist = BlackListToken.check_blacklist(auth_token)
if is_blacklist:
return "Token blacklisted. Please log in again"
else:
return payload['user']
except jwt.ExpiredSignatureError:
return "Signature expired. Please log in again"
except jwt.InvalidTokenError:
return "Invalid token. Please log in again"
def save_user(self):
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def update_confirmation(self):
self.confirmed = True
self.confirmed_on = datetime.datetime.now()
db.session.add(self)
db.session.commit()
    @staticmethod
    def get_all_blogpost():
        return UserModel.query.all()
    @staticmethod
    def get_one_blogpost(id):
        return UserModel.query.get(id)
def __repr__(self):
return "<id: {}>".format(self.id)
class BlackListToken(db.Model):
"""
Token model for storing jwt tokens
"""
__tablename__ = "blacklist_tokens"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
token = db.Column(db.String(255), unique=True, nullable=False)
blacklisted_on = db.Column(db.DateTime, nullable=False)
def __init__(self, token):
self.token = token
self.blacklisted_on = datetime.datetime.now()
def __repr__(self):
return "<id: token: {}>".format(self.token)
@staticmethod
def check_blacklist(auth_token):
result = BlackListToken.query.filter_by(token=str(auth_token)).first()
if result:
return True
else:
return False
| 31.41129 | 78 | 0.601027 |
73dbaefc8736cd3da51a43371b6f01360d83c8a8 | 3,076 | py | Python | formalchemy/tests/test_multiple_keys.py | coredumperror/formalchemy | 6d89bb3ac19248e114684df17bbf3885dc26de3e | [
"MIT"
] | 20 | 2015-01-25T13:54:08.000Z | 2021-03-09T16:54:53.000Z | formalchemy/tests/test_multiple_keys.py | coredumperror/formalchemy | 6d89bb3ac19248e114684df17bbf3885dc26de3e | [
"MIT"
] | 11 | 2015-02-05T16:36:19.000Z | 2020-11-13T11:41:19.000Z | formalchemy/tests/test_multiple_keys.py | coredumperror/formalchemy | 6d89bb3ac19248e114684df17bbf3885dc26de3e | [
"MIT"
] | 12 | 2015-02-19T14:06:21.000Z | 2021-07-30T08:44:45.000Z | # -*- coding: utf-8 -*-
from formalchemy.tests import *
def test_renderer_names():
"""
    Check that the input name takes care of multiple primary keys::
>>> fs = FieldSet(primary1)
>>> print(fs.field.render())
<input id="PrimaryKeys-1_22-field" maxlength="10" name="PrimaryKeys-1_22-field" type="text" value="value1" />
>>> fs = FieldSet(primary2)
>>> print(fs.field.render())
<input id="PrimaryKeys-1_33-field" maxlength="10" name="PrimaryKeys-1_33-field" type="text" value="value2" />
Check form rendering with keys::
>>> fs = FieldSet(primary2)
>>> fs.configure(pk=True)
>>> print(fs.render())
<div>
<label class="field_req" for="PrimaryKeys-1_33-id">
Id
</label>
<input id="PrimaryKeys-1_33-id" name="PrimaryKeys-1_33-id" type="text" value="1" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("PrimaryKeys-1_33-id").focus();
//]]>
</script>
<div>
<label class="field_req" for="PrimaryKeys-1_33-id2">
Id2
</label>
<input id="PrimaryKeys-1_33-id2" maxlength="10" name="PrimaryKeys-1_33-id2" type="text" value="33" />
</div>
<div>
<label class="field_req" for="PrimaryKeys-1_33-field">
Field
</label>
<input id="PrimaryKeys-1_33-field" maxlength="10" name="PrimaryKeys-1_33-field" type="text" value="value2" />
</div>
"""
def test_foreign_keys():
"""
Assume that we can have more than one ForeignKey as primary key::
>>> fs = FieldSet(orderuser2)
>>> fs.configure(pk=True)
>>> print(pretty_html(fs.user.render()))
<select id="OrderUser-1_2-user_id" name="OrderUser-1_2-user_id">
<option selected="selected" value="1">
Bill
</option>
<option value="2">
John
</option>
</select>
>>> print(pretty_html(fs.order.render()))
<select id="OrderUser-1_2-order_id" name="OrderUser-1_2-order_id">
<option value="1">
Quantity: 10
</option>
<option selected="selected" value="2">
Quantity: 5
</option>
<option value="3">
Quantity: 6
</option>
</select>
"""
def test_deserialize():
"""
Assume that we can deserialize a value
"""
fs = FieldSet(primary1, data={'PrimaryKeys-1_22-field':'new_value'})
assert fs.validate() is True
assert fs.field.value == 'new_value'
fs.sync()
session.rollback()
def test_deserialize_new_record():
"""
Assume that we can deserialize a value
"""
fs = FieldSet(PrimaryKeys(), data={'PrimaryKeys-_-id':'8',
'PrimaryKeys-_-id2':'9'})
fs.configure(include=[fs.id, fs.id2])
assert fs.validate() is True
fs.sync()
assert fs.model.id == 8, fs.model.id
assert fs.model.id2 == '9', fs.model.id2
session.rollback()
| 30.156863 | 118 | 0.559818 |
73dbafa2b2009025c163a91a60c8359d523f8133 | 1,377 | py | Python | commandbased/commandbasedrobot.py | M-Shadow/robotpy-wpilib-utilities | ba95d45041e00842afadaedd2f3cbf2a3ac9cdf7 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | commandbased/commandbasedrobot.py | M-Shadow/robotpy-wpilib-utilities | ba95d45041e00842afadaedd2f3cbf2a3ac9cdf7 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | commandbased/commandbasedrobot.py | M-Shadow/robotpy-wpilib-utilities | ba95d45041e00842afadaedd2f3cbf2a3ac9cdf7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2019-04-14T14:47:41.000Z | 2019-04-14T14:47:41.000Z | from wpilib import TimedRobot
from wpilib.command import Scheduler
class CommandBasedRobot(TimedRobot):
"""
The base class for a Command-Based Robot. To use, instantiate commands and
trigger them.
"""
def startCompetition(self):
"""Initalizes the scheduler before starting robotInit()"""
self.scheduler = Scheduler.getInstance()
super().startCompetition()
def commandPeriodic(self):
"""
Run the scheduler regularly. If an error occurs during a competition,
prevent it from crashing the program.
"""
try:
self.scheduler.run()
except Exception as error:
if not self.ds.isFMSAttached():
raise
"""Just to be safe, stop all running commands."""
self.scheduler.removeAll()
self.handleCrash(error)
autonomousPeriodic = commandPeriodic
teleopPeriodic = commandPeriodic
disabledPeriodic = commandPeriodic
# testPeriodic deliberately omitted
def handleCrash(self, error):
"""
Called if an exception is raised in the Scheduler during a competition.
Writes an error message to the driver station by default. If you want
more complex behavior, override this method in your robot class.
"""
self.ds.reportError(str(error), printTrace=True)
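# Hedged usage sketch (added for illustration; not part of the original module). The subsystem
# and command classes are placeholders a real robot project would define itself, and the entry
# point assumes the robotpy `wpilib.run` convention of this command-based framework era:
#
#     class MyRobot(CommandBasedRobot):
#         def robotInit(self):
#             self.auto_command = MyAutoCommand()   # hypothetical Command
#
#         def autonomousInit(self):
#             self.auto_command.start()
#
#     if __name__ == "__main__":
#         wpilib.run(MyRobot)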
| 29.297872 | 79 | 0.648511 |
73dbb24252b0aaaa1f50edafcd1e7c6a35cd4c02 | 438 | py | Python | recipes/how_to_use_odfdo_in_command_line_scripts_odfdo-show.py | jdum/odfdo | 2494d0bed39f5a55974643206e9bafeed40f3a6b | [
"Apache-2.0"
] | 18 | 2018-04-19T08:30:48.000Z | 2022-02-14T11:00:27.000Z | recipes/how_to_use_odfdo_in_command_line_scripts_odfdo-show.py | jdum/odfdo | 2494d0bed39f5a55974643206e9bafeed40f3a6b | [
"Apache-2.0"
] | 15 | 2018-04-22T00:52:41.000Z | 2021-07-05T10:16:38.000Z | recipes/how_to_use_odfdo_in_command_line_scripts_odfdo-show.py | jdum/odfdo | 2494d0bed39f5a55974643206e9bafeed40f3a6b | [
"Apache-2.0"
] | 6 | 2018-04-22T00:14:12.000Z | 2021-12-06T01:42:07.000Z | print(
"Example of the use of odfdo in a script : odfdo-show.py. This command "
"line tool dumps text from an OpenDocument file to the standard "
"output, optionally styles and meta.\n\n"
"odfdo-show.py is available in the script directory of the odfdo-python"
" package.\n"
)
from subprocess import call
from pathlib import PurePath
p = PurePath("../scripts/odfdo-show.py")
call("python %s --help" % p, shell=True)
| 31.285714 | 76 | 0.703196 |
73dbb7285ad1ab1f6850ab21d7b216b2c972b5dc | 14,481 | py | Python | algorithms/maddpg.py | tuladhay/rob538_project | 67a19ee34ca256f6d103018c844165bab68ed96d | [
"MIT"
] | null | null | null | algorithms/maddpg.py | tuladhay/rob538_project | 67a19ee34ca256f6d103018c844165bab68ed96d | [
"MIT"
] | 1 | 2020-06-30T11:47:59.000Z | 2020-06-30T11:47:59.000Z | algorithms/maddpg.py | tuladhay/rob538_project | 67a19ee34ca256f6d103018c844165bab68ed96d | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
from gym.spaces import Box, Discrete
from utils.networks import MLPNetwork
from utils.misc import soft_update, average_gradients, onehot_from_logits, gumbel_softmax
from utils.agents import DDPGAgent
import numpy as np
MSELoss = torch.nn.MSELoss()
class MADDPG(object):
"""
Wrapper class for DDPG-esque (i.e. also MADDPG) agents in multi-agent task
"""
def __init__(self, agent_init_params, alg_types,
gamma=0.95, tau=0.01, lr=0.01, hidden_dim=64,
discrete_action=False):
"""
Inputs:
agent_init_params (list of dict): List of dicts with parameters to
initialize each agent
num_in_pol (int): Input dimensions to policy
num_out_pol (int): Output dimensions to policy
num_in_critic (int): Input dimensions to critic
alg_types (list of str): Learning algorithm for each agent (DDPG
or MADDPG)
gamma (float): Discount factor
tau (float): Target update rate
lr (float): Learning rate for policy and critic
hidden_dim (int): Number of hidden dimensions for networks
discrete_action (bool): Whether or not to use discrete action space
"""
self.nagents = len(alg_types)
self.alg_types = alg_types
self.agents = [DDPGAgent(lr=lr, discrete_action=discrete_action,
hidden_dim=hidden_dim,
isRNN=True, #TODO: RNN or not?
**params)
for params in agent_init_params]
self.agent_init_params = agent_init_params
self.gamma = gamma
self.tau = tau
self.lr = lr
self.discrete_action = discrete_action
self.pol_dev = 'cpu' # device for policies
self.critic_dev = 'cpu' # device for critics
self.trgt_pol_dev = 'cpu' # device for target policies
self.trgt_critic_dev = 'cpu' # device for target critics
self.niter = 0
@property
def policies(self):
return [a.policy for a in self.agents]
@property
def target_policies(self):
return [a.target_policy for a in self.agents]
def scale_noise(self, scale):
"""
Scale noise for each agent
Inputs:
scale (float): scale of noise
"""
for a in self.agents:
a.scale_noise(scale)
def reset_noise(self):
for a in self.agents:
a.reset_noise()
def step(self, observations, explore=False):
"""
Take a step forward in environment with all agents
Inputs:
observations: List of observations for each agent
explore (boolean): Whether or not to add exploration noise
Outputs:
actions: List of actions for each agent
"""
return [a.step(obs, explore=explore) for a, obs in zip(self.agents,
observations)]
def update(self, sample, agent_i, parallel=False, logger=None):
"""
Update parameters of agent model based on sample from replay buffer
Inputs:
sample: tuple of (observations, actions, rewards, next
observations, and episode end masks) sampled randomly from
the replay buffer. Each is a list with entries
corresponding to each agent
agent_i (int): index of agent to update
parallel (bool): If true, will average gradients across threads
logger (SummaryWriter from Tensorboard-Pytorch):
If passed in, important quantities will be logged
"""
# For RNN, the obs and next_obs both have histories
obs, acs, rews, next_obs, dones = sample
curr_agent = self.agents[agent_i]
curr_agent.critic_optimizer.zero_grad()
if self.alg_types[agent_i] == 'MADDPG':
if self.discrete_action: # one-hot encode action
# This is original one, 'pi': policy, 'nobs' n_observations
#all_trgt_acs = [onehot_from_logits(pi(nobs)) for pi, nobs in
# zip(self.target_policies, next_obs)]
# Original till here
#-------- Expanding out for debugging --------#
all_trgt_acs = []
for pi, nobs in zip(self.target_policies, next_obs):
temp = onehot_from_logits(pi(nobs))
#print(temp)
all_trgt_acs.append(temp)
# -------- End debug -------------------------#
else:
all_trgt_acs = [pi(nobs) for pi, nobs in zip(self.target_policies,
next_obs)]
# TODO: Get the most current observation from the history to calculate the target value
t0_next_obs = [[],[],[]]
for a in range(self.nagents):
t0_next_obs[a] = torch.tensor(np.zeros((next_obs[0].shape[0], 18)), dtype=torch.float)
# the next_obs[0].shape[0] gives the batch size
# TODO: change it to be a parameter
# Only keep the current obs for critic VF
for n in range(self.nagents): # for each agents
for b in range(next_obs[0].shape[0]): # for the number of batches
t0_next_obs[n][b][:] = next_obs[n][b][0:18]
# TODO: ORIGINAL was \/
#trgt_vf_in = torch.cat((*next_obs, *all_trgt_acs), dim=1)
trgt_vf_in = torch.cat((*t0_next_obs, *all_trgt_acs), dim=1)
# It is working till here. Only kept the current obs for critic VF
else: # DDPG
if self.discrete_action:
trgt_vf_in = torch.cat((next_obs[agent_i],
onehot_from_logits(
curr_agent.target_policy(
next_obs[agent_i]))),
dim=1)
else:
trgt_vf_in = torch.cat((next_obs[agent_i],
curr_agent.target_policy(next_obs[agent_i])),
dim=1)
target_value = (rews[agent_i].view(-1, 1) + self.gamma *
curr_agent.target_critic(trgt_vf_in) *
(1 - dones[agent_i].view(-1, 1)))
##### Just get the current observation (i.e., without history) ##########
# Reason: Critic VF does not need history
# Copied the same as in t0_next_obs, since BOTH obs and next_obs have HISTORIES.
t0_obs = [[], [], []]
for a in range(self.nagents):
t0_obs[a] = torch.tensor(np.zeros((obs[0].shape[0], 18)), dtype=torch.float)
for n in range(self.nagents): # for each agents
for b in range(obs[0].shape[0]): # for the number of batches
t0_obs[n][b][:] = obs[n][b][0:18]
###################################################
if self.alg_types[agent_i] == 'MADDPG':
vf_in = torch.cat((*t0_obs, *acs), dim=1)
else: # DDPG #TODO: below, might have to change obs to t0_obs, when using DDPG
vf_in = torch.cat((obs[agent_i], acs[agent_i]), dim=1)
actual_value = curr_agent.critic(vf_in)
vf_loss = MSELoss(actual_value, target_value.detach())
vf_loss.backward()
if parallel:
average_gradients(curr_agent.critic)
torch.nn.utils.clip_grad_norm(curr_agent.critic.parameters(), 0.5)
curr_agent.critic_optimizer.step()
curr_agent.policy_optimizer.zero_grad()
if self.discrete_action:
# Forward pass as if onehot (hard=True) but backprop through a differentiable
# Gumbel-Softmax sample. The MADDPG paper uses the Gumbel-Softmax trick to backprop
# through discrete categorical samples, but I'm not sure if that is
# correct since it removes the assumption of a deterministic policy for
# DDPG. Regardless, discrete policies don't seem to learn properly without it.
'''
Now, we are back to forwarding policy, so we need to use obs with history
'''
curr_pol_out = curr_agent.policy(obs[agent_i])
curr_pol_vf_in = gumbel_softmax(curr_pol_out, hard=True)
# Seems to be working fine till here
else:
curr_pol_out = curr_agent.policy(obs[agent_i])
curr_pol_vf_in = curr_pol_out
if self.alg_types[agent_i] == 'MADDPG':
all_pol_acs = []
for i, pi, ob in zip(range(self.nagents), self.policies, obs):
if i == agent_i:
all_pol_acs.append(curr_pol_vf_in)
elif self.discrete_action:
all_pol_acs.append(onehot_from_logits(pi(ob)))
else:
all_pol_acs.append(pi(ob))
# Originally:
#vf_in = torch.cat((*obs, *all_pol_acs), dim=1)
vf_in = torch.cat((*t0_obs, *all_pol_acs), dim=1)
else: # DDPG
vf_in = torch.cat((obs[agent_i], curr_pol_vf_in),
dim=1)
# TODO: FIX THIS
pol_loss = -curr_agent.critic(vf_in).mean()
pol_loss += (curr_pol_out**2).mean() * 1e-3
pol_loss.backward()
if parallel:
average_gradients(curr_agent.policy)
torch.nn.utils.clip_grad_norm(curr_agent.policy.parameters(), 0.5)
curr_agent.policy_optimizer.step()
if logger is not None:
logger.add_scalars('agent%i/losses' % agent_i,
{'vf_loss': vf_loss,
'pol_loss': pol_loss},
self.niter)
def update_all_targets(self):
"""
Update all target networks (called after normal updates have been
performed for each agent)
"""
for a in self.agents:
soft_update(a.target_critic, a.critic, self.tau)
soft_update(a.target_policy, a.policy, self.tau)
self.niter += 1
def prep_training(self, device='gpu'):
for a in self.agents:
a.policy.train()
a.critic.train()
a.target_policy.train()
a.target_critic.train()
if device == 'gpu':
fn = lambda x: x.cuda()
else:
fn = lambda x: x.cpu()
if not self.pol_dev == device:
for a in self.agents:
a.policy = fn(a.policy)
self.pol_dev = device
if not self.critic_dev == device:
for a in self.agents:
a.critic = fn(a.critic)
self.critic_dev = device
if not self.trgt_pol_dev == device:
for a in self.agents:
a.target_policy = fn(a.target_policy)
self.trgt_pol_dev = device
if not self.trgt_critic_dev == device:
for a in self.agents:
a.target_critic = fn(a.target_critic)
self.trgt_critic_dev = device
def prep_rollouts(self, device='cpu'):
for a in self.agents:
a.policy.eval()
if device == 'gpu':
fn = lambda x: x.cuda()
else:
fn = lambda x: x.cpu()
# only need main policy for rollouts
if not self.pol_dev == device:
for a in self.agents:
a.policy = fn(a.policy)
self.pol_dev = device
def save(self, filename):
"""
Save trained parameters of all agents into one file
"""
self.prep_training(device='cpu') # move parameters to CPU before saving
save_dict = {'init_dict': self.init_dict,
'agent_params': [a.get_params() for a in self.agents]}
torch.save(save_dict, filename)
@classmethod
def init_from_env(cls, env, agent_alg="MADDPG", adversary_alg="MADDPG",
gamma=0.95, tau=0.01, lr=0.01, hidden_dim=64):
"""
Instantiate instance of this class from multi-agent environment
"""
agent_init_params = []
alg_types = [adversary_alg if atype == 'adversary' else agent_alg for
atype in env.agent_types]
for acsp, obsp, algtype in zip(env.action_space, env.observation_space,
alg_types):
num_in_pol = obsp.shape[0]
if isinstance(acsp, Box):
discrete_action = False
get_shape = lambda x: x.shape[0]
else: # Discrete
discrete_action = True
get_shape = lambda x: x.n
num_out_pol = get_shape(acsp)
if algtype == "MADDPG":
num_in_critic = 0
for oobsp in env.observation_space:
num_in_critic += oobsp.shape[0]
for oacsp in env.action_space:
num_in_critic += get_shape(oacsp) # Here, we have the critic's input space
else:
num_in_critic = obsp.shape[0] + get_shape(acsp)
agent_init_params.append({'num_in_pol': num_in_pol, # actor policy inputs
'num_out_pol': num_out_pol, # actor policy outputs
'num_in_critic': num_in_critic})
init_dict = {'gamma': gamma, 'tau': tau, 'lr': lr,
'hidden_dim': hidden_dim,
'alg_types': alg_types,
'agent_init_params': agent_init_params,
'discrete_action': discrete_action}
instance = cls(**init_dict)
instance.init_dict = init_dict
return instance
@classmethod
def init_from_save(cls, filename):
"""
Instantiate instance of this class from file created by 'save' method
"""
save_dict = torch.load(filename)
instance = cls(**save_dict['init_dict'])
instance.init_dict = save_dict['init_dict']
for a, params in zip(instance.agents, save_dict['agent_params']):
a.load_params(params)
return instance
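# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# project).  It chains the public methods defined above in the order a
# typical training script would call them.  `env` and `buffer` (a replay
# buffer whose sample() returns per-agent (obs, acs, rews, next_obs, dones))
# are assumed to exist elsewhere and are not defined here.
#
#     maddpg = MADDPG.init_from_env(env, agent_alg="MADDPG")
#     maddpg.prep_rollouts(device='cpu')
#     maddpg.scale_noise(0.3)
#     maddpg.reset_noise()
#     obs = env.reset()
#     actions = maddpg.step(obs, explore=True)   # one action per agent
#     # ...store the transition in the replay buffer, then periodically:
#     maddpg.prep_training(device='cpu')
#     sample = buffer.sample(1024)
#     for a_i in range(maddpg.nagents):
#         maddpg.update(sample, a_i)
#     maddpg.update_all_targets()
# ---------------------------------------------------------------------------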
| 43.486486 | 102 | 0.545611 |
73dbe1163c58529cca3dba35315662a9da5c16b0 | 559 | py | Python | build/lib/events/migrations/0005_auto_20180618_0742.py | aswinzz/django-events-rest-framework | f87358b7e416f3d8bbe870da2f56babbb98d741a | [
"MIT"
] | 7 | 2018-06-18T20:30:26.000Z | 2021-01-21T19:16:25.000Z | build/lib/events/migrations/0005_auto_20180618_0742.py | aswinzz/django-events-rest-framework | f87358b7e416f3d8bbe870da2f56babbb98d741a | [
"MIT"
] | null | null | null | build/lib/events/migrations/0005_auto_20180618_0742.py | aswinzz/django-events-rest-framework | f87358b7e416f3d8bbe870da2f56babbb98d741a | [
"MIT"
] | 1 | 2020-01-29T15:42:46.000Z | 2020-01-29T15:42:46.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-18 07:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0004_auto_20180618_0602'),
]
operations = [
migrations.AlterModelOptions(
name='events',
options={'verbose_name_plural': 'Events'},
),
migrations.AlterModelOptions(
name='occurrences',
options={'verbose_name_plural': 'Occurrences'},
),
]
| 23.291667 | 59 | 0.608229 |
73dc024cedb8d11adc0c9bf5705ba347f1385842 | 24,789 | py | Python | heat/tests/functional/util.py | CiscoSystems/heat | 1b609f3c0621c44e4988a166a38f36c2b57eb4c6 | [
"Apache-2.0"
] | 1 | 2020-08-15T14:29:15.000Z | 2020-08-15T14:29:15.000Z | heat/tests/functional/util.py | CiscoSystems/heat | 1b609f3c0621c44e4988a166a38f36c2b57eb4c6 | [
"Apache-2.0"
] | null | null | null | heat/tests/functional/util.py | CiscoSystems/heat | 1b609f3c0621c44e4988a166a38f36c2b57eb4c6 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import optparse
import paramiko
import subprocess
import hashlib
import email
import time # for sleep
import errno
import tempfile
import stat
import re
from pkg_resources import resource_string
from lxml import etree
from nose.exc import SkipTest
from glanceclient import client as glance_client
from keystoneclient.v2_0 import client as keystone_client
from novaclient.v1_1 import client as nova_client
import heat
from heat.common import template_format
from heat.engine import parser
from heat.cfn_client import client as heat_client
from heat.cfn_client import boto_client as heat_client_boto
from keystoneclient.v2_0 import client
DEFAULT_STACKNAME = 'teststack'
# this test is in heat/tests/functional, so go up 3 dirs
basepath = os.path.join(heat.__path__[0], os.path.pardir)
class Instance(object):
def __init__(self, testcase, instance_name, stackname=DEFAULT_STACKNAME):
self.testcase = testcase
self.name = '%s.%s' % (stackname, instance_name)
# during nose test execution this file will be imported even if
# the unit tag was specified
try:
os.environ['OS_AUTH_STRATEGY']
except KeyError:
raise SkipTest('OS_AUTH_STRATEGY unset, skipping functional test')
self.testcase.assertEqual(os.environ['OS_AUTH_STRATEGY'],
'keystone',
'keystone authentication required')
self.creds = dict(username=os.environ['OS_USERNAME'],
password=os.environ['OS_PASSWORD'],
tenant=os.environ['OS_TENANT_NAME'],
auth_url=os.environ['OS_AUTH_URL'],
strategy=os.environ['OS_AUTH_STRATEGY'])
        self.dbusername = 'testuser'
self.novaclient = nova_client.Client(self.creds['username'],
self.creds['password'],
self.creds['tenant'],
self.creds['auth_url'],
service_type='compute')
self.ssh = paramiko.SSHClient()
self.sftp = None
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ip = None
def wait_for_boot(self):
tries = 0
while self.ip is None:
servers = self.novaclient.servers.list()
for server in servers:
if server.name == self.name:
address = server.addresses
if address:
self.ip = address.items()[0][1][0]['addr']
tries += 1
self.testcase.assertTrue(tries < 150, 'Timed out')
time.sleep(10)
print 'Instance (%s) ip (%s) status (%s)' % (self.name, self.ip,
server.status)
tries = 0
while True:
try:
subprocess.check_output(['nc', '-z', self.ip, '22'])
except Exception:
print('Instance (%s) ip (%s) SSH not up yet, waiting...' %
(self.name, self.ip))
tries += 1
self.testcase.assertTrue(tries < 50, 'Timed out')
time.sleep(10)
else:
print 'Instance (%s) ip (%s) SSH detected.' % (self.name,
self.ip)
break
tries = 0
while True:
try:
tries += 1
self.testcase.assertTrue(tries < 50, 'Timed out')
self.ssh.connect(self.ip,
username='ec2-user',
allow_agent=True,
look_for_keys=True,
password='password')
except paramiko.AuthenticationException:
print 'Authentication error'
time.sleep(2)
except Exception as e:
if e.errno != errno.EHOSTUNREACH:
raise
print('Instance (%s) ip (%s) connecting via SSH.' %
(self.name, self.ip))
time.sleep(2)
else:
print('Instance (%s) ip (%s) connected via SSH.' %
(self.name, self.ip))
break
self.sftp = self.ssh.open_sftp()
tries = 0
while True:
try:
self.sftp.stat('/var/lib/heat-cfntools/boot-finished')
except IOError, e:
tries += 1
if e.errno == errno.ENOENT:
self.testcase.assertTrue(tries < 50, 'Timed out')
print("Instance (%s) ip (%s) not booted, waiting..." %
(self.name, self.ip))
time.sleep(15)
else:
print e.errno
raise
else:
print("Instance (%s) ip (%s) finished booting." %
(self.name, self.ip))
break
def exec_sudo_command(self, cmd):
# Force a tty or sudo commands fail
channel = self.ssh.invoke_shell()
channel.sendall("sudo %s\n" % cmd)
channel.sendall('exit\n')
time.sleep(1) # necessary for sendall to complete
stdin = channel.makefile('wb')
stdout = channel.makefile('rb')
stderr = channel.makefile_stderr('rb')
return stdin, stdout, stderr
def exec_command(self, cmd):
return self.ssh.exec_command(cmd)
def exists(self):
servers = self.novaclient.servers.list()
for server in servers:
if server.name == self.name:
return True
return False
def file_present(self, path):
print "Verifying file '%s' exists" % path
        stdin, stdout, stderr = self.ssh.exec_command('ls "%s"' % path)
lines = stdout.readlines()
self.testcase.assertEqual(len(lines), 1)
result = lines.pop().rstrip()
return result == path
def floating_ip_present(self):
floating_ips = self.novaclient.floating_ips.list()
for eip in floating_ips:
if self.ip == eip.fixed_ip:
return True
return False
def check_cfntools(self):
stdin, stdout, stderr = \
self.ssh.exec_command('cd /opt/aws/bin; sha1sum *')
files = stdout.readlines()
cfn_tools_files = ['cfn-init', 'cfn-hup', 'cfn-signal',
'cfn-get-metadata', 'cfn_helper.py']
cfntools = {}
for file in cfn_tools_files:
file_data = resource_string('heat_jeos', 'cfntools/' + file)
sha = hashlib.sha1(file_data).hexdigest()
cfntools[file] = sha
# 1. make sure installed cfntools SHA match VM's version
for x in range(len(files)):
data = files.pop().split(' ')
cur_file = data[1].rstrip()
if cur_file in cfn_tools_files:
self.testcase.assertEqual(data[0], cfntools[cur_file])
print 'Instance (%s) cfntools integrity verified.' % self.name
def wait_for_provisioning(self):
print "Instance (%s) waiting for provisioning to complete." % self.name
tries = 0
while True:
try:
self.sftp.stat('/var/lib/heat-cfntools/provision-finished')
except paramiko.SSHException as e:
print e
except IOError as e:
if e.errno != errno.ENOENT:
raise
else:
print "Instance (%s) provisioning completed." % self.name
return
tries += 1
self.testcase.assertTrue(tries < 50, 'Timed out')
print("Instance (%s) provisioning incomplete, waiting..." %
self.name)
time.sleep(15)
def check_user_data(self, template_file):
return # until TODO is fixed
# transport = self.ssh.get_transport()
# channel = transport.open_session()
# channel.get_pty()
# channel.invoke_shell() # sudo requires tty
# channel.sendall('sudo chmod 777 \
# sudo chmod 777 /var/lib/cloud/instance/user-data.txt.i\n')
# time.sleep(1) # necessary for sendall to complete
f = open(basepath + '/templates/' + template_file)
t = template_format.parse(f.read())
f.close()
template = parser.Template(t)
params = parser.Parameters('test', t,
{'KeyName': 'required_parameter',
'DBUsername': self.dbusername,
'DBPassword': self.creds['password']})
stack = parser.Stack(None, 'test', template, params)
parsed_t = stack.resolve_static_data(t)
remote_file = self.sftp.open('/var/lib/heat-cfntools/cfn-userdata')
remote_file_list = remote_file.read().split('\n')
remote_file_list_u = map(unicode, remote_file_list)
remote_file.close()
# TODO: make server name generic
t_data = parsed_t['Resources']['WikiDatabase']['Properties']
t_data = t_data['UserData']['Fn::Base64']['Fn::Join'].pop()
joined_t_data = ''.join(t_data)
t_data_list = joined_t_data.split('\n')
self.testcase.assertEqual(t_data_list, remote_file_list_u)
remote_file = self.sftp.open('/var/lib/cloud/instance/user-data.txt.i')
msg = email.message_from_file(remote_file)
remote_file.close()
filepaths = {
'cloud-config': basepath + '/heat/cloudinit/config',
'part-handler.py': basepath +
'/heat/cloudinit/part-handler.py'
}
# check multipart mime accuracy
for part in msg.walk():
# multipart/* are just containers
if part.get_content_maintype() == 'multipart':
continue
file = part.get_filename()
data = part.get_payload()
if file in filepaths.keys():
with open(filepaths[file]) as f:
self.testcase.assertEqual(data, f.read())
def close_ssh_client(self):
self.ssh.close()
class Stack(object):
def __init__(self, testcase, template_file, distribution, arch, jeos_type,
stack_paramstr, stackname=DEFAULT_STACKNAME):
self.testcase = testcase
self.stackname = stackname
self.template_file = template_file
self.distribution = distribution
self.stack_paramstr = stack_paramstr
self.stack_id_re = re.compile("^arn:openstack:heat::[0-9a-z]{32}:" +
"stacks/" + self.stackname +
# Stack ID UUID in standard form
# as returned by uuid.uuid4()
"/[0-9a-f]{8}-" +
"[0-9a-f]{4}-" +
"[0-9a-f]{4}-" +
"[0-9a-f]{4}-" +
"[0-9a-f]{12}$")
self.creds = dict(username=os.environ['OS_USERNAME'],
password=os.environ['OS_PASSWORD'],
tenant=os.environ['OS_TENANT_NAME'],
auth_url=os.environ['OS_AUTH_URL'],
strategy=os.environ['OS_AUTH_STRATEGY'])
self.dbusername = 'testuser'
self.testcase.assertEqual(os.environ['OS_AUTH_STRATEGY'],
'keystone',
'keystone authentication required')
kc_creds = dict(username=os.environ['OS_USERNAME'],
password=os.environ['OS_PASSWORD'],
tenant_name=os.environ['OS_TENANT_NAME'],
auth_url=os.environ['OS_AUTH_URL'])
kc = keystone_client.Client(**kc_creds)
glance_url = kc.service_catalog.url_for(service_type='image',
endpoint_type='publicURL')
version_string = '/v1'
if glance_url.endswith(version_string):
glance_url = glance_url[:-len(version_string)]
auth_token = kc.auth_token
self.glanceclient = glance_client.Client(1, glance_url,
token=auth_token)
self.prepare_jeos(distribution, arch, jeos_type)
self.novaclient = nova_client.Client(self.creds['username'],
self.creds['password'],
self.creds['tenant'],
self.creds['auth_url'],
service_type='compute')
self.heatclient = self._create_heat_client()
def format_parameters(self):
self.keyname = self.novaclient.keypairs.list().pop().name
self.testcase.assertTrue(self.heatclient)
full_paramstr = ';'.join([self.stack_paramstr,
'KeyName=' + self.keyname,
'LinuxDistribution=' + self.distribution])
template_params = optparse.Values({'parameters': full_paramstr})
# Format parameters and create the stack
parameters = {}
parameters['StackName'] = self.stackname
template_path = os.path.join(basepath,
'templates',
self.template_file)
parameters['TemplateBody'] = open(template_path).read()
parameters.update(self.heatclient.format_parameters(template_params))
return parameters
def create(self):
parameters = self.format_parameters()
result = self.heatclient.create_stack(**parameters)
self._check_create_result(result)
alist = None
tries = 0
print 'Waiting for stack creation to be completed'
while self.get_state() == 'CREATE_IN_PROGRESS':
tries += 1
self.testcase.assertTrue(tries < 150, 'Timed out')
time.sleep(10)
self.testcase.assertEqual(self.get_state(), 'CREATE_COMPLETE')
def update(self):
parameters = self.format_parameters()
result = self.heatclient.update_stack(**parameters)
self._check_update_result(result)
alist = None
tries = 0
print 'Waiting for stack update to be completed'
while self.get_state() == 'UPDATE_IN_PROGRESS':
tries += 1
self.testcase.assertTrue(tries < 150, 'Timed out')
time.sleep(10)
self.testcase.assertEqual(self.get_state(), 'UPDATE_COMPLETE')
def _check_create_result(self, result):
# Check result looks OK
root = etree.fromstring(result)
create_list = root.xpath('/CreateStackResponse/CreateStackResult')
self.testcase.assertTrue(create_list)
self.testcase.assertEqual(len(create_list), 1)
stack_id = create_list[0].findtext('StackId')
self.testcase.assertTrue(stack_id is not None)
self.check_stackid(stack_id)
def _check_update_result(self, result):
# Check result looks OK
root = etree.fromstring(result)
update_list = root.xpath('/UpdateStackResponse/UpdateStackResult')
self.testcase.assertTrue(update_list)
self.testcase.assertEqual(len(update_list), 1)
stack_id = update_list[0].findtext('StackId')
self.testcase.assertTrue(stack_id is not None)
self.check_stackid(stack_id)
def check_stackid(self, stack_id):
print "Checking %s matches expected format" % (stack_id)
self.testcase.assertTrue(self.stack_id_re.match(stack_id) is not None)
def _create_heat_client(self):
return heat_client.get_client('0.0.0.0', 8000,
self.creds['username'],
self.creds['password'],
self.creds['tenant'],
self.creds['auth_url'],
self.creds['strategy'],
None, None, False)
def get_state(self):
stack_list = self.heatclient.list_stacks(StackName=self.stackname)
root = etree.fromstring(stack_list)
xpq = '//member[StackName="%s"]'
alist = root.xpath(xpq % (self.stackname))
result = None
if len(alist):
item = alist.pop()
result = item.findtext("StackStatus")
if result and result.find('FAILED') >= 0:
print stack_list
return result
def cleanup(self):
parameters = {'StackName': self.stackname}
self.heatclient.delete_stack(**parameters)
print 'Waiting for stack deletion to be completed'
tries = 0
while self.get_state() == 'DELETE_IN_PROGRESS':
tries += 1
self.testcase.assertTrue(tries < 50, 'Timed out')
time.sleep(10)
# final state for all stacks is DELETE_COMPLETE, but then they
        # disappear hence no result from list_stacks/get_state
# depending on timing, we could get either result here
end_state = self.get_state()
if end_state is not None:
self.testcase.assertEqual(end_state, 'DELETE_COMPLETE')
def prepare_jeos(self, p_os, arch, type):
imagename = p_os + '-' + arch + '-' + type
# skip creating jeos if image already available
if not self.poll_glance(imagename, False):
self.testcase.assertEqual(os.geteuid(), 0,
'No JEOS found - run as root to create')
# -d: debug, -G: register with glance
subprocess.call(['heat-jeos', '-d', '-G', 'create', imagename])
# Nose seems to change the behavior of the subprocess call to be
# asynchronous. So poll glance until image is registered.
            self.poll_glance(imagename, True)
def poll_glance(self, imagename, block):
image = None
tries = 0
while image is None:
tries += 1
self.testcase.assertTrue(tries < 50, 'Timed out')
if block:
time.sleep(15)
print "Checking glance for image registration"
imageslist = self.glanceclient.images.list(
filters={'name': imagename})
image = next(imageslist, None)
if image:
print "Found image registration for %s" % imagename
# technically not necessary, but glance registers image
# before completely through with its operations
time.sleep(10)
return True
if not block:
break
return False
def get_stack_output(self, output_key):
'''
Extract a specified output from the DescribeStacks details
'''
# Get the DescribeStacks result for this stack
parameters = {'StackName': self.stackname}
result = self.heatclient.describe_stacks(**parameters)
return self._find_stack_output(result, output_key)
def _find_stack_output(self, result, output_key):
# Extract the OutputValue for the specified OutputKey
root = etree.fromstring(result)
output_list = root.xpath('//member[OutputKey="' + output_key + '"]')
output = output_list.pop()
value = output.findtext('OutputValue')
return value
def instance_phys_ids(self):
events = self.heatclient.list_stack_events(StackName=self.stackname)
root = etree.fromstring(events)
xpq = ('//member[StackName="%s" and '
'ResourceStatus="CREATE_COMPLETE" and '
'ResourceType="AWS::EC2::Instance"]')
alist = root.xpath(xpq % self.stackname)
return [elem.findtext('PhysicalResourceId') for elem in alist]
def response_xml_item(self, response, prefix, key):
'''
Extract response item via xpath prefix and key name
we expect the prefix to map to a single Element item
'''
root = etree.fromstring(response)
output_list = root.xpath(prefix)
self.testcase.assertTrue(output_list)
self.testcase.assertEqual(len(output_list), 1)
output = output_list.pop()
value = output.findtext(key)
return value
class StackBoto(Stack):
'''
Version of the Stack class which uses the boto client (hence AWS auth and
the CFN API).
'''
def _check_create_result(self, result):
self.check_stackid(result)
def _check_update_result(self, result):
self.check_stackid(result)
def _create_heat_client(self):
# Connect to the keystone client with the supplied credentials
# and extract the ec2-credentials, so we can pass them into the
# boto client
keystone = client.Client(username=self.creds['username'],
password=self.creds['password'],
tenant_name=self.creds['tenant'],
auth_url=self.creds['auth_url'])
ksusers = keystone.users.list()
ksuid = [u.id for u in ksusers if u.name == self.creds['username']]
self.testcase.assertEqual(len(ksuid), 1)
ec2creds = keystone.ec2.list(ksuid[0])
self.testcase.assertEqual(len(ec2creds), 1)
self.testcase.assertTrue(ec2creds[0].access)
self.testcase.assertTrue(ec2creds[0].secret)
print "Got EC2 credentials from keystone"
# most of the arguments passed to heat_client_boto are for
# compatibility with the non-boto client wrapper, and are
# actually ignored, only the port and credentials are used
return heat_client_boto.get_client('0.0.0.0', 8000,
self.creds['username'],
self.creds['password'],
self.creds['tenant'],
self.creds['auth_url'],
self.creds['strategy'],
None, None, False,
aws_access_key=ec2creds[0].access,
aws_secret_key=ec2creds[0].secret)
def get_state(self):
stack_list = self.heatclient.list_stacks()
this = [s for s in stack_list if s.stack_name == self.stackname]
result = None
if len(this):
result = this[0].stack_status
return result
def instance_phys_ids(self):
events = self.heatclient.list_stack_events(StackName=self.stackname)
def match(e):
return (e.stack_name == self.stackname and
e.resource_status == "CREATE_COMPLETE" and
e.resource_type == "AWS::EC2::Instance")
return [e.physical_resource_id for e in events if match(e)]
def _find_stack_output(self, result, output_key):
self.testcase.assertEqual(len(result), 1)
for o in result[0].outputs:
if o.key == output_key:
return o.value
def add_host(ip, hostname):
with open('/etc/hosts', 'a') as hostfile:
hostfile.write(ip + '\t' + hostname)
def remove_host(ip, hostname):
data = None
with open('/etc/hosts', 'r') as hostfile:
data = hostfile.readlines()
perms = stat.S_IMODE(os.stat('/etc/hosts').st_mode)
with tempfile.NamedTemporaryFile('w', dir='/etc', delete=False) as tmp:
for line in data:
if line.rstrip() == ip + '\t' + hostname:
continue
tmp.write(line)
os.chmod(tmp.name, perms)
os.rename(tmp.name, '/etc/hosts')
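# Hedged usage sketch (added for illustration; not part of the original test
# utility).  From inside a TestCase with the OS_* environment variables set,
# a functional test would typically drive the helpers above roughly like
# this; the template name, parameters and instance name are examples only.
#
#     stack = Stack(self, 'WordPress_Single_Instance.template',
#                   'F17', 'x86_64', 'cfntools',
#                   'InstanceType=m1.small;DBUsername=dbuser;DBPassword=secret')
#     stack.create()
#     instance = Instance(self, 'WikiDatabase')
#     instance.wait_for_boot()
#     instance.check_cfntools()
#     stack.cleanup()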
| 38.5521 | 79 | 0.555045 |
73dc0bc667ed21c44f78b6b2cb31cb46480ed39c | 2,668 | py | Python | tests/test_commands.py | pielmach/wuerfelbecher | 8a9cfdab8793983c10853e2e311ff1b98dae7da4 | [
"MIT"
] | null | null | null | tests/test_commands.py | pielmach/wuerfelbecher | 8a9cfdab8793983c10853e2e311ff1b98dae7da4 | [
"MIT"
] | 9 | 2021-06-05T18:46:43.000Z | 2022-03-08T20:25:37.000Z | tests/test_commands.py | pielmach/wuerfelbecher | 8a9cfdab8793983c10853e2e311ff1b98dae7da4 | [
"MIT"
] | 1 | 2021-09-02T21:22:37.000Z | 2021-09-02T21:22:37.000Z | import unittest
from wuerfelbecher import commands, statistics
class TestCommands(unittest.TestCase):
def setUp(self):
statistics.init_statcounter()
def test_roll(self):
self.assertTrue(commands.roll("d2") in ["[ **1** ]", "[ **2** ]"])
self.assertTrue(commands.roll("D2") in ["[ **1** ]", "[ **2** ]"])
self.assertTrue(commands.roll("w2") in ["[ **1** ]", "[ **2** ]"])
self.assertTrue(commands.roll("W2") in ["[ **1** ]", "[ **2** ]"])
self.assertTrue(commands.roll(" d2") in ["[ **1** ]", "[ **2** ]"])
self.assertTrue(commands.roll("d2 ") in ["[ **1** ]", "[ **2** ]"])
self.assertTrue(commands.roll("d2-1") in ["[ **1** ]-1=**0**", "[ **2** ]-1=**1**"])
self.assertTrue(commands.roll("d2+1") in ["[ **1** ]+1=**2**", "[ **2** ]+1=**3**"])
self.assertTrue(commands.roll("2d2") in ["[ **1 1** ]", "[ **1 2** ]", "[ **2 1** ]", "[ **2 2** ]"])
self.assertTrue(
commands.roll("2d2-1")
in ["[ **1 1** ]-1=**1**", "[ **1 2** ]-1=**2**", "[ **2 1** ]-1=**2**", "[ **2 2** ]-1=**3**"]
)
self.assertTrue(
commands.roll("2d2+") in ["[ **1 1** ]=**2**", "[ **1 2** ]=**3**", "[ **2 1** ]=**3**", "[ **2 2** ]=**4**"]
)
self.assertTrue(
commands.roll("2d2+0") in ["[ **1 1** ]=**2**", "[ **1 2** ]=**3**", "[ **2 1** ]=**3**", "[ **2 2** ]=**4**"]
)
self.assertTrue(
commands.roll("d2 d2")
in ["[ **1** ] [ **1** ]", "[ **1** ] [ **2** ]", "[ **2** ] [ **1** ]", "[ **2** ] [ **2** ]"]
)
self.assertTrue(
commands.roll(" ".join(["d2", "d2"]))
in ["[ **1** ] [ **1** ]", "[ **1** ] [ **2** ]", "[ **2** ] [ **1** ]", "[ **2** ] [ **2** ]"]
)
self.assertEqual(commands.roll("foo"), "You shall not pass! Ask for *!help* if you fear my power!")
self.assertEqual(commands.roll("d2!"), "You shall not pass! Ask for *!help* if you fear my power!")
self.assertEqual(commands.roll("!w2"), "You shall not pass! Ask for *!help* if you fear my power!")
self.assertEqual(commands.roll("d0"), "You shall not pass! Ask for *!help* if you fear my power!")
def test_stats(self):
self.assertEqual(commands.stats("d2"), "You rolled 0 times and those were the rolls:\n>>> 1: 0\n2: 0\n")
self.assertEqual(commands.stats("W2"), "You rolled 0 times and those were the rolls:\n>>> 1: 0\n2: 0\n")
self.assertEqual(commands.stats("foo"), "You shall not pass! Ask for *!help* if you fear my power!")
if __name__ == "__main__":
unittest.main()
| 52.313725 | 126 | 0.453523 |
73dc51ea3cceb33884c0c494271b616fd040d038 | 5,930 | py | Python | pyop2/pyparloop.py | chromy/PyOP2 | 8a1955c628b795019485c9771709c338a806e661 | [
"BSD-3-Clause"
] | null | null | null | pyop2/pyparloop.py | chromy/PyOP2 | 8a1955c628b795019485c9771709c338a806e661 | [
"BSD-3-Clause"
] | null | null | null | pyop2/pyparloop.py | chromy/PyOP2 | 8a1955c628b795019485c9771709c338a806e661 | [
"BSD-3-Clause"
] | null | null | null | # This file is part of PyOP2
#
# PyOP2 is Copyright (c) 2012-2014, Imperial College London and
# others. Please see the AUTHORS file in the main source directory for
# a full list of copyright holders. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Imperial College London or that of other
# contributors may not be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTERS
# ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""A stub implementation of "Python" parallel loops.
This basically executes a python function over the iteration set,
feeding it the appropriate data for each set entity.
Example usage::
.. code-block:: python
   s = op2.Set(4)
d = op2.Dat(s)
d2 = op2.Dat(s**2)
   m = op2.Map(s, s, 2, np.dstack((np.arange(4),
                                   np.roll(np.arange(4), -1))))
def fn(x, y):
x[0] = y[0]
x[1] = y[1]
d.data[:] = np.arange(4)
op2.par_loop(fn, s, d2(op2.WRITE), d(op2.READ, m))
print d2.data
# [[ 0. 1.]
# [ 1. 2.]
# [ 2. 3.]
# [ 3. 0.]]
def fn2(x, y):
x[0] += y[0]
x[1] += y[0]
   op2.par_loop(fn2, s, d2(op2.INC), d(op2.READ, m[1]))
print d2.data
# [[ 1. 2.]
# [ 3. 4.]
# [ 5. 6.]
# [ 3. 0.]]
"""
import base
import device
# Fake kernel for type checking
class Kernel(base.Kernel):
@classmethod
def _cache_key(cls, *args, **kwargs):
return None
def __init__(self, code, name=None, **kwargs):
self._func = code
def __call__(self, *args):
return self._func(*args)
# Inherit from parloop for type checking and init
class ParLoop(base.ParLoop):
def _compute(self, part):
if part.set._extruded:
raise NotImplementedError
if any(arg._is_mat for arg in self.args):
raise NotImplementedError
subset = isinstance(self._it_space._iterset, base.Subset)
for arg in self.args:
if arg._is_dat and arg.data._is_allocated:
for d in arg.data:
d._data.setflags(write=True)
# UGH, we need to move data back from the device, since
# evaluation tries to leave it on the device as much as
# possible. We can't use public accessors here to get
# round this, because they'd force the evaluation of any
# pending computation, which includes this computation.
if arg._is_dat and isinstance(arg.data, device.Dat):
arg.data._from_device()
# Just walk over the iteration set
for e in range(part.offset, part.offset + part.size):
args = []
if subset:
idx = self._it_space._iterset._indices[e]
else:
idx = e
for arg in self.args:
if arg._is_global:
args.append(arg.data._data)
elif arg._is_direct:
args.append(arg.data._data[idx, ...])
elif arg._is_indirect:
if isinstance(arg.idx, base.IterationIndex):
raise NotImplementedError
if arg._is_vec_map:
args.append(arg.data._data[arg.map.values_with_halo[idx], ...])
else:
args.append(arg.data._data[arg.map.values_with_halo[idx, arg.idx:arg.idx+1],
...])
if arg.access is base.READ:
args[-1].setflags(write=False)
if args[-1].shape == ():
args[-1] = args[-1].reshape(1)
self._kernel(*args)
for arg, tmp in zip(self.args, args):
if arg.access is base.READ:
continue
if arg._is_global:
arg.data._data[:] = tmp[:]
elif arg._is_direct:
arg.data._data[idx, ...] = tmp[:]
elif arg._is_indirect:
if arg._is_vec_map:
arg.data._data[arg.map.values_with_halo[idx], ...] = tmp[:]
else:
arg.data._data[arg.map.values_with_halo[idx, arg.idx:arg.idx+1]] = tmp[:]
for arg in self.args:
if arg._is_dat and arg.data._is_allocated:
for d in arg.data:
d._data.setflags(write=False)
# UGH, set state of data to HOST, marking device data as
# out of date.
if arg._is_dat and isinstance(arg.data, device.Dat):
arg.data.state = device.DeviceDataMixin.HOST
| 36.832298 | 100 | 0.587184 |
73dc65335d81042de142d1b85b6d6b13e3b10450 | 4,419 | py | Python | drf_haystack/mixins.py | d1g1tinc/drf-haystack | 7bf138948562ebcf3470954a4627dc12f6b09b96 | [
"MIT"
] | null | null | null | drf_haystack/mixins.py | d1g1tinc/drf-haystack | 7bf138948562ebcf3470954a4627dc12f6b09b96 | [
"MIT"
] | null | null | null | drf_haystack/mixins.py | d1g1tinc/drf-haystack | 7bf138948562ebcf3470954a4627dc12f6b09b96 | [
"MIT"
] | null | null | null | from rest_framework.decorators import action
from rest_framework.response import Response
from drf_haystack.filters import HaystackFacetFilter
class MoreLikeThisMixin:
"""
Mixin class for supporting "more like this" on an API View.
"""
@action(detail=True, methods=["get"], url_path="more-like-this")
def more_like_this(self, request, pk=None):
"""
Sets up a detail route for ``more-like-this`` results.
Note that you'll need backend support in order to take advantage of this.
        This will add e.g. ^search/{pk}/more-like-this/$ to your existing ^search pattern.
"""
obj = self.get_object().object
queryset = self.filter_queryset(self.get_queryset()).more_like_this(obj)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class FacetMixin:
"""
Mixin class for supporting faceting on an API View.
"""
facet_filter_backends = [HaystackFacetFilter]
facet_serializer_class = None
facet_objects_serializer_class = None
facet_query_params_text = "selected_facets"
@action(detail=False, methods=["get"], url_path="facets")
def facets(self, request):
"""
Sets up a list route for ``faceted`` results.
        This will add e.g. ^search/facets/$ to your existing ^search pattern.
"""
queryset = self.filter_facet_queryset(self.get_queryset())
for facet in request.query_params.getlist(self.facet_query_params_text):
if ":" not in facet:
continue
field, value = facet.split(":", 1)
if value:
queryset = queryset.narrow('%s:"%s"' % (field, queryset.query.clean(value)))
serializer = self.get_facet_serializer(queryset.facet_counts(), objects=queryset, many=False)
return Response(serializer.data)
def filter_facet_queryset(self, queryset):
"""
Given a search queryset, filter it with whichever facet filter backends
in use.
"""
for backend in list(self.facet_filter_backends):
queryset = backend().filter_queryset(self.request, queryset, self)
if self.load_all:
queryset = queryset.load_all()
return queryset
def get_facet_serializer(self, *args, **kwargs):
"""
Return the facet serializer instance that should be used for
serializing faceted output.
"""
assert "objects" in kwargs, "`objects` is a required argument to `get_facet_serializer()`"
facet_serializer_class = self.get_facet_serializer_class()
kwargs["context"] = self.get_serializer_context()
kwargs["context"].update(
{
"objects": kwargs.pop("objects"),
"facet_query_params_text": self.facet_query_params_text,
}
)
return facet_serializer_class(*args, **kwargs)
def get_facet_serializer_class(self):
"""
Return the class to use for serializing facets.
Defaults to using ``self.facet_serializer_class``.
"""
if self.facet_serializer_class is None:
raise AttributeError(
"%(cls)s should either include a `facet_serializer_class` attribute, "
"or override %(cls)s.get_facet_serializer_class() method." % {"cls": self.__class__.__name__}
)
return self.facet_serializer_class
def get_facet_objects_serializer(self, *args, **kwargs):
"""
Return the serializer instance which should be used for
serializing faceted objects.
"""
facet_objects_serializer_class = self.get_facet_objects_serializer_class()
kwargs["context"] = self.get_serializer_context()
return facet_objects_serializer_class(*args, **kwargs)
def get_facet_objects_serializer_class(self):
"""
Return the class to use for serializing faceted objects.
        Defaults to using the view's ``self.serializer_class`` if not
``self.facet_objects_serializer_class`` is set.
"""
return self.facet_objects_serializer_class or super(FacetMixin, self).get_serializer_class()
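# Hedged usage sketch (added for illustration; not part of the original
# module).  A search ViewSet typically combines these mixins with
# drf-haystack's HaystackViewSet; `Note`, `NoteSearchSerializer` and
# `NoteFacetSerializer` are hypothetical names used only for illustration.
#
#     from drf_haystack.viewsets import HaystackViewSet
#
#     class NoteSearchViewSet(MoreLikeThisMixin, FacetMixin, HaystackViewSet):
#         index_models = [Note]
#         serializer_class = NoteSearchSerializer
#         facet_serializer_class = NoteFacetSerializer
#
# Registered with a router under ^search, this exposes ^search/,
# ^search/{pk}/more-like-this/ and ^search/facets/.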
| 36.825 | 109 | 0.651957 |
73dc771b45e36d7a009a600ea210b7e0b0fdd1ee | 1,156 | py | Python | Python/models.py | hobogalaxy/ISA | aae4b9b4e30d07a4fe2ddc0900973bf134cc8fda | [
"MIT"
] | 1 | 2020-07-23T12:55:01.000Z | 2020-07-23T12:55:01.000Z | Python/models.py | hobogalaxy/ISA | aae4b9b4e30d07a4fe2ddc0900973bf134cc8fda | [
"MIT"
] | 22 | 2020-07-23T12:55:36.000Z | 2021-03-19T00:05:49.000Z | Python/models.py | hobogalaxy/ISA | aae4b9b4e30d07a4fe2ddc0900973bf134cc8fda | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.distributions import Normal
class Actor(nn.Module):
def __init__(self, obs_space, hidden_size, action_space):
super(Actor, self).__init__()
self.actor = nn.Sequential(
nn.Linear(obs_space, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, 64),
nn.Tanh(),
nn.Linear(64, action_space),
nn.Tanh()
)
self.log_std = nn.Parameter(torch.zeros(action_space), requires_grad=True)
def forward(self, state):
mean = self.actor(state)
std = self.log_std.expand_as(mean).exp()
pi = Normal(mean, std)
a = pi.sample()
return a, pi.log_prob(a).sum(1, keepdim=True), pi
class Critic(nn.Module):
def __init__(self, obs_space, hidden_size):
super(Critic, self).__init__()
self.critic = nn.Sequential(
nn.Linear(obs_space, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, 64),
nn.Tanh(),
nn.Linear(64, 1)
)
def forward(self, state):
v = self.critic(state)
return v
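# Hedged usage sketch appended for illustration (not part of the original
# file): instantiate both networks and run a forward pass on dummy data.
# The sizes below (obs dim 8, hidden 128, action dim 2, batch 16) are
# arbitrary example values.
if __name__ == "__main__":
    actor = Actor(obs_space=8, hidden_size=128, action_space=2)
    critic = Critic(obs_space=8, hidden_size=128)
    state = torch.zeros(16, 8)             # batch of 16 dummy observations
    action, log_prob, dist = actor(state)  # action: (16, 2), log_prob: (16, 1)
    value = critic(state)                  # value estimate: (16, 1)
    print(action.shape, log_prob.shape, value.shape)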
| 26.272727 | 82 | 0.570069 |
73dc823f2d1601947b8e421956d707138415a05f | 14,121 | py | Python | Utils/HandlerUtil.py | jamvar/azure-linux-extensions | 66610daae2ef09f7920d9c4aa2e99a3035fe76a6 | [
"Apache-2.0"
] | null | null | null | Utils/HandlerUtil.py | jamvar/azure-linux-extensions | 66610daae2ef09f7920d9c4aa2e99a3035fe76a6 | [
"Apache-2.0"
] | null | null | null | Utils/HandlerUtil.py | jamvar/azure-linux-extensions | 66610daae2ef09f7920d9c4aa2e99a3035fe76a6 | [
"Apache-2.0"
] | null | null | null | #
# Handler library for Linux IaaS
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
JSON def:
HandlerEnvironment.json
[{
"name": "ExampleHandlerLinux",
"seqNo": "seqNo",
"version": "1.0",
"handlerEnvironment": {
"logFolder": "<your log folder location>",
"configFolder": "<your config folder location>",
"statusFolder": "<your status folder location>",
"heartbeatFile": "<your heartbeat file location>",
}
}]
Example ./config/1.settings
"{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1","protectedSettings":
"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}"
Example HeartBeat
{
"version": 1.0,
"heartbeat" : {
"status": "ready",
"code": 0,
"Message": "Sample Handler running. Waiting for a new configuration from user."
}
}
Example Status Report:
[{"version":"1.0","timestampUTC":"2014-05-29T04:20:13Z","status":{"name":"Chef Extension Handler","operation":"chef-client-run","status":"success","code":0,"formattedMessage":{"lang":"en-US","message":"Chef-client run success"}}}]
"""
import os
import os.path
import sys
import imp
import base64
import json
import time
from xml.etree import ElementTree
from os.path import join
from Utils.WAAgentUtil import waagent
from waagent import LoggerInit
DateTimeFormat = "%Y-%m-%dT%H:%M:%SZ"
MANIFEST_XML = "manifest.xml"
class HandlerContext:
def __init__(self,name):
self._name = name
self._version = '0.0'
self._config_dir = None
self._log_dir = None
self._log_file = None
self._status_dir = None
self._heartbeat_file = None
self._seq_no = -1
self._status_file = None
self._settings_file = None
self._config = None
return
class HandlerUtility:
def __init__(self, log, error, s_name=None, l_name=None, extension_version=None, logFileName = 'extension.log'):
self._log = log
self._error = error
self._logFileName = logFileName
if s_name is None or l_name is None or extension_version is None:
(l_name, s_name, extension_version) = self._get_extension_info()
self._short_name = s_name
self._extension_version = extension_version
self._log_prefix = '[%s-%s] ' % (l_name, extension_version)
def get_extension_version(self):
return self._extension_version
def _get_log_prefix(self):
return self._log_prefix
def _get_extension_info(self):
if os.path.isfile(MANIFEST_XML):
return self._get_extension_info_manifest()
ext_dir = os.path.basename(os.getcwd())
(long_name, version) = ext_dir.split('-')
short_name = long_name.split('.')[-1]
return long_name, short_name, version
def _get_extension_info_manifest(self):
with open(MANIFEST_XML) as fh:
doc = ElementTree.parse(fh)
namespace = doc.find('{http://schemas.microsoft.com/windowsazure}ProviderNameSpace').text
short_name = doc.find('{http://schemas.microsoft.com/windowsazure}Type').text
version = doc.find('{http://schemas.microsoft.com/windowsazure}Version').text
long_name = "%s.%s" % (namespace, short_name)
return (long_name, short_name, version)
def _get_current_seq_no(self, config_folder):
seq_no = -1
cur_seq_no = -1
freshest_time = None
for subdir, dirs, files in os.walk(config_folder):
for file in files:
try:
cur_seq_no = int(os.path.basename(file).split('.')[0])
if(freshest_time == None):
freshest_time = os.path.getmtime(join(config_folder,file))
seq_no = cur_seq_no
else:
current_file_m_time = os.path.getmtime(join(config_folder,file))
if(current_file_m_time > freshest_time):
freshest_time=current_file_m_time
seq_no = cur_seq_no
except ValueError:
continue
return seq_no
def log(self, message):
self._log(self._get_log_prefix() + message)
def error(self, message):
self._error(self._get_log_prefix() + message)
def _parse_config(self, ctxt):
config = None
try:
config=json.loads(ctxt)
except:
self.error('JSON exception decoding ' + ctxt)
if config is None:
self.error("JSON error processing settings file:" + ctxt)
else:
handlerSettings = config['runtimeSettings'][0]['handlerSettings']
if 'protectedSettings' in handlerSettings and \
'protectedSettingsCertThumbprint' in handlerSettings and \
handlerSettings['protectedSettings'] is not None and \
handlerSettings["protectedSettingsCertThumbprint"] is not None:
protectedSettings = handlerSettings['protectedSettings']
thumb=handlerSettings['protectedSettingsCertThumbprint']
cert=waagent.LibDir+'/'+thumb+'.crt'
pkey=waagent.LibDir+'/'+thumb+'.prv'
unencodedSettings = base64.standard_b64decode(protectedSettings)
openSSLcmd = "openssl smime -inform DER -decrypt -recip {0} -inkey {1}"
cleartxt = waagent.RunSendStdin(openSSLcmd.format(cert, pkey), unencodedSettings)[1]
if cleartxt is None:
self.error("OpenSSL decode error using thumbprint " + thumb )
self.do_exit(1,"Enable",'error','1', 'Failed to decrypt protectedSettings')
jctxt=''
try:
jctxt=json.loads(cleartxt)
except:
self.error('JSON exception decoding ' + cleartxt)
handlerSettings['protectedSettings']=jctxt
self.log('Config decoded correctly.')
return config
def do_parse_context(self,operation):
_context = self.try_parse_context()
if not _context:
self.do_exit(1,operation,'error','1', operation + ' Failed')
return _context
def try_parse_context(self):
self._context = HandlerContext(self._short_name)
handler_env=None
config=None
ctxt=None
code=0
# get the HandlerEnvironment.json. According to the extension handler spec, it is always in the ./ directory
self.log('cwd is ' + os.path.realpath(os.path.curdir))
handler_env_file='./HandlerEnvironment.json'
if not os.path.isfile(handler_env_file):
self.error("Unable to locate " + handler_env_file)
return None
ctxt = waagent.GetFileContents(handler_env_file)
if ctxt == None :
self.error("Unable to read " + handler_env_file)
try:
handler_env=json.loads(ctxt)
except:
pass
if handler_env == None :
self.log("JSON error processing " + handler_env_file)
return None
if type(handler_env) == list:
handler_env = handler_env[0]
self._context._name = handler_env['name']
self._context._version = str(handler_env['version'])
self._context._config_dir=handler_env['handlerEnvironment']['configFolder']
self._context._log_dir= handler_env['handlerEnvironment']['logFolder']
self._context._log_file= os.path.join(handler_env['handlerEnvironment']['logFolder'],self._logFileName)
self._change_log_file()
self._context._status_dir=handler_env['handlerEnvironment']['statusFolder']
self._context._heartbeat_file=handler_env['handlerEnvironment']['heartbeatFile']
self._context._seq_no = self._get_current_seq_no(self._context._config_dir)
if self._context._seq_no < 0:
self.error("Unable to locate a .settings file!")
return None
self._context._seq_no = str(self._context._seq_no)
self.log('sequence number is ' + self._context._seq_no)
self._context._status_file= os.path.join(self._context._status_dir, self._context._seq_no +'.status')
self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')
self.log("setting file path is" + self._context._settings_file)
ctxt=None
ctxt=waagent.GetFileContents(self._context._settings_file)
if ctxt == None :
error_msg = 'Unable to read ' + self._context._settings_file + '. '
self.error(error_msg)
return None
self.log("JSON config: " + ctxt)
self._context._config = self._parse_config(ctxt)
return self._context
def _change_log_file(self):
self.log("Change log file to " + self._context._log_file)
LoggerInit(self._context._log_file,'/dev/stdout')
self._log = waagent.Log
self._error = waagent.Error
def set_verbose_log(self, verbose):
if(verbose == "1" or verbose == 1):
self.log("Enable verbose log")
LoggerInit(self._context._log_file, '/dev/stdout', verbose=True)
else:
self.log("Disable verbose log")
LoggerInit(self._context._log_file, '/dev/stdout', verbose=False)
def is_seq_smaller(self):
return int(self._context._seq_no) <= self._get_most_recent_seq()
def save_seq(self):
self._set_most_recent_seq(self._context._seq_no)
self.log("set most recent sequence number to " + self._context._seq_no)
def exit_if_enabled(self):
self.exit_if_seq_smaller()
def exit_if_seq_smaller(self):
if(self.is_seq_smaller()):
self.log("Current sequence number, " + self._context._seq_no + ", is not greater than the sequnce number of the most recent executed configuration. Exiting...")
sys.exit(0)
self.save_seq()
def _get_most_recent_seq(self):
if(os.path.isfile('mrseq')):
seq = waagent.GetFileContents('mrseq')
if(seq):
return int(seq)
return -1
def is_current_config_seq_greater_inused(self):
return int(self._context._seq_no) > self._get_most_recent_seq()
def get_inused_config_seq(self):
return self._get_most_recent_seq()
def set_inused_config_seq(self,seq):
self._set_most_recent_seq(seq)
def _set_most_recent_seq(self,seq):
waagent.SetFileContents('mrseq', str(seq))
def do_status_report(self, operation, status, status_code, message):
self.log("{0},{1},{2},{3}".format(operation, status, status_code, message))
tstamp=time.strftime(DateTimeFormat, time.gmtime())
stat = [{
"version" : self._context._version,
"timestampUTC" : tstamp,
"status" : {
"name" : self._context._name,
"operation" : operation,
"status" : status,
"code" : status_code,
"formattedMessage" : {
"lang" : "en-US",
"message" : message
}
}
}]
stat_rept = json.dumps(stat)
if self._context._status_file:
tmp = "%s.tmp" %(self._context._status_file)
with open(tmp,'w+') as f:
f.write(stat_rept)
os.rename(tmp, self._context._status_file)
def do_heartbeat_report(self, heartbeat_file,status,code,message):
# heartbeat
health_report='[{"version":"1.0","heartbeat":{"status":"' + status+ '","code":"'+ code + '","Message":"' + message + '"}}]'
if waagent.SetFileContents(heartbeat_file,health_report) == None :
            self.error('Unable to write heartbeat info to ' + heartbeat_file)
def do_exit(self,exit_code,operation,status,code,message):
try:
self.do_status_report(operation, status,code,message)
except Exception as e:
self.log("Can't update status: "+str(e))
sys.exit(exit_code)
def get_name(self):
return self._context._name
def get_seq_no(self):
return self._context._seq_no
def get_log_dir(self):
return self._context._log_dir
def get_handler_settings(self):
if (self._context._config != None):
return self._context._config['runtimeSettings'][0]['handlerSettings']
return None
def get_protected_settings(self):
if (self._context._config != None):
return self.get_handler_settings().get('protectedSettings')
return None
def get_public_settings(self):
handlerSettings = self.get_handler_settings()
if (handlerSettings != None):
return self.get_handler_settings().get('publicSettings')
return None
| 39.444134 | 636 | 0.638269 |
73dcc0aab7a2c88ba61706b4d89f885d2bfb1b40 | 26,085 | py | Python | DCGAN_FID_batched/model.py | LaudateCorpus1/first_order_gan | ed9d738b6967c269ba23c814c314a156de64a27b | [
"Apache-2.0"
] | 41 | 2018-02-13T12:28:05.000Z | 2021-11-14T17:34:19.000Z | DCGAN_FID_batched/model.py | LaudateCorpus1/first_order_gan | ed9d738b6967c269ba23c814c314a156de64a27b | [
"Apache-2.0"
] | 1 | 2022-02-24T01:51:07.000Z | 2022-02-24T01:51:07.000Z | DCGAN_FID_batched/model.py | LaudateCorpus1/first_order_gan | ed9d738b6967c269ba23c814c314a156de64a27b | [
"Apache-2.0"
] | 11 | 2018-02-15T00:29:55.000Z | 2022-02-24T01:50:57.000Z | import os
import sys
import time
import math
from glob import glob
import tensorflow as tf
import numpy as np
from random import sample
from data_loader import get_loader
from ops import *
from utils import *
# import fid
sys.path.append(os.path.abspath('../'))
import fid
def conv_out_size_same(size, stride):
return int(math.ceil(float(size) / float(stride)))
def l2norm(x, axis=[1,2,3]):
return tf.squeeze(tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis)))
class DCGAN(object):
def __init__(self, sess, input_height=108, input_width=108, is_crop=True,
batch_size=64, sample_num = 64,
output_height=64, output_width=64,
y_dim=None, z_dim=100, gf_dim=64, df_dim=64,
gfc_dim=1024, dfc_dim=1024, c_dim=3,
gan_method='regular_gan',
lipschitz_penalty=0.1,
gradient_penalty=0.1,
optimize_penalty=False,
calculate_slope=True,
dataset_name='default',
discriminator_batch_norm=True,
input_fname_pattern='*.jpg',
load_checkpoint=False, counter_start=0,
checkpoint_dir=None,
sample_dir=None,
log_dir=None,
stats_path=None,
data_path=None,
fid_n_samples=10000,
fid_sample_batchsize=5000,
fid_batch_size=500,
fid_verbose=False,
num_discriminator_updates=1,
beta1=0.5):
"""
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
y_dim: (optional) Dimension of dim for y. [None]
z_dim: (optional) Dimension of dim for Z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
      gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
gan_method: (optional) Type of gan, penalized_wgan, improved_wgan and regular_gan possible [regular_gan]
      lipschitz_penalty: (optional) Weight of Lipschitz-penalty in the penalized wasserstein setting [0.1]
gradient_penalty: (optional) Weight of gradient-penalty in penalized and improved wasserstein setting [0.1]
optimize_penalty: (optional) When learning the generator, also optimize (increase) the penalty [False]
      calculate_slope: (optional) Explicitly calculate target slope for discriminator in gradient penalty, otherwise set to 1/(2\lambda) [True]
"""
assert lipschitz_penalty >= 0,'the lipschitz penalty must be non-negative'
    assert gradient_penalty >= 0,'the gradient penalty must be non-negative'
assert gan_method in ['penalized_wgan', 'improved_wgan', 'regular_gan'], "the " \
"gan method must be of type 'penalized_wgan', 'improved_wgan' or 'regular_gan', not " + str(gan_method)
assert num_discriminator_updates >= 1, 'num_discriminator_updates must be at least 1'
self.sess = sess
self.is_crop = is_crop
self.is_grayscale = (c_dim == 1)
self.batch_size = batch_size
self.sample_num = sample_num
self.input_height = input_height
self.input_width = input_width
self.output_height = output_height
self.output_width = output_width
self.y_dim = y_dim
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
self.c_dim = c_dim
# Batch normalization : deals with poor initialization helps gradient flow
if discriminator_batch_norm:
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.d_bn3 = batch_norm(name='d_bn3')
else:
self.d_bn1 = lambda x: tf.identity(x, name='d_bn1_id')
self.d_bn2 = lambda x: tf.identity(x, name='d_bn2_id')
self.d_bn3 = lambda x: tf.identity(x, name='d_bn3_id')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
self.g_bn3 = batch_norm(name='g_bn3')
self.coord = None
self.gan_method = gan_method
self.lipschitz_penalty = lipschitz_penalty
self.gradient_penalty = gradient_penalty
self.optimize_penalty = optimize_penalty
self.calculate_slope = calculate_slope
self.num_discriminator_updates = num_discriminator_updates
self.dataset_name = dataset_name
self.input_fname_pattern = input_fname_pattern
self.load_checkpoint = load_checkpoint
self.checkpoint_dir = checkpoint_dir
self.counter_start = counter_start
self.log_dir = log_dir
self.stats_path = stats_path
self.data_path = data_path
self.file_paths = []
self.fid_n_samples=fid_n_samples
self.fid_sample_batchsize=fid_sample_batchsize
self.fid_batch_size = fid_batch_size
self.fid_verbose = fid_verbose
self.beta1 = beta1
print("build model.. ", end="", flush=True)
self.build_model()
print("ok")
# Model
def build_model(self):
# Learning rate
self.learning_rate_d = tf.Variable(0.0, trainable=False)
self.learning_rate_g = tf.Variable(0.0, trainable=False)
# Inputs
self.inputs, self.paths = get_loader(self.data_path, self.dataset_name, self.batch_size,
self.output_height, 'NHWC', split=None)
inputs = self.inputs
# Random inputs
self.z = tf.random_normal((self.batch_size, self.z_dim), 0., 1.0, dtype=tf.float32, name='z')
self.z_fid = tf.random_normal((self.fid_sample_batchsize, self.z_dim), 0., 1.0,
dtype=tf.float32, name='z_fid')
self.z_samp = tf.random_normal((self.sample_num, self.z_dim), 0., 1.0, dtype=tf.float32, name='z_samp')
self.z_sum = tf.summary.histogram("z", self.z)
# Placeholders
self.fid = tf.Variable(0.0, trainable=False)
# Discriminator and generator
if self.y_dim:
print()
print("Conditional GAN for MNIST not supported.")
raise SystemExit()
else:
self.G = self.generator(self.z, batch_size=self.batch_size)
self.D_real, self.D_logits_real = self.discriminator(inputs)
self.sampler_fid = self.sampler_func(self.z_fid, self.fid_sample_batchsize)
self.sampler = self.sampler_func(self.z_samp, self.sample_num)
self.D_fake, self.D_logits_fake = self.discriminator(self.G, reuse=True)
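    # Blend a small random fraction of each real image into the corresponding
    # generated one (alpha is drawn per sample from [0, 0.01), see maxval below),
    # and also evaluate the discriminator on these almost-generated interpolates.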
alpha = tf.random_uniform(
shape=[self.batch_size,1,1,1],
minval=0.,
maxval=0.01
)
self.G_interpolate = alpha * inputs + (1. - alpha) * self.G
self.D_interpolate, self.D_logits_interpolate = self.discriminator(self.G_interpolate, reuse=True)
# Summaries
self.d_real_sum = tf.summary.histogram("d_real", self.D_real)
self.d_fake_sum = tf.summary.histogram("d_fake", self.D_fake)
self.G_sum = tf.summary.image("G", self.G)
def sigmoid_cross_entropy_with_logits(x, y):
try:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)
except:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, targets=y)
# Discriminator Loss Penalty
self.d_lipschitz_penalty_loss = tf.zeros([])
self.d_gradient_penalty_loss = tf.zeros([])
self.d_mean_slope_target = tf.zeros([])
self.d_lipschitz_estimate = tf.zeros([])
if self.gan_method == 'penalized_wgan':
# Discriminator Loss Real and Fake
self.d_loss_real = -1 * tf.reduce_mean(self.D_logits_real)
# Discriminator Loss Fake
self.d_loss_fake = tf.reduce_mean(self.D_logits_interpolate)
# Generator Loss
self.g_loss = -1 * tf.reduce_mean(self.D_logits_interpolate)
# Discriminator Lipschitz Loss Penalty
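      # i.e. lipschitz_penalty * E[ (f(x) - f(x'))^2 / ||x - x'|| ], with the
      # ratio forced to 0 whenever ||x - x'|| is numerically zero (see below).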
top = tf.squeeze(self.D_logits_real - self.D_logits_interpolate)
# L2 distance between real and fake inputs
bot = l2norm(inputs - self.G_interpolate)
# If bot == 0 return 0, else return top / bot
diff_penalty = tf.where(tf.less(bot, 10e-9 * tf.ones_like(top,dtype=tf.float32)), tf.zeros_like(top,dtype=tf.float32), tf.square(top) / bot)
self.d_lipschitz_estimate = tf.reduce_mean(tf.where(tf.less(bot, 10e-9 * tf.ones_like(top,dtype=tf.float32)), tf.zeros_like(top,dtype=tf.float32), top / bot))
self.d_lipschitz_penalty_loss = self.lipschitz_penalty * tf.reduce_mean(diff_penalty)
# Discriminator Gradient Loss Penalty
gradients = tf.gradients(self.D_logits_interpolate, [self.G_interpolate])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1,2,3]))
if self.calculate_slope:
        # This block computes the per-sample target slope
        #   \frac{\left\Vert \mathbb{E}_{\tilde x\sim\mathbb{P}}\left[(\tilde x - x')\,\frac{f(\tilde x)-f(x')}{\Vert x'-\tilde x\Vert^{3}}\right]\right\Vert}
        #        {\mathbb{E}_{\tilde x\sim\mathbb{P}}\left[\frac{1}{\Vert x'-\tilde x\Vert}\right]}
        # by mapping over the batch: for each sample the map emits the terms whose
        # batch averages estimate the expectations in the numerator and denominator.
def map_function(map_pack):
map_G_interpolate, map_g_logits_interpolate = map_pack
map_input_difference = inputs - map_G_interpolate
map_norm = l2norm(map_input_difference) + 10e-7
map_bot = tf.squeeze(tf.pow(map_norm , -3))
map_top = tf.squeeze(tf.stop_gradient(self.D_logits_real) - map_g_logits_interpolate)
first_output = map_input_difference * tf.reshape(map_top * map_bot, [self.batch_size, 1, 1, 1])
first_output = tf.norm(tf.reduce_mean(first_output, axis=[0]))
second_output = tf.reduce_mean(tf.pow(map_norm , -1))
return tf.maximum(0., first_output / second_output)
def map_function_alt(map_pack):
map_G_interpolate, map_g_logits_interpolate = map_pack
map_input_difference = inputs - map_G_interpolate
map_norm = l2norm(map_input_difference) + 10e-7
map_bot = tf.squeeze(tf.pow(map_norm , -2))
map_top = 1 / (2 * self.lipschitz_penalty)
first_output = map_input_difference * tf.reshape(map_top * map_bot, [self.batch_size, 1, 1, 1])
first_output = tf.norm(tf.reduce_mean(first_output, axis=[0]))
second_output = tf.reduce_mean(tf.pow(map_norm , -1))
return tf.maximum(0., first_output / second_output)
d_slope_target = tf.map_fn(map_function,
(tf.stop_gradient(self.G_interpolate), tf.stop_gradient(self.D_logits_interpolate)),
back_prop=False,
dtype=tf.float32)
else:
d_slope_target = (1. / (2. * self.lipschitz_penalty))
gradient_penalty = tf.reduce_mean(tf.square(slopes - d_slope_target))
self.d_gradient_penalty_loss = self.gradient_penalty * gradient_penalty
self.d_mean_slope_target = tf.reduce_mean(d_slope_target)
if self.gan_method == 'improved_wgan':
# Discriminator Loss Real and Fake
self.d_loss_real = -1 * tf.reduce_mean(self.D_logits_real)
# Discriminator Loss Fake
self.d_loss_fake = tf.reduce_mean(self.D_logits_fake)
# Generator Loss
self.g_loss = -1 * tf.reduce_mean(self.D_logits_fake)
# Discriminator Loss Penalty
alpha = tf.random_uniform(
shape=[self.batch_size,1],
minval=0.,
maxval=1.
)
interpolates = alpha*inputs + ((1-alpha)*self.G)
_, disc_interpolates = self.discriminator(interpolates, reuse=True)
gradients = tf.gradients(disc_interpolates, [interpolates])[0]
slopes = l2norm(gradients)
gradient_penalty = tf.reduce_mean(tf.square(slopes-1.))
self.d_gradient_penalty_loss = self.gradient_penalty * gradient_penalty
if self.gan_method == 'regular_gan':
# Discriminator Loss Real
self.d_loss_real = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits_real, tf.ones_like(self.D_real)))
# Discriminator Loss Fake
self.d_loss_fake = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits_fake, tf.zeros_like(self.D_fake)))
# Generator Loss
self.g_loss = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits_fake, tf.ones_like(self.D_fake)))
self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
# Discriminator Loss Combined
self.d_loss = self.d_loss_real + self.d_loss_fake + self.d_lipschitz_penalty_loss + self.d_gradient_penalty_loss
self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
self.d_gradient_penalty_loss_sum = tf.summary.scalar("d_gradient_penalty_loss", self.d_gradient_penalty_loss)
self.d_lipschitz_penalty_loss_sum = tf.summary.scalar("d_lipschitz_penalty_loss", self.d_lipschitz_penalty_loss)
self.lrate_sum_d = tf.summary.scalar('learning rate d', self.learning_rate_d)
self.lrate_sum_g = tf.summary.scalar('learning rate g', self.learning_rate_g)
self.fid_sum = tf.summary.scalar("FID", self.fid)
# Variables
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
# Train optimizers
opt_d = tf.train.AdamOptimizer(self.learning_rate_d, beta1=self.beta1)
opt_g = tf.train.AdamOptimizer(self.learning_rate_g, beta1=self.beta1)
# Discriminator
grads_and_vars = opt_d.compute_gradients(self.d_loss, var_list=self.d_vars)
grads = []
self.d_optim = opt_d.apply_gradients(grads_and_vars)
# Gradient summaries discriminator
sum_grad_d = []
for i, (grad, vars_) in enumerate(grads_and_vars):
grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(grad)))
sum_grad_d.append(tf.summary.scalar("grad_l2_d_%d_%s" % (i, vars_.name), grad_l2))
# Generator
if self.optimize_penalty:
grads_and_vars = opt_g.compute_gradients(self.g_loss - self.d_lipschitz_penalty_loss - self.d_gradient_penalty_loss, var_list=self.g_vars)
else:
grads_and_vars = opt_g.compute_gradients(self.g_loss, var_list=self.g_vars)
self.g_optim = opt_g.apply_gradients(grads_and_vars)
# Gradient summaries generator
sum_grad_g = []
for i, (grad, vars_) in enumerate(grads_and_vars):
grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(grad)))
sum_grad_g.append(tf.summary.scalar("grad_l2_g_%d_%s" % (i, vars_.name), grad_l2))
# Init:
tf.global_variables_initializer().run()
self.coord = tf.train.Coordinator()
self.threads = tf.train.start_queue_runners(self.sess, self.coord)
# Summaries
self.g_sum = tf.summary.merge([self.z_sum, self.d_fake_sum,
self.G_sum, self.d_loss_fake_sum, self.g_loss_sum, self.lrate_sum_g] + sum_grad_g)
self.d_sum = tf.summary.merge(
[self.z_sum, self.d_real_sum, self.d_loss_real_sum, self.d_loss_sum, self.lrate_sum_d,
self.d_gradient_penalty_loss_sum, self.d_lipschitz_penalty_loss_sum] + sum_grad_d)
self.writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)
# Checkpoint saver
self.saver = tf.train.Saver()
    # check that fid_n_samples is a multiple of fid_sample_batchsize
if not (self.fid_n_samples % self.fid_sample_batchsize == 0):
new_bs = self.fid_n_samples // self.fid_sample_batchsize
n_old = self.fid_n_samples
self.fid_n_samples = new_bs * self.fid_sample_batchsize
print("""!WARNING: fid_sample_batchsize is not a multiple of fid_n_samples.
Number of generated sample will be adjusted form %d to %d """ % (n_old, self.fid_n_samples))
# Train model
def train(self, config):
"""Train DCGAN"""
assert len(self.paths) > 0, 'no data loaded, was model not built?'
print("load train stats.. ", end="", flush=True)
# load precalculated training set statistics
f = np.load(self.stats_path)
mu_real, sigma_real = f['mu'][:], f['sigma'][:]
f.close()
print("ok")
if self.load_checkpoint:
if self.load(self.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
# Batch preparing
batch_nums = min(len(self.paths), config.train_size) // config.batch_size
counter = self.counter_start
errD_fake = 0.
errD_real = 0.
errG = 0.
errG_count = 0
penD_gradient = 0.
penD_lipschitz = 0.
esti_slope = 0.
lipschitz_estimate = 0.
start_time = time.time()
try:
# Loop over epochs
for epoch in range(config.epoch):
# Assign learning rates for d and g
lrate = config.learning_rate_d # * (config.lr_decay_rate_d ** epoch)
self.sess.run(tf.assign(self.learning_rate_d, lrate))
lrate = config.learning_rate_g # * (config.lr_decay_rate_g ** epoch)
self.sess.run(tf.assign(self.learning_rate_g, lrate))
# Loop over batches
for batch_idx in range(batch_nums):
# Update D network
_, errD_fake_, errD_real_, summary_str, penD_gradient_, penD_lipschitz_, esti_slope_, lipschitz_estimate_ = self.sess.run(
[self.d_optim, self.d_loss_fake, self.d_loss_real, self.d_sum,
self.d_gradient_penalty_loss, self.d_lipschitz_penalty_loss, self.d_mean_slope_target, self.d_lipschitz_estimate])
for i in range(self.num_discriminator_updates - 1):
self.sess.run([self.d_optim, self.d_loss_fake, self.d_loss_real, self.d_sum,
self.d_gradient_penalty_loss, self.d_lipschitz_penalty_loss])
if np.mod(counter, 20) == 0:
self.writer.add_summary(summary_str, counter)
# Update G network
if config.learning_rate_g > 0.: # and (np.mod(counter, 100) == 0 or lipschitz_estimate_ > 1 / (20 * self.lipschitz_penalty)):
_, errG_, summary_str = self.sess.run([self.g_optim, self.g_loss, self.g_sum])
if np.mod(counter, 20) == 0:
self.writer.add_summary(summary_str, counter)
errG += errG_
errG_count += 1
errD_fake += errD_fake_
errD_real += errD_real_
penD_gradient += penD_gradient_
penD_lipschitz += penD_lipschitz_
esti_slope += esti_slope_
lipschitz_estimate += lipschitz_estimate_
# Print
if np.mod(counter, 100) == 0:
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, lip_pen: %.8f, gradient_pen: %.8f, g_loss: %.8f, d_tgt_slope: %.6f, d_avg_lip: %.6f, g_updates: %3d" \
% (epoch, batch_idx, batch_nums, time.time() - start_time, (errD_fake+errD_real) / 100.,
penD_lipschitz / 100., penD_gradient / 100., errG / 100., esti_slope / 100., lipschitz_estimate / 100., errG_count))
errD_fake = 0.
errD_real = 0.
errG = 0.
errG_count = 0
penD_gradient = 0.
penD_lipschitz = 0.
esti_slope = 0.
lipschitz_estimate = 0.
# Save generated samples and FID
if np.mod(counter, config.fid_eval_steps) == 0:
# Save
try:
samples, d_loss, g_loss = self.sess.run(
[self.sampler, self.d_loss, self.g_loss])
save_images(samples, [8, 8], '{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, batch_idx))
print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
except Exception as e:
print(e)
print("sample image error!")
# FID
print("samples for incept", end="", flush=True)
samples = np.zeros((self.fid_n_samples, self.output_height, self.output_width, 3))
n_batches = self.fid_n_samples // self.fid_sample_batchsize
lo = 0
for btch in range(n_batches):
print("\rsamples for incept %d/%d" % (btch + 1, n_batches), end=" ", flush=True)
samples[lo:(lo+self.fid_sample_batchsize)] = self.sess.run(self.sampler_fid)
lo += self.fid_sample_batchsize
samples = (samples + 1.) * 127.5
print("ok")
mu_gen, sigma_gen = fid.calculate_activation_statistics(samples,
self.sess,
batch_size=self.fid_batch_size,
verbose=self.fid_verbose)
print("calculate FID:", end=" ", flush=True)
try:
FID = fid.calculate_frechet_distance(mu_gen, sigma_gen, mu_real, sigma_real)
except Exception as e:
print(e)
FID=500
print(FID)
# Update event log with FID
self.sess.run(tf.assign(self.fid, FID))
summary_str = self.sess.run(self.fid_sum)
self.writer.add_summary(summary_str, counter)
# Save checkpoint
if (counter != 0) and (np.mod(counter, 2000) == 0):
self.save(config.checkpoint_dir, counter)
counter += 1
except KeyboardInterrupt as e:
self.save(config.checkpoint_dir, counter)
except Exception as e:
print(e)
finally:
# When done, ask the threads to stop.
self.coord.request_stop()
self.coord.join(self.threads)
# Discriminator
def discriminator(self, image, y=None, reuse=False):
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
return tf.nn.sigmoid(h4), h4
# Generator
def generator(self, z, y=None, batch_size=None):
with tf.variable_scope("generator") as scope:
if not self.y_dim:
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# Project `z` and reshape
self.z_, self.h0_w, self.h0_b = linear(
z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin', with_w=True)
self.h0 = tf.reshape(
self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])
h0 = tf.nn.relu(self.g_bn0(self.h0))
# Deconv
self.h1, self.h1_w, self.h1_b = deconv2d(
h0, [batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1', with_w=True)
h1 = tf.nn.relu(self.g_bn1(self.h1))
h2, self.h2_w, self.h2_b = deconv2d(
h1, [batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2', with_w=True)
h2 = tf.nn.relu(self.g_bn2(h2))
h3, self.h3_w, self.h3_b = deconv2d(
h2, [batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3', with_w=True)
h3 = tf.nn.relu(self.g_bn3(h3))
h4, self.h4_w, self.h4_b = deconv2d(
h3, [batch_size, s_h, s_w, self.c_dim], name='g_h4', with_w=True)
return tf.nn.tanh(h4)
# Sampler
def sampler_func(self, z, batch_size, y=None):
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
if not self.y_dim:
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# Project `z` and reshape
h0 = tf.reshape(
linear(z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin'),
[-1, s_h16, s_w16, self.gf_dim * 8])
h0 = tf.nn.relu(self.g_bn0(h0, train=False))
# Deconv
h1 = deconv2d(h0, [batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1')
h1 = tf.nn.relu(self.g_bn1(h1, train=False))
h2 = deconv2d(h1, [batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2')
h2 = tf.nn.relu(self.g_bn2(h2, train=False))
h3 = deconv2d(h2, [batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3')
h3 = tf.nn.relu(self.g_bn3(h3, train=False))
h4 = deconv2d(h3, [batch_size, s_h, s_w, self.c_dim], name='g_h4')
return tf.nn.tanh(h4)
@property
def model_dir(self):
return "{}_{}_{}_{}".format(
self.dataset_name, self.batch_size,
self.output_height, self.output_width)
# Save checkpoint
def save(self, checkpoint_dir, step):
model_name = "DCGAN.model"
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
# Load checkpoint
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Success to read {}".format(ckpt_name))
return True
else:
print(" [*] Failed to find a checkpoint")
return False
| 41.208531 | 172 | 0.654821 |
73dcd61fe5dfcc076bec59c1ab78d7fbae2e0679 | 5,846 | py | Python | dynaphopy/analysis/modes.py | faradaymahe/DynaPhopy | 8519ff616386651acf71166bee02c1a2aef89312 | [
"MIT"
] | 76 | 2015-02-24T02:55:09.000Z | 2022-03-31T09:38:09.000Z | dynaphopy/analysis/modes.py | faradaymahe/DynaPhopy | 8519ff616386651acf71166bee02c1a2aef89312 | [
"MIT"
] | 14 | 2017-07-21T12:37:28.000Z | 2021-09-15T08:50:55.000Z | dynaphopy/analysis/modes.py | faradaymahe/DynaPhopy | 8519ff616386651acf71166bee02c1a2aef89312 | [
"MIT"
] | 38 | 2015-07-02T01:17:27.000Z | 2022-03-25T14:24:33.000Z | from matplotlib import pyplot as plt
from matplotlib.patches import FancyArrowPatch
from matplotlib import lines
from mpl_toolkits.mplot3d import proj3d
import numpy as np
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0.0, 0.0), (0.0, 0.0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
def plot_phonon_modes(structure, eigenvectors, q_vector,
supercell=(1, 1, 1),
draw_primitive=False,
vectors_scale=10,
by_element=True):
atom_type = structure.get_atom_type_index(supercell=supercell)
positions = structure.get_positions(supercell=supercell)
masses = structure.get_masses(supercell=supercell)
elements = structure.get_atomic_elements(supercell=supercell)
np.set_printoptions(precision=8, suppress=True)
cell_t = structure.get_cell()
if draw_primitive:
cell_t = structure.get_primitive_cell()
for i_phonon in range(eigenvectors.shape[0]):
fig = plt.figure(i_phonon+1)
if draw_primitive:
fig.suptitle('Primitive cell')
else:
fig.suptitle('Unit cell')
ax = fig.add_subplot(111, projection='3d')
color_atom=['g','b','m', 'c', 'y', 'k', 'w', 'g', 'b', 'm', 'c', 'y', 'k', 'w','g','b','m', 'c', 'y',
'k', 'w', 'g', 'b', 'm', 'c', 'y', 'k', 'w','g','b','m', 'c', 'y', 'k', 'w', 'g', 'b']
if by_element:
elements_unique = np.unique(elements, return_inverse=True)[1]
else:
elements_unique = atom_type
# Atom positions
for i, atom in enumerate(positions):
ax.plot(atom[0][None], atom[1][None], atom[2][None], 'o', markersize=atom_radius[elements[i]]*30, color=color_atom[elements_unique[i]], alpha=0.8)
# Cell frame
for i in range(3):
cell_side = [(0, cell_t[i, 0]), (0, cell_t[i, 1]), (0, cell_t[i, 2])]
ax.plot3D(*cell_side, color='b')
for j in range(3):
if i != j:
cell_side = [(cell_t[i, l],
cell_t[i, l]+cell_t[j, l]) for l in range(3)]
ax.plot3D(*cell_side, color='b')
for k in range(3):
if k != i and k != j and j > i:
cell_side = [(cell_t[i, l]+cell_t[j, l],
cell_t[i, l]+cell_t[j, l]+cell_t[k, l]) for l in range(3)]
ax.plot3D(*cell_side, color='b')
        # Phonon displacement arrows
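        # The arrow for atom i is the real part of the mass-weighted eigenvector
        # modulated by the phase factor of the q-point:
        #   u_i = Re[ e_i / sqrt(m_i) * exp(1j * q . r_i) ] * vectors_scale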
for i, position in enumerate(positions):
eigenvector_atom = np.array(eigenvectors[i_phonon, atom_type[i], :])
phase = 1.j * np.dot(position, q_vector)
vector = (eigenvector_atom / np.sqrt(masses[atom_type[i]]) * np.exp(phase) * vectors_scale).real
# vector = np.dot(vector, np.linalg.inv(structure.get_primitive_matrix().T))
a = Arrow3D([position[0], position[0]+vector[0]], [position[1], position[1]+vector[1]],
[position[2], position[2]+vector[2]], mutation_scale=20, lw=3, arrowstyle="-|>", color="r")
ax.add_artist(a)
# Legend
atom_type_index_unique = np.unique(atom_type, return_index=True)[0]
if by_element:
atomic_types_unique = np.unique(elements, return_inverse=True)[0]
else:
atomic_types_unique = [elements[i] for i in atom_type_index_unique]
legend_atoms = [ lines.Line2D([0],[0], linestyle='none', c=color_atom[i], marker='o') for i, element in enumerate(atomic_types_unique)]
ax.legend(legend_atoms, atomic_types_unique, numpoints = 1)
# ax.set_axis_off()
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.title('Phonon {0}'.format(i_phonon+1))
plt.axis('auto')
plt.show()
return
atom_radius = {
'H':0.32,
'He':0.93,
'Li':1.23,
'Be':0.9,
'B':0.82,
'C':0.77,
'N':0.75,
'O':0.73,
'F':0.72,
'Ne':0.71,
'Na':1.54,
'Mg':1.36,
'Al':1.18,
'Si':1.11,
'P':1.06,
'S':1.02,
'Cl':0.99,
'Ar':0.98,
'K':2.03,
'Ca':1.74,
'Sc':1.44,
'Ti':1.32,
'V':1.22,
'Cr':1.18,
'Mn':1.17,
'Fe':1.17,
'Co':1.16,
'Ni':1.15,
'Cu':1.17,
'Zn':1.25,
'Ga':1.26,
'Ge':1.22,
'As':1.2,
'Se':1.16,
'Br':1.14,
'Kr':1.12,
'Rb':2.16,
'Sr':1.91,
'Y':1.62,
'Zr':1.45,
'Nb':1.34,
'Mo':1.3,
'Tc':1.27,
'Ru':1.25,
'Rh':1.25,
'Pd':1.28,
'Ag':1.34,
'Cd':1.48,
'In':1.44,
'Sn':1.41,
'Sb':1.4,
'Te':1.36,
'I':1.33,
'Xe':1.31,
'Cs':2.35,
'Ba':1.98,
'La':1.69,
'Ce':1.65,
'Pr':1.65,
'Nd':1.64,
'Pm':1.63,
'Sm':1.62,
'Eu':1.85,
'Gd':1.61,
'Tb':1.59,
'Dy':1.59,
'Ho':1.58,
'Er':1.57,
'Tm':1.56,
'Yb':1.74,
'Lu':1.56,
'Hf':1.44,
'Ta':1.34,
'W':1.3,
'Re':1.28,
'Os':1.26,
'Ir':1.27,
'Pt':1.3,
'Au':1.34,
'Hg':1.49,
'Tl':1.48,
'Pb':1.47,
'Bi':1.46,
'Po':1.46,
'At':1.45,
'Rn':0.0,
'Fr':0.0,
'Ra':0.0,
'Ac':0.0,
'Th':1.65,
'Pa':0.0,
'U':1.42,
'Np':0.0,
'Pu':0.0,
'Am':0.0,
'Cm':0.0,
'Bk':0.0,
'Cf':0.0,
'Es':0.0,
'Fm':0.0,
'Md':0.0,
'No':0.0,
'Lr':0.0,
'Rf':0.0,
'Db':0.0,
'Sg':0.0,
'Bh':0.0,
'Hs':0.0,
'Mt':0.0,
} | 26.572727 | 158 | 0.494355 |
73dcee3fc500b4cd4d8d9ca2e4dfbcc6ec11bee6 | 6,381 | py | Python | src/sage/geometry/hyperbolic_space/hyperbolic_interface.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | [
"BSL-1.0"
] | 3 | 2016-06-19T14:48:31.000Z | 2022-01-28T08:46:01.000Z | src/sage/geometry/hyperbolic_space/hyperbolic_interface.py | rwst/sage | a9d274b9338e6ee24bf35ea8d25875507e51e455 | [
"BSL-1.0"
] | null | null | null | src/sage/geometry/hyperbolic_space/hyperbolic_interface.py | rwst/sage | a9d274b9338e6ee24bf35ea8d25875507e51e455 | [
"BSL-1.0"
] | 7 | 2021-11-08T10:01:59.000Z | 2022-03-03T11:25:52.000Z | # -*- coding: utf-8 -*-
r"""
Interface to Hyperbolic Models
This module provides a convenient interface for interacting with models
of hyperbolic space as well as their points, geodesics, and isometries.
The primary point of this module is to allow the code that implements
hyperbolic space to be sufficiently decoupled while still providing a
convenient user experience.
The interfaces are by default given abbreviated names. For example,
UHP (upper half plane model), PD (Poincaré disk model), KM (Klein disk
model), and HM (hyperboloid model).
.. NOTE::
All of the current models of 2 dimensional hyperbolic space
use the upper half plane model for their computations. This can
lead to some problems, such as long coordinate strings for symbolic
points. For example, the vector ``(1, 0, sqrt(2))`` defines a point
    in the hyperboloid model. Mapping this point to the upper half
    plane and performing computations there may return a vector whose
    components are unsimplified strings containing several ``sqrt(2)``'s.
    Presently, this drawback is outweighed by the rapidity with which new
models can be implemented.
AUTHORS:
- Greg Laun (2013): Initial version.
- Rania Amer, Jean-Philippe Burelle, Bill Goldman, Zach Groton,
Jeremy Lent, Leila Vaden, Derrick Wigglesworth (2011): many of the
methods spread across the files.
EXAMPLES::
sage: HyperbolicPlane().UHP().get_point(2 + I)
Point in UHP I + 2
sage: HyperbolicPlane().PD().get_point(1/2 + I/2)
Point in PD 1/2*I + 1/2
"""
#***********************************************************************
#
# Copyright (C) 2013 Greg Laun <glaun@math.umd.edu>
#
#
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#***********************************************************************
from __future__ import division
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.misc.abstract_method import abstract_method
from sage.categories.sets_cat import Sets
from sage.categories.realizations import Realizations, Category_realization_of_parent
from sage.geometry.hyperbolic_space.hyperbolic_model import (
HyperbolicModelUHP, HyperbolicModelPD,
HyperbolicModelHM, HyperbolicModelKM)
def HyperbolicSpace(n):
"""
Return ``n`` dimensional hyperbolic space.
EXAMPLES::
sage: from sage.geometry.hyperbolic_space.hyperbolic_interface import HyperbolicSpace
sage: HyperbolicSpace(2)
Hyperbolic plane
"""
if n == 2:
return HyperbolicPlane()
raise NotImplementedError("currently only implemented in dimension 2")
class HyperbolicPlane(Parent, UniqueRepresentation):
"""
The hyperbolic plane `\mathbb{H}^2`.
Here are the models currently implemented:
- ``UHP`` -- upper half plane
- ``PD`` -- Poincaré disk
- ``KM`` -- Klein disk
- ``HM`` -- hyperboloid model
"""
def __init__(self):
"""
Initialize ``self``.
EXAMPLES::
sage: H = HyperbolicPlane()
sage: TestSuite(H).run()
"""
Parent.__init__(self, category=Sets().Metric().WithRealizations())
self.a_realization() # We create a realization so at least one is known
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: HyperbolicPlane()
Hyperbolic plane
"""
return "Hyperbolic plane"
def a_realization(self):
"""
Return a realization of ``self``.
EXAMPLES::
sage: H = HyperbolicPlane()
sage: H.a_realization()
Hyperbolic plane in the Upper Half Plane Model
"""
return self.UHP()
UHP = HyperbolicModelUHP
UpperHalfPlane = UHP
PD = HyperbolicModelPD
PoincareDisk = PD
KM = HyperbolicModelKM
KleinDisk = KM
HM = HyperbolicModelHM
Hyperboloid = HM
class HyperbolicModels(Category_realization_of_parent):
r"""
The category of hyperbolic models of hyperbolic space.
"""
def __init__(self, base):
r"""
Initialize the hyperbolic models of hyperbolic space.
INPUT:
- ``base`` -- a hyperbolic space
TESTS::
sage: from sage.geometry.hyperbolic_space.hyperbolic_interface import HyperbolicModels
sage: H = HyperbolicPlane()
sage: models = HyperbolicModels(H)
sage: H.UHP() in models
True
"""
Category_realization_of_parent.__init__(self, base)
def _repr_(self):
r"""
Return the representation of ``self``.
EXAMPLES::
sage: from sage.geometry.hyperbolic_space.hyperbolic_interface import HyperbolicModels
sage: H = HyperbolicPlane()
sage: HyperbolicModels(H)
Category of hyperbolic models of Hyperbolic plane
"""
return "Category of hyperbolic models of {}".format(self.base())
def super_categories(self):
r"""
The super categories of ``self``.
EXAMPLES::
sage: from sage.geometry.hyperbolic_space.hyperbolic_interface import HyperbolicModels
sage: H = HyperbolicPlane()
sage: models = HyperbolicModels(H)
sage: models.super_categories()
[Category of metric spaces,
Category of realizations of Hyperbolic plane]
"""
return [Sets().Metric(), Realizations(self.base())]
class ParentMethods:
def _an_element_(self):
"""
Return an element of ``self``.
EXAMPLES::
sage: H = HyperbolicPlane()
sage: H.UHP().an_element()
Point in UHP I
sage: H.PD().an_element()
Point in PD 0
sage: H.KM().an_element()
Point in KM (0, 0)
sage: H.HM().an_element()
Point in HM (0, 0, 1)
"""
from sage.rings.integer_ring import ZZ
return self(self.realization_of().PD().get_point(ZZ.zero()))
| 30.5311 | 98 | 0.62498 |
73dcfaa2855a61522e201feddfc0e638727fd0df | 73,346 | py | Python | pyradio/browser.py | jirikrepl/pyradio | 6fae44aafc24acab3e33cf6b2918709a2c4548cb | [
"MIT"
] | null | null | null | pyradio/browser.py | jirikrepl/pyradio | 6fae44aafc24acab3e33cf6b2918709a2c4548cb | [
"MIT"
] | null | null | null | pyradio/browser.py | jirikrepl/pyradio | 6fae44aafc24acab3e33cf6b2918709a2c4548cb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import curses
try:
from dns import resolver
except ImportError:
pass
from copy import deepcopy
import random
import json
import collections
from operator import itemgetter
try:
import requests
except ImportError:
pass
import threading
import logging
from .player import info_dict_to_list
from .cjkwrap import cjklen, PY3
from .countries import countries
from .simple_curses_widgets import SimpleCursesLineEdit, SimpleCursesHorizontalPushButtons, SimpleCursesWidgetColumns, SimpleCursesCheckBox
import locale
locale.setlocale(locale.LC_ALL, '') # set your locale
logger = logging.getLogger(__name__)
def country_from_server(a_server):
if a_server:
country = a_server.split('.')[0]
up = country[:-1].upper()
if up in countries.keys():
return countries[up]
else:
return country
else:
return None
def capitalize_comma_separated_string(a_string):
sp = a_string.split(',')
for i, n in enumerate(sp):
sp[i] = n.strip().capitalize()
return ', '.join(sp)
class PyRadioStationsBrowser(object):
''' A base class to get results from online radio directory services.
Actual implementations should be subclasses of this one.
'''
BASE_URL = ''
TITLE = ''
_parent = _outer_parent = None
_raw_stations = []
_last_search = None
_internal_header_height = 0
_url_timeout = 3
_search_timeout = 3
_vote_callback = None
_sort = _sort_win = None
    # Normally outer body (holding box, header, internal header) is
    # 2 chars wider than the internal body (holding the stations)
# This property value is half the difference (normally 2 / 2 = 1)
# Used to chgat the columns' separators in internal body
# Check if the cursor is divided as required and adjust
_outer_internal_body_diff = 2
_outer_internal_body_half_diff = 1
def __init__(self,
config,
config_encoding,
session=None,
search=None,
pyradio_info=None,
search_return_function=None,
message_function=None):
''' Initialize the station's browser.
It should return a valid search result (for example,
www.radio-browser.info implementation, returns 100 stations
sorted by number of votes).
Parameters
----------
search
Search parameters to be used instead of the default.
'''
pass
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, val):
self._parent = val
if self._sort:
self._sort._parent = val
@property
def outer_parent(self):
return self._outer_parent
@outer_parent.setter
def outer_parent(self, val):
self._outer_parent = val
if self._sort_win:
self._sort_win._parent = val
@property
def outer_internal_body_half_diff(self):
return self._outer_internal_body_half_diff
@outer_internal_body_half_diff.setter
def outer_internal_body_half_diff(self, value):
raise ValueError('property is read only')
@property
def internal_header_height(self):
return self._internal_header_height
@internal_header_height.setter
def internal_header_height(self, value):
raise ValueError('property is read only')
@property
def title(self):
return self.TITLE
@title.setter
def title(self, value):
self.TITLE = value
@property
def vote_callback(self):
return self._vote_callback
@vote_callback.setter
def vote_callback(self, val):
self._vote_callback = val
def stations(self, playlist_format=1):
return []
def url(self, id_in_list):
''' Return a station's real/playable url
It has to be implemented only in case have_to_retrieve_url is True
Parameters
----------
id_in_list
id in list of stations (0..len-1)
Returns
-------
Real/playable url or '' if failed (string)
'''
return ''
def set_played(self, id_in_list, played):
        ''' Note that a station has been played.
Parameters
----------
id_in_list
id in list of stations (0..len-1)
played
True or False
'''
pass
def search(self, go_back_in_history=True):
return []
def set_encoding(self, id_in_list, new_encoding):
return
def format_station_line(self, id_in_list, pad, width):
return ''
def click(self, a_station):
pass
def vote(self, a_station):
pass
class RadioBrowserInfo(PyRadioStationsBrowser):
BASE_URL = 'api.radio-browser.info'
TITLE = 'Radio Browser '
_headers = {'User-Agent': 'PyRadio/dev',
'Content-Type': 'application/json'}
_raw_stations = []
# the output format to use based on window width
# Default value: -1
    # Possible values: 0..7
# Look at format_station_line() for info
_output_format = -1
_info_len = []
_info_name_len = 0
_raw_stations = []
_internal_header_height = 1
_search_history = []
_search_history_index = -1
_columns_width = {
'votes': 7,
'clickcount': 7,
'bitrate': 7,
'country': 18,
'language': 15,
'state': 18,
'tags': 20,
'codec': 5
}
_server_selection_window = None
_dns_info = None
search_by = _old_search_by = None
keyboard_handler = None
def __init__(self,
config,
config_encoding,
session=None,
search=None,
pyradio_info=None,
search_return_function=None,
message_function=None):
'''
When first_search is True, it means that we are opening
        the browser. If an empty result is returned by the first
        browser search, we show an empty stations' list.
        If it is False and an empty result is returned by the first
        browser search, which means we are already in the browser's
        search screen, we just display the 'no results' message.
All of this is done at radio.py
'''
self.first_search = True
self._cnf = config
if session:
self._session = session
else:
self._session = requests.Session()
self._pyradio_info = pyradio_info.strip()
if self._pyradio_info:
self._headers['User-Agent'] = self._pyradio_info.replace(' ', '/')
self._config_encoding = config_encoding
self._message_function = message_function
self._search_return_function = search_return_function
def initialize(self):
self._dns_info = RadioBrowserInfoDns()
self._server = self._dns_info.give_me_a_server_url()
if logger.isEnabledFor(logging.INFO):
logger.info('random server is ' + self._server)
if self._server:
self._get_title()
self._search_history.append({
'type': 'topvote',
'term': '100',
'post_data': None,
})
self._search_history.append({
'type': 'bytagexact',
'term': 'big band',
'post_data': {'order': 'votes', 'reverse': 'true'},
})
self._search_history.append({
'type': 'search',
'term': '',
'post_data': {'name': 'jaz'},
})
self._search_history_index = 0
return True
return False
@property
def server(self):
return self._server
@property
def add_to_title(self):
return self._server.split('.')[0]
def _get_title(self):
self.TITLE = 'Radio Browser ({})'.format(country_from_server(self._server))
def stations(self, playlist_format=1):
''' Return stations' list (in PyRadio playlist format)
Parameters
----------
playlist_format
0: station name, url
1: station name, url, encoding
2: station name, url, encoding, browser flag (default)
'''
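        # Example row for playlist_format == 2 (values are illustrative):
        #   ['Station Name', 'http://example.com/stream', '', '']
        # The 3rd field is empty when the station uses the configured encoding.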
ret = []
for n in self._raw_stations:
if playlist_format == 0:
ret.append([n['name'], n['url']])
elif playlist_format == 1:
enc = '' if n['encoding'] == self._config_encoding else n['encoding']
ret.append([n['name'], n['url'], enc])
else:
enc = '' if n['encoding'] == self._config_encoding else n['encoding']
ret.append([n['name'], n['url'], enc, ''])
return ret
def url(self, id_in_list):
''' Get a station's url using resolved_url
Parameters
----------
id_in_list
id in list of stations (0..len-1)
Returns
-------
url or '' if failed
'''
if self._raw_stations:
if id_in_list < len(self._raw_stations):
if self._raw_stations[id_in_list]['url_resolved']:
return self._raw_stations[id_in_list]['url_resolved']
else:
return self._raw_stations[id_in_list]['url']
return ''
def click(self, a_station):
def do_click(a_station_uuid):
url = 'http://' + self._server + '/json/url/' + a_station_uuid
try:
r = self._session.get(url=url, headers=self._headers, timeout=(self._search_timeout, 2 * self._search_timeout))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station click result: "{}"'.format(r.text))
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station click failed...')
threading.Thread(target=do_click, args=(self._raw_stations[a_station]['stationuuid'], )).start()
def vote(self, a_station):
url = 'http://' + self._server + '/json/vote/' + self._raw_stations[a_station]['stationuuid']
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Voting for: {}'.format(self._raw_stations[a_station]))
logger.debug('Voting url: ' + url)
try:
r = self._session.get(url=url, headers=self._headers, timeout=(self._search_timeout, 2 * self._search_timeout))
message = json.loads(r.text)
self.vote_result = self._raw_stations[a_station]['name'], message['message'][0].upper() + message['message'][1:]
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Voting result: "{}"'.format(message))
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station voting failed...')
self.vote_result = self._raw_stations[a_station]['name'], 'Voting for station failed'
if self._vote_callback:
self._vote_callback()
def get_info_string(self, a_station, max_width=60):
guide = [
('Name', 'name'),
('URL', 'url'),
('Resolved URL', 'url_resolved'),
('Website', 'homepage'),
('Tags', 'tags'),
('Votes', 'votes'),
('Clicks', 'clickcount'),
('Country', 'country'),
('State', 'state'),
('Language', 'language'),
('Bitrate', 'bitrate'),
('Codec', 'codec')
]
if self._raw_stations[a_station]['url'] == self._raw_stations[a_station]['url_resolved']:
guide.pop(2)
info = collections.OrderedDict()
for n in guide:
info[n[0]] = str(self._raw_stations[a_station][n[1]])
if n[1] == 'bitrate':
info[n[0]] += ' kb/s'
a_list = []
fix_highlight = []
a_list = info_dict_to_list(info, fix_highlight, max_width)
ret = '|' + '\n|'.join(a_list)
# logger.error('DE \n\n{}\n\n'.format(ret))
sp = ret.split('\n')
wrong_wrap = -1
for i, n in enumerate(sp):
# logger.exception('DE {0}: "{1}"'.format(i, n))
if wrong_wrap == i:
sp[i] = n.replace('|', '')
sp[i-1] += sp[i].replace('_', '')
sp[i] = '*' + sp[i]
wrong_wrap = -1
else:
if ': ' not in n:
sp[i] = n[1:]
if n[-1] == ':':
''' wrong wrapping! '''
wrong_wrap = i + 1
sp[i] += '|'
if sp[i][-1] != ' ':
sp[i] += ' '
if sp[i][0] != '|':
sp[i] = '|' + sp[i]
for i, n in enumerate(sp):
if n[0] == '*':
sp.pop(i)
ret = '\n'.join(sp).replace(': |', ':| ').replace(': ', ':| ')
# logger.error('DE \n\n{}\n\n'.format(ret))
return ret, ''
def search(self, go_back_in_history=True):
''' Search for stations with parameters.
Result is limited to 100 stations by default (use the
'limit' parameter to change it).
Parameters
----------
data
A dictionary containing the fields described at
http://www.radio-browser.info/webservice/#Advanced_station_search
Returns
-------
self._raw_stations
A dictionary with a subset of returned station data.
Its format is:
name : station name
id : station id
url : station url
resolved_url : station resolved_url
                tags : station tags
bitrate : station bitrate
hls : HLS status
votes : station votes
clickcount : station clicks
country : station country
                state : station state
language : station language
codec : station codec
encoding : station encoding ('' means utf-8)
'''
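        # The history entries consumed here have the shape seeded in initialize(),
        # for example (terms are illustrative):
        #   {'type': 'topvote', 'term': '100', 'post_data': None}
        #   {'type': 'search', 'term': '', 'post_data': {'name': 'jazz', 'limit': 100}}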
if self._message_function:
self._message_function()
self.search_by = self._old_search_by = None
self._get_search_elements(
self._search_history[self._search_history_index]
)
self._old_search_by = self.search_by
self._sort = None
url = self._format_url(self._search_history[self._search_history_index])
post_data = {}
if self._search_history[self._search_history_index]['post_data']:
post_data = deepcopy(self._search_history[self._search_history_index]['post_data'])
self._output_format = -1
if self._search_type > 0:
if 'limit' not in post_data.keys():
post_data['limit'] = 100
            if 'hidebroken' not in post_data.keys():
post_data['hidebroken'] = True
if logger.isEnabledFor(logging.DEBUG):
logger.debug(' == history = {}'.format(self._search_history[self._search_history_index]))
logger.debug(' == url = "{}"'.format(url))
logger.debug(' == headers = "{}"'.format(self._headers))
logger.debug(' == post_data = "{}"'.format(post_data))
''' keep server results here '''
new_raw_stations = []
try:
r = self._session.get(url=url, headers=self._headers, params=post_data, timeout=(self._search_timeout, 2 * self._search_timeout))
r.raise_for_status()
new_raw_stations = self._extract_data(json.loads(r.text))
# logger.error('DE \n\n{}'.format(new_raw_stations))
ret = True, len(new_raw_stations), go_back_in_history
except requests.exceptions.RequestException as e:
if logger.isEnabledFor(logging.ERROR):
logger.error(e)
self._raw_stations = []
ret = False, 0, go_back_in_history
''' use server result '''
if len(new_raw_stations) > 0:
self._raw_stations = new_raw_stations[:]
if self._search_return_function:
self._search_return_function(ret)
def _get_search_elements(self, a_search):
'''
get "by search" and "reverse"
values from a search dict.
To be used with the sort function
'''
logger.error('DE search in function is "{}"'.format(a_search))
a_term = a_search['term']
p_data = a_search['post_data']
self.search_by = None
self.reverse = False
if a_search['post_data']:
if 'order' in a_search['post_data'].keys():
self.search_by = a_search['post_data']['order']
if 'reverse' in a_search['post_data']:
self.reverse = True if a_search['post_data']['reverse'] == 'true' else False
logger.error('DE search by was "{}"'.format(self.search_by))
if self.search_by is None:
a_type = a_search['type']
if a_type == 'byname':
self.search_by = 'name'
elif a_type == 'topvote':
self.search_by = 'votes'
logger.error('DE search by is votes')
elif a_type == 'clickcount':
self.search_by = 'clickcount'
elif a_type == 'bitrate':
self.search_by = 'bitrate'
elif a_type == 'codec':
self.search_by = 'codec'
elif a_type == 'country':
self.search_by = 'country'
elif a_type == 'state':
self.search_by = 'state'
elif a_type == 'language':
self.search_by = 'language'
elif a_type == 'tags':
self.search_by = 'tags'
if self.search_by is None:
if p_data:
if 'name' in p_data.keys():
self.search_by = 'name'
logger.error('DE search by is name (default)')
if self.search_by is None:
self.search_by = 'name'
logger.error('DE search by is name (default)')
logger.error('DE search by is "{}"'.format(self.search_by))
def get_next(self, search_term, start=0, stop=None):
if search_term:
for n in range(start, len(self._raw_stations)):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('forward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found start from list top """
for n in range(0, start):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('forward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found return None """
if logger.isEnabledFor(logging.DEBUG):
logger.debug('forward search term "{}" not found'.format(search_term))
return None
else:
return None
def get_previous(self, search_term, start=0, stop=None):
if search_term:
for n in range(start, -1, -1):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('backward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found start from list end """
for n in range(len(self._raw_stations) - 1, start, -1):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('backward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found return None """
if logger.isEnabledFor(logging.DEBUG):
logger.debug('backward search term "{}" not found'.format(search_term))
return None
else:
return None
def _search_in_station(self, a_search_term, a_station):
guide = (
'name',
'country',
'codec',
'tags',
'bitrate',
'language'
)
for n in guide:
source = self._raw_stations[a_station][n]
if isinstance(source, int):
''' this is one of the numerical data '''
source = str(source)
if a_search_term.lower() in source.lower():
return True
return False
def _format_url(self, a_search):
if a_search['type'] in ('topvote',
'topclick',
'lastclick',
'lastchange',
'changed',
'improvable',
'broken',
):
url = 'http://{0}{1}'.format(
self._server,
'/json/stations/{}'.format(a_search['type'])
)
if a_search['term'] not in ('', '0'):
url += '/{}'.format(a_search['term'])
self._search_type = 0
elif a_search['type'] in ('byuuid',
'byname',
'bynameexact',
'bycodec',
'bycodecexact',
'bycountry',
'bycountryexact',
'bycountrycodeexact',
'bystate',
'bystateexact',
'bylanguage',
'bylanguageexact',
'bytag',
'bytagexact',
):
url = 'http://{0}{1}/{2}'.format(
self._server,
'/json/stations/{}'.format(a_search['type']),
a_search['term']
)
self._search_type = 1
elif a_search['type'] == 'search':
url = 'http://{0}{1}'.format(
self._server,
'/json/stations/search'
)
self._search_type = 2
return url
def format_empty_line(self, width):
if self._output_format == 0:
return -1, ' '
info = (
(),
('bitrate', ),
('votes', 'bitrate'),
('votes', 'clickcount', 'bitrate'),
('votes', 'clickcount', 'bitrate', 'country'),
('votes', 'clickcount', 'bitrate', 'country', 'language'),
('votes', 'clickcount', 'bitrate', 'country', 'state', 'language'),
('votes', 'clickcount', 'bitrate', 'codec', 'country', 'state', 'language', 'tags')
)
out = ['', '']
i_out = []
for i, n in enumerate(info[self._output_format]):
i_out.append(u'│' + ' ' * self._columns_width[n])
out[1] = ''.join(i_out)
name_width = width-len(out[1])
out[0] = ' ' * name_width
if PY3:
return -1, '{0}{1}'.format(*out)
else:
            return -1, '{0}{1}'.format(
out[0],
out[1].encode('utf-8', 'replace')
)
def format_station_line(self, id_in_list, pad, width):
        ''' Create a formatted line for a station
Parameters
----------
id_in_list
id in list of stations (0..len-1)
pad
length of NUMBER
width
final length of created string
Returns
-------
A string of the following format:
NUMBER. STATION NAME [INFO]
where:
NUMBER
Right padded counter (id_in_list + 1)
STATION NAME
Left padded station name
INFO
Station info. Depending on window width, it can be:
[Votes: XX, Clicks: XX, Bitrate: XXXkb, Country: XXXX],
[Votes: XX, Clicks: XX, Bitrate: XXXkb],
[XXXX v, XXXX, cl, XXXkb],
[Bitrate: XXXkb], or
empty string
'''
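        # Illustrative output for a mid-sized window (exact columns depend on
        # self._output_format; the values shown here are assumptions):
        #   '  1. Some Station Name              │   1234│  128kb'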
info = (u'',
u' {0}{1}kb',
u' {0}{1}│{2}kb',
u' {0}{1}│{2}│{3}kb',
u' {0}{1}│{2}│{3}kb│{4}',
u' {0}{1}│{2}│{3}kb│{4}│{5}',
u' {0}{1}│{2}│{3}kb│{4}│{5}│{6}',
u' {0}{1}│{2}│{3}kb│{4}│{5}│{6}│{7}│{8}',
)
self._get_output_format(width)
# logger.error('DE self._output_format = {}'.format(self._output_format))
out = ['{0}. '.format(str(id_in_list + 1).rjust(pad)), '', '']
# format info field
pl = u'├' if self._raw_stations[id_in_list]['played'] else u'│'
if self._output_format == 7:
# full with state
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._raw_stations[id_in_list]['codec'].rjust(self._columns_width['codec'])[:self._columns_width['codec']],
self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']],
self._raw_stations[id_in_list]['state'].ljust(self._columns_width['state'])[:self._columns_width['state']],
self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language'])[:self._columns_width['language']],
self._raw_stations[id_in_list]['tags'].ljust(self._columns_width['tags'])[:self._columns_width['tags']]
)
if self._output_format == 6:
# full with state
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']],
self._raw_stations[id_in_list]['state'].ljust(self._columns_width['state'])[:self._columns_width['state']],
self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language'])[:self._columns_width['language']]
)
if self._output_format == 5:
# full with state
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']],
self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language'])[:self._columns_width['language']]
)
if self._output_format == 4:
# full or condensed info
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']]
)
elif self._output_format == 2:
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
)
elif self._output_format == 3:
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
)
elif self._output_format == 1:
# Bitrate only
out[2] = info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
)
name_width = width-len(out[0])-len(out[2])
out[1] = self._fix_cjk_string_width(self._raw_stations[id_in_list]['name'].ljust(name_width)[:name_width], name_width)
if PY3:
# if pl == '╞':
# out[2] += '╡'
return (self._raw_stations[id_in_list]['played'],
'{0}{1}{2}'.format(*out))
else:
# on python 2, strings are already in utf-8
return (self._raw_stations[id_in_list]['played'],
'{0}{1}{2}'.format(
out[0].encode('utf-8', 'replace'),
out[1].encode('utf-8', 'replace'),
out[2].encode('utf-8', 'replace')))
def set_encoding(self, id_in_list, new_encoding):
if id_in_list < len(self._raw_stations):
self._raw_stations[id_in_list]['encoding'] = new_encoding
if logger.isEnabledFor(logging.DEBUG):
logger.debug('New encoding set to "{0}" for station "{1}"'.format(new_encoding, self._raw_stations[id_in_list]['name']))
def _fix_cjk_string_width(self, a_string, width):
while cjklen(a_string) > width:
a_string = a_string[:-1]
return a_string
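    # Convert a radio-browser.info search result (list of JSON station records)
    # into the internal list of station dictionaries, normalising numeric fields
    # and updating the maximum column widths as it goes.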
def _extract_data(self, a_search_result):
ret = []
self._max_len = [0, 0]
if a_search_result:
for n in a_search_result:
                ret.append({'name': n['name'].replace(',', ' ')})
                ret[-1]['stationuuid'] = n['stationuuid']
                ret[-1]['url'] = n['url']
                ret[-1]['url_resolved'] = n['url_resolved']
                ret[-1]['played'] = False
                ret[-1]['hls'] = n['hls']
                ret[-1]['countrycode'] = n['countrycode']
                ret[-1]['country'] = n['country']
                ret[-1]['codec'] = n['codec']
                ret[-1]['state'] = n['state']
                ret[-1]['tags'] = n['tags'].replace(',', ', ')
                ret[-1]['homepage'] = n['homepage']
if isinstance(n['clickcount'], int):
# old API
ret[-1]['votes'] = n['votes']
ret[-1]['clickcount'] = n['clickcount']
ret[-1]['bitrate'] = n['bitrate']
else:
# new API
ret[-1]['votes'] = int(n['votes'])
ret[-1]['clickcount'] = int(n['clickcount'])
ret[-1]['bitrate'] = int(n['bitrate'])
ret[-1]['language'] = capitalize_comma_separated_string(n['language'])
ret[-1]['encoding'] = ''
self._get_max_len(ret[-1]['votes'],
ret[-1]['clickcount'])
return ret
def _get_max_len(self, votes, clicks):
        ''' Calculate the maximum length of the numeric data (votes, clicks)
            Parameters
            ----------
            votes
                Number of station votes (string)
            clicks
                Number of station clicks (string)
            Returns
            -------
            self._max_len
                A list: [max votes length,
                         max clickcount length]
        '''
numeric_data = (votes, clicks)
# logger.error('DE numeric_data = {}'.format(numeric_data))
min_data = (6, 7)
for i, x in enumerate(numeric_data):
n = str(x)
if len(n) > self._max_len[i]:
self._max_len[i] = len(n) if len(n) > min_data[i] else min_data[i]
def _get_output_format(self, width):
''' Return output format based on window width
            Parameters
            ----------
            width
                Window width
            Returns
            -------
            self._output_format
                A number 0..7
'''
# now_width = get_terminal_size().columns - 2
if width <= 50:
self._output_format = 0
elif width < 57:
self._output_format = 1
elif width < 65:
self._output_format = 2
elif width < 80:
self._output_format = 3
elif width < 95:
self._output_format = 4
elif width < 120:
self._output_format = 5
elif width < 145:
self._output_format = 6
else:
self._output_format = 7
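    # Compute the x position of each column separator, walking the given column
    # names from the right edge of the window towards the left.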
def _populate_columns_separators(self, a_tuple, width):
ret = []
for i, n in enumerate(a_tuple):
if i == 0:
# logger.error('DE {0} - {1} = {2} - {3}'.format(width, self._columns_width[n], width-self._columns_width[n]-2, n))
ret.append(width - self._columns_width[n] - 2)
else:
# logger.error('{0} -1 - {1} = {2} - {3}'.format(ret[-1], self._columns_width[n], ret[-1] - 1 - self._columns_width[n], n))
ret.append(ret[-1] - 1 - self._columns_width[n])
ret.reverse()
# logger.error('DE \n\nret = {}\n\n'.format(ret))
return ret
def get_columns_separators(self,
width,
use_old_output_format=False,
adjust=0,
adjust_for_body=False,
adjust_for_header=False,
):
        ''' Calculate column separators for a given width
            based on self._output_format.
Parameters
----------
width
Window width to use for the calculation.
use_old_output_format
If True, do not calculate self._output_format
(use what's already calculated).
adjust
Delete adjust from the output
Example:
if the output was [55, 67]
and adjust was 2
the output would become [53, 65]
adjust_for_header
Delete self._outer_internal_body_diff from output
This is to be used for displaying the internal header
adjust_for_body
Delete self._outer_internal_body_half_diff from output
This is to be used for changing columns' separators
color, when displaying body lines (stations' lines).
IMPORTANT
---------
The adjust* parameters are mutually exclusive, which means
that ONLY ONE of them can be used at any given call to the
function. If you fail to comply, the result will be wrong.
Returns
-------
            A list containing the column separators (e.g. [55, 65]).
'''
columns_separotors = []
if not use_old_output_format:
self._get_output_format(width)
if self._output_format == 0:
columns_separotors = []
elif self._output_format == 1:
columns_separotors = [width - self._columns_width['bitrate']]
elif self._output_format == 2:
columns_separotors = self._populate_columns_separators(('bitrate', 'votes'), width)
elif self._output_format == 3:
columns_separotors = self._populate_columns_separators(('bitrate', 'clickcount', 'votes'), width)
elif self._output_format == 4:
columns_separotors = self._populate_columns_separators(('country', 'bitrate', 'clickcount', 'votes'), width)
elif self._output_format == 5:
columns_separotors = self._populate_columns_separators(('language', 'country', 'bitrate', 'clickcount', 'votes'), width)
elif self._output_format == 6:
columns_separotors = self._populate_columns_separators(('language', 'state', 'country', 'bitrate', 'clickcount', 'votes'), width)
else:
columns_separotors = self._populate_columns_separators(('tags', 'language', 'state', 'country', 'codec', 'bitrate', 'clickcount', 'votes'), width)
if adjust_for_header and self._output_format == 1:
columns_separotors[0] -= self._outer_internal_body_diff
if adjust_for_body:
if self._output_format == 1:
columns_separotors[0] -= self._outer_internal_body_half_diff
else:
for n in range(0, len(columns_separotors)):
columns_separotors[n] += self._outer_internal_body_half_diff
if adjust > 0:
for n in range(0, len(columns_separotors)):
columns_separotors[n] -= adjust
return columns_separotors
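    # Build the internal header (column captions) for the current output format
    # and report which column, if any, should be highlighted as the active
    # search/sort field.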
def get_internal_header(self, pad, width):
guide = {
'name': 'Name',
'votes': ' Votes',
'clickcount': ' Clicks',
'bitrate': 'Bitrate',
'codec': 'Codec',
'country': 'Country',
'state': 'State',
'language': 'Language',
'tags': 'Tags',
}
# logger.error('DE search = {}'.format(self._search_history[self._search_history_index]))
reset_search_elements = False
if self.search_by is None:
reset_search_elements = True
self._get_search_elements(self._search_history[self._search_history_index])
# logger.error('DE search by = {}'.format(self.search_by))
columns = ((),
('Bitrate', ),
(' Votes', 'Bitrate'),
(' Votes', ' Clicks', 'Bitrate'),
(' Votes', ' Clicks', 'Bitrate', 'Country'),
(' Votes', ' Clicks', 'Bitrate', 'Country', 'Language'),
(' Votes', ' Clicks', 'Bitrate', 'Country', 'State', 'Language'),
(' Votes', ' Clicks', 'Bitrate', 'Codec', 'Country', 'State', 'Language', 'Tags')
)
columns_separotors = self.get_columns_separators(width, use_old_output_format=True)
if self._output_format == 1:
columns_separotors[0] -= 2
title = '#'.rjust(pad), ' Name '
if reset_search_elements:
self._old_search_by = self.search_by
# logger.error('DE search by = {}'.format(self.search_by))
# logger.error('DE Looking for: "{}"'.format(guide[self.search_by]))
# logger.error('DE Names = {}'.format(columns[self._output_format]))
if guide[self.search_by] == 'Name':
highlight = -2
else:
try:
highlight = columns[self._output_format].index(guide[self.search_by])
except:
highlight = -1
return highlight, ((title, columns_separotors, columns[self._output_format]), )
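    # Open (or re-parent) the server selection window and route key presses to it.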
def select_servers(self):
if self._server_selection_window is None:
self._server_selection_window = RadioBrowserInfoServersSelect(
self.parent, self._dns_info.server_urls, self._server)
else:
self._server_selection_window.set_parent(self.parent)
self.keyboard_handler = self._server_selection_window
self._server_selection_window.show()
def sort(self):
'''
Create and show the Sort window
'''
if self._sort is None:
self._get_search_elements(
self._search_history[self._search_history_index]
)
self._sort = RadioBrowserInfoSort(
parent=self.parent,
search_by=self.search_by
)
self.keyboard_handler = self._sort
self._sort.show()
def keypress(self, char):
''' RadioBrowserInfo keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
'''
ret = self.keyboard_handler.keypress(char)
if ret == 0:
if self.keyboard_handler == self._sort:
self.search_by = self._sort.search_by
if self.search_by == self._old_search_by:
self.reverse = not self.reverse
else:
self.reverse = False
if self.search_by != self._old_search_by:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('search by = "{}"'.format(self.search_by))
''' set reverse to True for numerical values
when changing sort type
'''
if self.search_by in (
'votes',
'clickcount',
'bitrate'
):
self.reverse = True
if logger.isEnabledFor(logging.DEBUG):
                            logger.debug('setting reverse to {}'.format(self.reverse))
self._raw_stations = sorted(self._raw_stations, key=itemgetter(self.search_by), reverse=self.reverse)
self._old_search_by = self.search_by
elif self.keyboard_handler == self._server_selection_window:
if ret == 0:
self._server = self._server_selection_window.server
if logger.isEnabledFor(logging.INFO):
logger.info('user selected server is ' + self._server)
self._get_title()
return ret
def do_search(self, parent=None, init=False):
if init:
self._sort_win = RadioBrowserInfoSearchWindow(
parent=parent,
init=init
)
self.keyboard_handler = self._sort_win
self._sort_win.show()
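# Window that lets the user compose a radio-browser.info query: a search term,
# the field to search by, the field to sort by and the sort direction.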
class RadioBrowserInfoSearchWindow(object):
# search_by_items = (
# 'No search term',
# 'Name',
# 'Tag',
# 'Country',
# 'State',
# 'Codec',
# 'Language',
# )
search_by_items = (
'Votes',
'Clicks',
'Recent click',
'Recently changed'
)
sort_by_items = (
'No sorting',
'Random',
'Name',
'Tag',
'Country',
'State',
'Language',
'Votes',
'Clicks',
'Bitrate',
'Codec',
)
def __init__(self,
parent,
init=False
):
self._parent = parent
self._init = init
self._too_small = False
self._focus = 0
self._win = None
self.maxY = self.maxX = 0
self.TITLE = ' Radio Browser Search '
''' we have two columns;
this is the width of each of them
'''
self._half_width = 0
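        # Widget slots: 0 search-term editor, 1 "search by" list, 2 exact-match
        # checkbox, 3 "sort by" list, 4 sort-descending checkbox, 5 reserved
        # (limit results, currently unused), 6 OK button, 7 Cancel button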
self._widgets = [ None, None, None, None, None, None, None, None]
@property
def focus(self):
return self._focus
@focus.setter
def focus(self, val):
if val in range(0, len(self._widgets)):
self._focus = val
else:
if val < 0:
self._focus = len(self._widgets) - 1
else:
self._focus = 0
self.show()
def show(self):
        pY, pX = self._parent.getmaxyx()
        logger.debug('DE pY = {}, pX = {}'.format(pY, pX))
        self.Y, self.X = self._parent.getbegyx()
        if self.maxY != pY or self.maxX != pX:
            logger.debug('DE --== SEARCH ==--')
            pY, pX = self._parent.getmaxyx()
            logger.debug('DE pY = {}, pX = {}'.format(pY, pX))
self.maxY = pY
self.maxX = pX
self._win = self._parent
# self._win = curses.newwin(
# self.maxY, self.maxX,
# Y, X
# )
self._half_width = int((self.maxX -2 ) / 2) -3
self._win.bkgdset(' ', curses.color_pair(5))
# self._win.erase()
self._win.box()
self._win.addstr(0, int((self.maxX - len(self.TITLE)) / 2),
self.TITLE,
curses.color_pair(4))
self._win.refresh()
# self._erase_win(self.maxY, self.maxX, self.Y, self.X)
''' start displaying things '''
self._win.addstr(1, 2, 'Search for', curses.color_pair(5))
self._win.addstr(4, 2, 'Search by', curses.color_pair(5))
for i, n in enumerate(self._widgets):
if n is None:
if i == 0:
#self._widgets[2] = SimpleCursesCheckBox(
# 1, 2, 'Display by',
# curses.color_pair(9),
# curses.color_pair(4),
# curses.color_pair(5))
self._widgets[0] = SimpleCursesLineEdit(
parent=self._win,
width=-2,
begin_y=3,
begin_x=2,
boxed=False,
has_history=False,
caption='',
box_color=curses.color_pair(9),
caption_color=curses.color_pair(4),
edit_color=curses.color_pair(9),
cursor_color=curses.color_pair(8),
unfocused_color=curses.color_pair(5),
string_changed_handler='')
self._widgets[0].bracket = False
self._line_editor = self._widgets[0]
elif i == 1:
''' search by '''
self._widgets[i] = SimpleCursesWidgetColumns(
Y=5, X=3, window=self._win,
selection=0,
active=0,
items=self.search_by_items,
color=curses.color_pair(5),
color_active=curses.color_pair(4),
color_cursor_selection=curses.color_pair(6),
color_cursor_active=curses.color_pair(9),
margin=1,
max_width=self._half_width
)
elif i == 2:
''' search exact '''
self._widgets[2] = SimpleCursesCheckBox(
self._widgets[1].Y + self._widgets[1].height + 2, 2,
'Exact match',
curses.color_pair(9), curses.color_pair(4), curses.color_pair(5))
elif i == 3:
''' sort by '''
self._widgets[i] = SimpleCursesWidgetColumns(
Y=5, X=self.maxX - 1 - self._half_width,
max_width=self._half_width,
window=self._win,
selection=0,
active=0,
items=self.sort_by_items,
color=curses.color_pair(5),
color_active=curses.color_pair(4),
color_cursor_selection=curses.color_pair(6),
color_cursor_active=curses.color_pair(9),
margin=1
)
elif i == 4:
                    ''' sort ascending / descending '''
self._widgets[4] = SimpleCursesCheckBox(
self._widgets[3].Y + self._widgets[3].height + 1, self._widgets[3].X - 2 + self._widgets[3].margin,
'Sort descending',
curses.color_pair(9), curses.color_pair(4), curses.color_pair(5))
elif i == 5:
                    ''' limit results '''
self._widgets[5] = None
elif i == 6:
self._widgets[i] = None
''' add horizontal push buttons '''
self._h_buttons = SimpleCursesHorizontalPushButtons(
Y=5 + len(self.search_by_items) + 2,
captions=('OK', 'Cancel'),
color_focused=curses.color_pair(9),
color=curses.color_pair(4),
bracket_color=curses.color_pair(5),
parent=self._win)
#self._h_buttons.calculate_buttons_position()
self._widgets[6], self._widgets[7] = self._h_buttons.buttons
self._widgets[6]._focused = self._widgets[7].focused = False
else:
if i in (1, 3):
''' update lists' window '''
if i == 3:
self._widgets[3].X = self.maxX - 1 - self._half_width
self._widgets[i].window = self._win
self._widgets[i].max_width = self._half_width
self._win.addstr(
4,
self._widgets[3].X - 2 + self._widgets[3].margin,
'Sort by',
curses.color_pair(5)
)
self._win.refresh()
self._update_focus()
if not self._too_small:
self._line_editor.show(self._win, opening=False)
self._h_buttons.calculate_buttons_position()
for n in range(1, len(self._widgets)):
if self._widgets[n]:
if n in (2, 4):
if n == 2:
self._widgets[2].Y = self._widgets[1].Y + self._widgets[1].height + 2
else:
self._widgets[4].Y = self._widgets[3].Y + self._widgets[3].height + 1
self._widgets[4].X = self._widgets[3].X - 2 + self._widgets[3].margin
self._widgets[n].move()
# self._widgets[n].resize()
self._widgets[n].show()
self._win.refresh()
# self._refresh()
def _update_focus(self):
# use _focused here to avoid triggering
# widgets' refresh
for i, x in enumerate(self._widgets):
if x:
if self._focus == i:
x._focused = True
else:
x._focused = False
def keypress(self, char):
''' RadioBrowserInfoSearchWindow keypress
Returns
-------
-1 - Cancel
0 - do search
1 - Continue
2 - Display help
'''
if self._too_small:
return 1
if char == ord('?'):
return 2
if char in (
curses.KEY_EXIT, ord('q'), 27,
ord('h'), curses.KEY_LEFT
):
return -1
elif char in (
ord('l'), ord(' '), ord('\n'), ord('\r'),
curses.KEY_RIGHT, curses.KEY_ENTER
):
return 0
class RadioBrowserInfoData(object):
    ''' Read search parameters for the radio-browser.info service
        Parameters are:
            tags, countries (and states), codecs, languages
    '''
_data = {}
_connection_error = False
_lock = threading.Lock()
_stop_thread = False
_timeout = 3
data_thread = None
def __init__(self, url, timeout=3):
self._url = url
self._timeout = timeout
def start(self, force_update=False):
''' Start data acquisition thread '''
self.data_thread = threading.Thread(
target=self._get_all_data_thread,
args=(
self._lock, force_update, lambda: self._stop_thread,
self._update_data
)
)
self.data_thread.start()
def stop(self):
''' Stop (cancel) data acquisition thread '''
self._stop_thread = True
@property
def lock(self):
''' Return thread lock (read only)'''
return self._lock
@lock.setter
def lock(self, val):
raise ValueError('property is read only')
@property
def terminated(self):
''' Return True if thread is not alive (read only)
which means that data has been retrieved'''
if self.data_thread.is_alive():
return False
return True
@terminated.setter
def terminated(self, val):
raise ValueError('property is read only')
@property
def connection_error(self):
self._lock.acquire()
ret = self._connection_error
self._lock.release()
return ret
@connection_error.setter
def connection_error(self, val):
raise ValueError('property is read only')
@property
def tags(self):
self._lock.acquire()
ret = self._data['tags']
self._lock.release()
return ret
@tags.setter
def tags(self, val):
raise ValueError('property is read only')
@property
def codecs(self):
self._lock.acquire()
if 'codecs' in self._data:
ret = self._data['codecs']
else:
ret = {}
self._lock.release()
return ret
@codecs.setter
def codecs(self, val):
raise ValueError('property is read only')
@property
def countries(self):
self._lock.acquire()
ret = self._data['countries']
self._lock.release()
return ret
@countries.setter
def countries(self, val):
raise ValueError('property is read only')
@property
def languages(self):
self._lock.acquire()
ret = self._data['languages']
self._lock.release()
return ret
@languages.setter
def languages(self, val):
raise ValueError('property is read only')
def reset_all_data(self):
self._data = {}
self.start()
def _update_data(self, data, connection_error):
self._connection_error = connection_error
self._data = data
def _get_all_data_thread(self, lock, force_update, stop, callback): # noqa
def get_data(data):
ret = {}
json_data = []
connection_error, json_data = get_data_dict(data)
if connection_error:
return True, {}
if json_data:
for a_tag in json_data:
ret[a_tag['name']] = a_tag['stationcount']
return False, ret
def get_countries(stop):
ret = {}
connection_error, json_countrycodes = get_data_dict('countrycodes')
if connection_error:
return True, {}
from countries import countries
st = 'stationcount'
for n in json_countrycodes:
if n['name'] in countries.keys():
ret[countries[n['name']]] = {}
ret[countries[n['name']]]['code'] = n['name']
ret[countries[n['name']]]['stationcount'] = n[st]
ret[countries[n['name']]]['states'] = {}
connection_error, json_states = get_data_dict('states')
if connection_error:
return True, {}
for n in json_states:
if n['country'] in ret.keys():
ret[n['country']]['states'][n['name']] = n['stationcount']
return False, ret
def get_data_dict(data):
url = 'http://' + self._url + '/json/' + data
jdata = {'hidebroken': 'true'}
headers = {'user-agent': 'PyRadio/dev',
'encoding': 'application/json'}
            if getattr(self, '_pyradio_info', None):
                headers['user-agent'] = self._pyradio_info.replace(' ', '/')
try:
r = requests.get(url, headers=headers, json=jdata, timeout=self._timeout)
r.raise_for_status()
return False, json.loads(r.text)
# if r.status_code == 200:
# return False, json.loads(r.text)
# else:
# return True, []
except requests.exceptions.RequestException as e:
                if logger.isEnabledFor(logging.ERROR):
logger.error(e)
return True, []
my_data = {}
data_items = ['tags', 'countries', 'codecs', 'languages']
for an_item in data_items:
if an_item == 'countries':
ret, my_data['countries'] = get_countries(stop)
else:
ret, my_data[an_item] = get_data(an_item)
if stop():
if logger.isEnabledFor(logging.DEBUG):
                    logger.debug('Asked to stop after working on "{}"...'.format(an_item))
self._terminated = True
return
lock.acquire()
callback(my_data, ret)
lock.release()
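# Minimal usage sketch for RadioBrowserInfoData (the server name below is only
# an example, not necessarily a live endpoint):
#     data = RadioBrowserInfoData('de1.api.radio-browser.info')
#     data.start()                      # fetch tags/countries/codecs/languages
#     while not data.terminated:
#         pass                          # or do other work while the thread runs
#     if not data.connection_error:
#         print(sorted(data.tags)[:10])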
class RadioBrowserInfoDns(object):
    ''' Performs a query of the DNS SRV record of
        _api._tcp.radio-browser.info, which gives
        the list of server names directly, without
        reverse DNS lookups '''
_urls = None
def __init__(self):
pass
@property
def server_urls(self):
''' Returns server urls in a tuple '''
if self._urls is None:
self._get_urls()
return tuple(self._urls) if self._urls is not None else None
@server_urls.setter
def server_urls(self, val):
return
    def _get_urls(self):
        self._urls = []
        result = None
        try:
            result = resolver.query('_api._tcp.radio-browser.info', 'SRV')
        except:
            # the SRV query failed; leave _urls as None so callers can retry later
            self._urls = None
            return
        for n in result:
            self._urls.append(str(n).split(' ')[-1][:-1])
def give_me_a_server_url(self):
''' Returns a random server '''
if self._urls is None:
self._get_urls()
if self._urls:
num = random.randint(0, len(self._urls) - 1)
return self._urls[num]
else:
return None
def servers(self):
''' server urls as generator '''
if self._urls is None:
self._get_urls()
for a_url in self._urls:
yield a_url
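# Popup window that lets the user pick the field used to sort the search results.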
class RadioBrowserInfoSort(object):
TITLE = ' Sort by '
items = collections.OrderedDict({
'Name': 'name',
'Votes': 'votes',
'Clicks': 'clickcount',
'Bitrate': 'bitrate',
'Codec': 'codec',
'Country': 'country',
'State': 'state',
'Language': 'language',
'Tag': 'tags'
})
_too_small = False
def __init__(self, parent, search_by=None):
self._parent = parent
self.active = self.selection = 0
if search_by:
if search_by in self.items.values():
self.active = self.selection = self._value_to_index(search_by)
self.maxY = len(self.items) + 2
self.maxX = max(len(x) for x in self.items.keys()) + 4
if len(self.TITLE) + 4 > self.maxX:
self.maxX = len(self.TITLE) + 4
self._win = None
if search_by:
self.set_active_by_value(search_by)
def _value_to_index(self, val):
for i, n in enumerate(self.items.values()):
if val == n:
return i
return -1
def set_parent(self, parent):
self._parent = parent
self.show()
def set_active_by_value(self, a_string, set_selection=True):
for i, n in enumerate(self.items.values()):
if a_string == n:
if set_selection:
self.active = self.selection = i
else:
self.active = i
return
if set_selection:
self.active = self.selection = 0
else:
self.active = 0
def show(self):
self._too_small = False
pY, pX = self._parent.getmaxyx()
Y, X = self._parent.getbegyx()
if self.maxY > pY or self.maxX > pX -2:
self._too_small = True
msg = 'Window too small to display content!'
if pX < len(msg) + 2:
msg = 'Window too small!'
self._win = curses.newwin(
3, len(msg) + 2,
Y + int((pY - 3) / 2),
int((pX - len(msg)) / 2))
self._win.bkgdset(' ', curses.color_pair(3))
self._win.box()
try:
self._win.addstr( 1, 1, msg,
curses.color_pair(5))
except:
pass
self._win.refresh()
return
self._win = curses.newwin(
self.maxY, self.maxX,
Y + int((pY - self.maxY) / 2),
int((pX - self.maxX) / 2)
)
self._win.bkgdset(' ', curses.color_pair(3))
# self._win.erase()
self._win.box()
self._win.addstr(0, 1,
self.TITLE,
curses.color_pair(4))
self._refresh()
def _refresh(self):
for i, n in enumerate(self.items.keys()):
col = 5
if i == self.active == self.selection:
col = 9
elif i == self.selection:
col = 6
elif i == self.active:
col = 4
self._win.addstr(i + 1, 1,
' {}'.format(n.ljust(self.maxX - 3)),
curses.color_pair(col))
self._win.refresh()
def keypress(self, char):
''' RadioBrowserInfoSort keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
'''
if self._too_small:
return 1
if char in (
curses.KEY_EXIT, ord('q'), 27,
ord('h'), curses.KEY_LEFT
):
return -1
elif char in (
ord('l'), ord(' '), ord('\n'), ord('\r'),
curses.KEY_RIGHT, curses.KEY_ENTER
):
for i, n in enumerate(self.items.keys()):
if i == self.selection:
self.search_by = self.items[n]
self.active = i
break
return 0
elif char in (ord('g'), curses.KEY_HOME):
self.selection = 0
self._refresh()
elif char in (ord('G'), curses.KEY_END):
self.selection = len(self.items) - 1
self._refresh()
elif char in (curses.KEY_PPAGE, ):
if self.selection == 0:
self.selection = len(self.items) - 1
else:
self.selection -= 5
if self.selection < 0:
self.selection = 0
self._refresh()
elif char in (curses.KEY_NPAGE, ):
if self.selection == len(self.items) - 1:
self.selection = 0
else:
self.selection += 5
if self.selection >= len(self.items):
self.selection = len(self.items) - 1
self._refresh()
elif char in (ord('k'), curses.KEY_UP):
self.selection -= 1
if self.selection < 0:
self.selection = len(self.items) - 1
self._refresh()
elif char in (ord('j'), curses.KEY_DOWN):
self.selection += 1
if self.selection == len(self.items):
self.selection = 0
self._refresh()
return 1
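# Wrapper window that frames a RadioBrowserInfoServers list with a border and title.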
class RadioBrowserInfoServersSelect(object):
TITLE = ' Server Selection '
def __init__(self, parent, servers, current_server):
self._parent = parent
self.items = list(servers)
self.server = current_server
self.servers = RadioBrowserInfoServers(
parent, servers, current_server
)
self.maxY = self.servers.maxY + 2
self.maxX = self.servers.maxX + 2
def show(self):
self._too_small = False
pY, pX = self._parent.getmaxyx()
Y, X = self._parent.getbegyx()
if self.maxY > pY or self.maxX > pX -2:
self._too_small = True
msg = 'Window too small to display content!'
if pX < len(msg) + 2:
msg = 'Window too small!'
self._win = curses.newwin(
3, len(msg) + 2,
Y + int((pY - 3) / 2),
int((pX - len(msg)) / 2))
self._win.bkgdset(' ', curses.color_pair(3))
self._win.box()
try:
self._win.addstr( 1, 1, msg,
curses.color_pair(5))
except:
pass
self._win.refresh()
return
self._win = curses.newwin(
self.maxY, self.maxX,
Y + int((pY - self.maxY) / 2),
int((pX - self.maxX) / 2)
)
self._win.bkgdset(' ', curses.color_pair(3))
# self._win.erase()
self._win.box()
self._win.addstr(
0, int((self.maxX - len(self.TITLE)) / 2),
self.TITLE,
curses.color_pair(4)
)
self._win.refresh()
self.servers._parent = self._win
self.servers.show()
def set_parent(self, parent):
self._parent = parent
self.servers._parent = parent
def keypress(self, char):
''' RadioBrowserInfoServersSelect keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
'''
ret = self.servers.keypress(char)
if ret == 2:
ret = 1
if ret == 0:
self.server = self.servers.server
return ret
class RadioBrowserInfoServers(object):
    ''' Display Radio Browser servers
        This widget is supposed to be plugged into
        another widget
    '''
_too_small = False
def __init__(self, parent, servers, current_server):
self._parent = parent
self.items = list(servers)
self.server = current_server
s_max = 0
for i, n in enumerate(self.items):
if self.server == n:
self.selection = self.active = i
self.items[i] = ' ' + country_from_server(n) + ' ({}) '.format(n)
if len(self.items[i]) > s_max:
s_max = len(self.items[i])
self.items.sort()
for i, n in enumerate(self.items):
if len(self.items[i]) < s_max:
self.items[i] = self.items[i].replace('(', ' ' * (s_max - len(self.items[i])) + '(')
self.maxY = len(self.items)
self.maxX = len(self.items[0])
''' get selection and active server id '''
for i, n in enumerate(self.items):
if self.server in n:
self.active = self.selection = i
break
def show(self):
self._too_small = False
pY, pX = self._parent.getmaxyx()
Y, X = self._parent.getbegyx()
if self.maxY > pY or self.maxX > pX -2:
''' display nothing
let the parent do whatever
'''
self._too_small = True
else:
self._win = curses.newwin(
self.maxY, self.maxX,
Y + 1, X + 1
)
for i, n in enumerate(self.items):
col = 5
if i == self.active == self.selection:
col = 9
elif i == self.selection:
col = 6
elif i == self.active:
col = 4
try:
self._win.addstr(i, 0 , n, curses.color_pair(col))
except:
pass
self._win.refresh()
def keypress(self, char):
''' RadioBrowserInfoServers keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
2: Show help
'''
if self._too_small:
return 1
if char in (
curses.KEY_EXIT, ord('q'), 27,
ord('h'), curses.KEY_LEFT
):
return -1
elif char in (
ord('l'), ord(' '), ord('\n'), ord('\r'),
curses.KEY_RIGHT, curses.KEY_ENTER
):
for i, n in enumerate(self.items):
if i == self.selection:
self.server = n.split('(')[1].replace(') ', '')
self.active = i
break
return 0
elif char in (ord('?'), ):
return 2
elif char in (ord('g'), curses.KEY_HOME):
self.selection = 0
self.show()
elif char in (ord('G'), curses.KEY_END):
self.selection = len(self.items) - 1
self.show()
elif char in (curses.KEY_PPAGE, ):
if self.selection == 0:
self.selection = len(self.items) - 1
else:
self.selection -= 5
if self.selection < 0:
self.selection = 0
self.show()
elif char in (curses.KEY_NPAGE, ):
if self.selection == len(self.items) - 1:
self.selection = 0
else:
self.selection += 5
if self.selection >= len(self.items):
self.selection = len(self.items) - 1
self.show()
elif char in (ord('k'), curses.KEY_UP):
self.selection -= 1
if self.selection < 0:
self.selection = len(self.items) - 1
self.show()
elif char in (ord('j'), curses.KEY_DOWN):
self.selection += 1
if self.selection == len(self.items):
self.selection = 0
self.show()
return 1
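# Given a stations-browser URL, return the PyRadioStationsBrowser subclass whose
# BASE_URL matches the URL's host part, or None if no implemented browser matches.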
def probeBrowsers(a_browser_url):
base_url = a_browser_url.split('/')[2]
if not base_url:
base_url = a_browser_url
implementedBrowsers = PyRadioStationsBrowser.__subclasses__()
if logger.isEnabledFor(logging.INFO):
logger.info('Implemented browsers: {}'.format(implementedBrowsers))
for a_browser in implementedBrowsers:
if a_browser.BASE_URL == base_url:
if logger.isEnabledFor(logging.INFO):
logger.info('Supported browser: {}'.format(a_browser))
return a_browser
if logger.isEnabledFor(logging.INFO):
logger.info('No supported browser found for: ' + a_browser_url)
return None
| 35.726254 | 158 | 0.506353 |
73dd4a9ff421cc8166d76162d17d0fdcc1b4054b | 203 | py | Python | maps/off_the_coast/off_the_coast.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | ["MIT"] | null | null | null | maps/off_the_coast/off_the_coast.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | ["MIT"] | null | null | null | maps/off_the_coast/off_the_coast.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | ["MIT"] | null | null | null |
from map import Map
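# Map descriptor for the "Off the Coast" track; the Map base class (not shown
# here) is assumed to consume the name and difficulty fields set below.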
class OffTheCoast(Map):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = 'off_the_coast'
self.difficulty = 'advanced'
| 20.3 | 41 | 0.62069 |
73dd4d4015168096d7a28af5840ed20440f42cd1 | 842 | py | Python | tensorflow/contrib/nn/python/ops/__init__.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | ["Apache-2.0"] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/contrib/nn/python/ops/__init__.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | ["Apache-2.0"] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/contrib/nn/python/ops/__init__.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | ["Apache-2.0"] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for variants of ops in tf.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| 42.1 | 80 | 0.7019 |