# Source: train.py from the qigtang/ssd.pytorch repository
# (commit f39c59f08c0688bae162639f7c82b9566d51f9df, MIT license)
from data import *
from utils.augmentations import SSDAugmentation
from layers.modules import MultiBoxLoss
from ssd import build_ssd
import os
import sys
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],
type=str, help='VOC or COCO')
parser.add_argument('--dataset_root', default=VOC_ROOT,
help='Dataset root directory path')
parser.add_argument('--basenet', default='vgg16_reducedfc.pth',
help='Pretrained base model')
parser.add_argument('--batch_size', default=32, type=int,
help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
help='Checkpoint state_dict file to resume training from')
parser.add_argument('--start_iter', default=0, type=int,
help='Resume training at this iter')
parser.add_argument('--num_workers', default=4, type=int,
help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
help='Use CUDA to train model')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
help='Momentum value for optim')
parser.add_argument('--weight_decay', default=5e-4, type=float,
help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float,
help='Gamma update for SGD')
parser.add_argument('--visdom', default=False, type=str2bool,
help='Use visdom for loss visualization')
parser.add_argument('--save_folder', default='weights/',
help='Directory for saving checkpoint models')
args = parser.parse_args()
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't " +
"using CUDA.\nRun with --cuda for optimal training speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
def train():
if args.dataset == 'COCO':
if args.dataset_root == VOC_ROOT:
if not os.path.exists(COCO_ROOT):
parser.error('Must specify dataset_root if specifying dataset')
print("WARNING: Using default COCO dataset_root because " +
"--dataset_root was not specified.")
args.dataset_root = COCO_ROOT
cfg = coco
dataset = COCODetection(root=args.dataset_root,
transform=SSDAugmentation(cfg['min_dim'],
MEANS))
elif args.dataset == 'VOC':
if args.dataset_root == COCO_ROOT:
parser.error('Must specify dataset if specifying dataset_root')
cfg = voc
dataset = VOCDetection(root=args.dataset_root,
transform=SSDAugmentation(cfg['min_dim'],
MEANS))
if args.visdom:
import visdom
viz = visdom.Visdom()
ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])
net = ssd_net
if args.cuda:
net = torch.nn.DataParallel(ssd_net)
cudnn.benchmark = True
if args.resume:
print('Resuming training, loading {}...'.format(args.resume))
ssd_net.load_weights(args.resume)
else:
vgg_weights = torch.load(args.save_folder + args.basenet)
print('Loading base network...')
ssd_net.vgg.load_state_dict(vgg_weights)
if args.cuda:
net = net.cuda()
if not args.resume:
print('Initializing weights...')
# initialize newly added layers' weights with xavier method
ssd_net.extras.apply(weights_init)
ssd_net.loc.apply(weights_init)
ssd_net.conf.apply(weights_init)
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
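    # The positional arguments below configure MultiBoxLoss (imported from
    # layers.modules): the 0.5 values are jaccard-overlap thresholds and 3 is
    # the hard-negative-mining ratio of negatives to positives; see that
    # module for the full parameter list.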
criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,
False, args.cuda)
net.train()
# loss counters
loc_loss = 0
conf_loss = 0
epoch = 0
print('Loading the dataset...')
epoch_size = len(dataset) // args.batch_size
print('Training SSD on:', dataset.name)
print('Using the specified args:')
print(args)
step_index = 0
if args.visdom:
vis_title = 'SSD.PyTorch on ' + dataset.name
vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']
iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)
epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)
data_loader = data.DataLoader(dataset, args.batch_size,
num_workers=args.num_workers,
shuffle=False, collate_fn=detection_collate,
pin_memory=True)
# create batch iterator
batch_iterator = iter(data_loader)
for iteration in range(args.start_iter, cfg['max_iter']):
if args.visdom and iteration != 0 and (iteration % epoch_size == 0):
update_vis_plot(epoch, loc_loss, conf_loss, epoch_plot, None,
'append', epoch_size)
# reset epoch loss counters
loc_loss = 0
conf_loss = 0
epoch += 1
if iteration in cfg['lr_steps']:
step_index += 1
adjust_learning_rate(optimizer, args.gamma, step_index)
try:
# load train data
images, targets = next(batch_iterator)
except StopIteration:
batch_iterator = iter(data_loader)
images, targets = next(batch_iterator)
if args.cuda:
images = Variable(images.cuda())
targets = [Variable(ann.cuda(), volatile=True) for ann in targets]
else:
images = Variable(images)
targets = [Variable(ann, volatile=True) for ann in targets]
# forward
t0 = time.time()
out = net(images)
# backprop
optimizer.zero_grad()
loss_l, loss_c = criterion(out, targets)
loss = loss_l + loss_c
loss.backward()
optimizer.step()
t1 = time.time()
loc_loss += loss_l.data[0]
conf_loss += loss_c.data[0]
if iteration % 10 == 0:
print('timer: %.4f sec.' % (t1 - t0))
print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.data[0]), end=' ')
if args.visdom:
update_vis_plot(iteration, loss_l.data[0], loss_c.data[0],
iter_plot, epoch_plot, 'append')
if iteration != 0 and iteration % 5000 == 0:
print('Saving state, iter:', iteration)
torch.save(ssd_net.state_dict(), 'weights/ssd300_COCO_' +
repr(iteration) + '.pth')
torch.save(ssd_net.state_dict(),
args.save_folder + '' + args.dataset + '.pth')
def adjust_learning_rate(optimizer, gamma, step):
"""Sets the learning rate to the initial LR decayed by 10 at every
specified step
# Adapted from PyTorch Imagenet example:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
"""
lr = args.lr * (gamma ** (step))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
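# For example, with the defaults --lr 1e-3 and --gamma 0.1, the schedule above
# yields 1e-3 at step 0, 1e-4 at step 1, and 1e-5 at step 2; step_index is
# advanced in train() whenever the iteration count reaches one of
# cfg['lr_steps'].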
def xavier(param):
init.xavier_uniform(param)
def weights_init(m):
if isinstance(m, nn.Conv2d):
xavier(m.weight.data)
m.bias.data.zero_()
def create_vis_plot(_xlabel, _ylabel, _title, _legend):
return viz.line(
X=torch.zeros((1,)).cpu(),
Y=torch.zeros((1, 3)).cpu(),
opts=dict(
xlabel=_xlabel,
ylabel=_ylabel,
title=_title,
legend=_legend
)
)
def update_vis_plot(iteration, loc, conf, window1, window2, update_type,
epoch_size=1):
viz.line(
X=torch.ones((1, 3)).cpu() * iteration,
Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,
win=window1,
update=update_type
)
# initialize epoch plot on first iteration
if iteration == 0:
viz.line(
X=torch.zeros((1, 3)).cpu(),
Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),
win=window2,
update=True
)
if __name__ == '__main__':
train()
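# A hypothetical invocation (paths are placeholders; VOC_ROOT is the default
# --dataset_root, and the --basenet weights are loaded from --save_folder):
#   python train.py --dataset VOC --dataset_root /path/to/VOCdevkit \
#       --batch_size 32 --lr 1e-3 --cuda True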
# Source: ginza/tag_map.py from the polm/ginza repository
# (commit b868823f793057ac3976fa343fd9bd14ebe1c75e, MIT license)
# encoding: utf8
from __future__ import unicode_literals
from spacy.symbols import POS, PUNCT, INTJ, X, ADJ, AUX, ADP, PART, CCONJ, SCONJ, NOUN
from spacy.symbols import SPACE, SYM, PRON, VERB, ADV, PROPN, NUM, DET
TAG_MAP = {
# Universal Dependencies Mapping
# (private repository)
# https://github.com/mynlp/udjapanese/blob/master/UDJapaneseBCCWJ/unidic_to_udpos_mapping/bccwj_pos_suw_rule.json
"記号-一般": {POS: SYM},
"記号-文字": {POS: SYM},
"感動詞-フィラー": {POS: INTJ},
"感動詞-一般": {POS: INTJ},
# spaces should be treated as token.whitespace_
"空白": {POS: SPACE},
"形状詞-一般": {POS: ADJ},
"形状詞-タリ": {POS: ADJ},
"形状詞-助動詞語幹": {POS: ADJ},
"形容詞-一般": {POS: ADJ},
"形容詞-非自立可能": {POS: ADJ}, # All the root tokens are ADJ
"助詞-格助詞": {POS: ADP},
"助詞-係助詞": {POS: ADP},
"助詞-終助詞": {POS: PART},
"助詞-準体助詞": {POS: SCONJ},
"助詞-接続助詞": {POS: CCONJ},
"助詞-副助詞": {POS: ADP},
"助動詞": {POS: AUX},
"接続詞": {POS: SCONJ},
"接頭辞": {POS: NOUN},
"接尾辞-形状詞的": {POS: NOUN},
"接尾辞-形容詞的": {POS: NOUN},
"接尾辞-動詞的": {POS: NOUN},
"接尾辞-名詞的-サ変可能": {POS: NOUN}, # All the root tokens are NOUN
"接尾辞-名詞的-一般": {POS: NOUN},
"接尾辞-名詞的-助数詞": {POS: NOUN},
"接尾辞-名詞的-副詞可能": {POS: NOUN}, # All the root tokens are NOUN
"代名詞": {POS: PRON},
"動詞-一般": {POS: VERB},
"動詞-非自立可能": {POS: VERB}, # All the root tokens are VERB except the tokens lemma is '為る' and POS is AUX
"副詞": {POS: ADV},
"補助記号-AA-一般": {POS: SYM}, # text art
"補助記号-AA-顔文字": {POS: SYM}, # kaomoji
"補助記号-一般": {POS: PUNCT},
"補助記号-括弧開": {POS: PUNCT}, # open bracket
"補助記号-括弧閉": {POS: PUNCT}, # close bracket
"補助記号-句点": {POS: PUNCT}, # period or other EOS marker
"補助記号-読点": {POS: PUNCT}, # comma
"名詞-固有名詞-一般": {POS: PROPN}, # general proper noun
"名詞-固有名詞-人名-一般": {POS: PROPN}, # person's name
"名詞-固有名詞-人名-姓": {POS: PROPN}, # surname
"名詞-固有名詞-人名-名": {POS: PROPN}, # first name
"名詞-固有名詞-地名-一般": {POS: PROPN}, # place name
"名詞-固有名詞-地名-国": {POS: PROPN}, # country name
"名詞-助動詞語幹": {POS: AUX},
"名詞-数詞": {POS: NUM}, # includes Chinese numerals
"名詞-普通名詞-サ変可能": {POS: NOUN}, # ADJ=3349 and VERB=3411 for root
"名詞-普通名詞-サ変形状詞可能": {POS: NOUN}, # ADJ=40 and NOUN=30 for root
"名詞-普通名詞-一般": {POS: NOUN},
"名詞-普通名詞-形状詞可能": {POS: ADJ}, # ADJ=404 and NOUN=161 for root
"名詞-普通名詞-助数詞可能": {POS: NOUN}, # All the root tokens are NOUN
"名詞-普通名詞-副詞可能": {POS: NOUN}, # All the root tokens are NOUN
"連体詞": {POS: DET},
}
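# A minimal usage sketch (not part of the original file): TAG_MAP maps a UniDic
# part-of-speech string to a dict keyed on the spaCy POS symbol, so a lookup
# such as the following yields the Universal Dependencies tag:
#   TAG_MAP["名詞-普通名詞-一般"][POS] is NOUN   # -> True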
# Source: docassemble_base/docassemble/base/parse.py from the
# Partnervine/docassemble repository
# (commit 05a154d4788ada27ad220a0d95456b0b0a26c46b, MIT license)
import mimetypes
import traceback
import re
from jinja2.runtime import StrictUndefined, UndefinedError
from jinja2.exceptions import TemplateError
from jinja2.environment import Environment
from jinja2.environment import Template as JinjaTemplate
from jinja2 import meta as jinja2meta
from jinja2.lexer import Token
from jinja2.utils import internalcode, missing, object_type_repr
from jinja2.ext import Extension
import ast
import ruamel.yaml as yaml
import string
import os
import os.path
import sys
import types
from urllib.request import urlretrieve
equals_byte = bytes('=', 'utf-8')
import httplib2
import datetime
import time
import operator
import pprint
import copy
import codecs
import array
import random
import tempfile
import json
import docassemble.base.filter
import docassemble.base.pdftk
import docassemble.base.file_docx
from docassemble.base.error import DAError, DANotFoundError, MandatoryQuestion, DAErrorNoEndpoint, DAErrorMissingVariable, ForcedNameError, QuestionError, ResponseError, BackgroundResponseError, BackgroundResponseActionError, CommandError, CodeExecute, DAValidationError, ForcedReRun, LazyNameError, DAAttributeError, DAIndexError
import docassemble.base.functions
import docassemble.base.util
from docassemble.base.functions import pickleable_objects, word, get_language, server, RawValue, get_config
from docassemble.base.logger import logmessage
from docassemble.base.pandoc import MyPandoc, word_to_markdown
from docassemble.base.mako.template import Template as MakoTemplate
from docassemble.base.mako.exceptions import SyntaxException, CompileException
from docassemble.base.astparser import myvisitnode
import collections.abc as abc
from collections import OrderedDict
from types import CodeType
import pandas
import dateutil.parser
import pytz
from itertools import groupby, chain
from collections import namedtuple
from bs4 import BeautifulSoup
import xml.etree.ElementTree as ET
from docassemble_textstat.textstat import textstat
from html.parser import HTMLParser
from io import StringIO
import qrcode
import qrcode.image.svg
RangeType = type(range(1,2))
NoneType = type(None)
debug = True
import_core = compile("from docassemble.base.core import objects_from_file, objects_from_structure", '<code block>', 'exec')
import_util = compile('from docassemble.base.util import *', '<code block>', 'exec')
import_process_action = compile('from docassemble.base.util import process_action', '<code block>', 'exec')
run_process_action = compile('process_action()', '<code block>', 'exec')
match_process_action = re.compile(r'process_action\(')
match_mako = re.compile(r'<%|\${|% if|% for|% while|\#\#')
emoji_match = re.compile(r':([^ ]+):')
valid_variable_match = re.compile(r'^[^\d][A-Za-z0-9\_]*$')
nameerror_match = re.compile(r'\'(.*)\' (is not defined|referenced before assignment|is undefined)')
document_match = re.compile(r'^--- *$', flags=re.MULTILINE)
remove_trailing_dots = re.compile(r'[\n\r]+\.\.\.$')
fix_tabs = re.compile(r'\t')
dot_split = re.compile(r'([^\.\[\]]+(?:\[.*?\])?)')
match_brackets_at_end = re.compile(r'^(.*)(\[.+?\])')
match_inside_brackets = re.compile(r'\[(.+?)\]')
match_brackets = re.compile(r'(\[.+?\])')
match_brackets_or_dot = re.compile(r'(\[.+?\]|\.[a-zA-Z_][a-zA-Z0-9_]*)')
complications = re.compile(r'[\.\[]')
list_of_indices = ['i', 'j', 'k', 'l', 'm', 'n']
extension_of_doc_format = {'pdf':'pdf', 'docx': 'docx', 'rtf': 'rtf', 'rtf to docx': 'docx', 'tex': 'tex', 'html': 'html'}
do_not_translate = """<%doc>
do not translate
</%doc>
"""
def process_audio_video_list(the_list, the_user_dict):
output = list()
for the_item in the_list:
output.append({'text': the_item['text'].text(the_user_dict), 'package': the_item['package'], 'type': the_item['type']})
return output
def textify(data, the_user_dict):
return list(map((lambda x: x.text(the_user_dict)), data))
# def set_absolute_filename(func):
# #logmessage("Running set_absolute_filename in parse")
# docassemble.base.functions.set_absolute_filename(func)
# def set_url_finder(func):
# docassemble.base.filter.set_url_finder(func)
# docassemble.base.functions.set_url_finder(func)
# def set_url_for(func):
# docassemble.base.filter.set_url_for(func)
# def set_file_finder(func):
# docassemble.base.filter.set_file_finder(func)
# def set_da_send_mail(func):
# docassemble.base.filter.set_da_send_mail(func)
# def blank_save_numbered_file(*args, **kwargs):
# return(None, None, None)
# save_numbered_file = blank_save_numbered_file
# def set_save_numbered_file(func):
# global save_numbered_file
# #logmessage("set the save_numbered_file function to " + str(func))
# save_numbered_file = func
# return
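# The starting answer store for a new interview session: every session begins
# as a deep copy of this dictionary (see get_initial_dict below), with the
# _internal bookkeeping structure, url_args, and the navigation object
# pre-populated.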
initial_dict = dict(_internal=dict(session_local=dict(), device_local=dict(), user_local=dict(), dirty=dict(), progress=0, tracker=0, docvar=dict(), doc_cache=dict(), steps=1, steps_offset=0, secret=None, informed=dict(), livehelp=dict(availability='unavailable', mode='help', roles=list(), partner_roles=list()), answered=set(), answers=dict(), objselections=dict(), starttime=None, modtime=None, accesstime=dict(), tasks=dict(), gather=list(), event_stack=dict(), misc=dict()), url_args=dict(), nav=docassemble.base.functions.DANav())
def set_initial_dict(the_dict):
global initial_dict
initial_dict = the_dict
return
def get_initial_dict():
    return copy.deepcopy(initial_dict)
class PackageImage:
def __init__(self, **kwargs):
self.filename = kwargs.get('filename', None)
self.attribution = kwargs.get('attribution', None)
self.setname = kwargs.get('setname', None)
self.package = kwargs.get('package', 'docassemble.base')
def get_filename(self):
return(docassemble.base.functions.static_filename_path(str(self.package) + ':' + str(self.filename)))
def get_reference(self):
#logmessage("get_reference is considering " + str(self.package) + ':' + str(self.filename))
return str(self.package) + ':' + str(self.filename)
class InterviewSource:
def __init__(self, **kwargs):
if not hasattr(self, 'package'):
self.package = kwargs.get('package', None)
self.language = kwargs.get('language', '*')
self.dialect = kwargs.get('dialect', None)
self.testing = kwargs.get('testing', False)
self.translating = kwargs.get('translating', False)
def __le__(self, other):
return str(self) <= (str(other) if isinstance(other, InterviewSource) else other)
def __ge__(self, other):
return str(self) >= (str(other) if isinstance(other, InterviewSource) else other)
def __gt__(self, other):
return str(self) > (str(other) if isinstance(other, InterviewSource) else other)
def __lt__(self, other):
return str(self) < (str(other) if isinstance(other, InterviewSource) else other)
def __eq__(self, other):
return self is other
def __ne__(self, other):
return self is not other
def __str__(self):
if hasattr(self, 'path'):
return str(self.path)
return 'interviewsource'
def __hash__(self):
if hasattr(self, 'path'):
return hash((self.path,))
else:
return hash(('interviewsource',))
def set_path(self, path):
self.path = path
return
def get_name(self):
if ':' in self.path:
return self.path
return self.get_package() + ':data/questions/' + self.path
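    # get_index()/update_index() maintain a per-path counter in Redis (keys
    # prefixed with 'da:interviewsource:') that is bumped when the source
    # changes, so callers can tell whether a cached copy is stale.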
def get_index(self):
the_index = docassemble.base.functions.server.server_redis.get('da:interviewsource:' + self.path)
if the_index is None:
#sys.stderr.write("Updating index from get_index for " + self.path + "\n")
the_index = docassemble.base.functions.server.server_redis.incr('da:interviewsource:' + self.path)
return the_index
def update_index(self):
#sys.stderr.write("Updating index for " + self.path + "\n")
docassemble.base.functions.server.server_redis.incr('da:interviewsource:' + self.path)
def set_filepath(self, filepath):
self.filepath = filepath
return
def set_directory(self, directory):
self.directory = directory
return
def set_content(self, content):
self.content = content
return
def set_language(self, language):
self.language = language
return
def set_dialect(self, dialect):
self.dialect = dialect
return
def set_testing(self, testing):
self.testing = testing
return
def set_package(self, package):
self.package = package
return
def update(self):
return True
def get_modtime(self):
return self._modtime
def get_language(self):
return self.language
def get_dialect(self):
return self.dialect
def get_package(self):
return self.package
def get_testing(self):
return self.testing
def get_interview(self):
return Interview(source=self)
def append(self, path):
return None
class InterviewSourceString(InterviewSource):
def __init__(self, **kwargs):
self.set_path(kwargs.get('path', None))
self.set_directory(kwargs.get('directory', None))
self.set_content(kwargs.get('content', None))
self._modtime = datetime.datetime.utcnow()
return super().__init__(**kwargs)
class InterviewSourceFile(InterviewSource):
def __init__(self, **kwargs):
self.playground = None
if 'filepath' in kwargs:
if re.search(r'SavedFile', str(type(kwargs['filepath']))):
self.playground = kwargs['filepath']
if self.playground.subdir and self.playground.subdir != 'default':
self.playground_file = os.path.join(self.playground.subdir, self.playground.filename)
else:
self.playground_file = self.playground.filename
#sys.stderr.write("The path is " + repr(self.playground.path) + "\n")
if os.path.isfile(self.playground.path) and os.access(self.playground.path, os.R_OK):
self.set_filepath(self.playground.path)
else:
raise DAError("Reference to invalid playground path")
else:
self.set_filepath(kwargs['filepath'])
else:
self.filepath = None
if 'path' in kwargs:
self.set_path(kwargs['path'])
return super().__init__(**kwargs)
def set_path(self, path):
self.path = path
parts = path.split(":")
if len(parts) == 2:
self.package = parts[0]
self.basename = parts[1]
else:
self.package = None
# if self.package is None:
# m = re.search(r'^/(playground\.[0-9]+)/', path)
# if m:
# self.package = m.group(1)
if self.filepath is None:
self.set_filepath(interview_source_from_string(self.path))
if self.package is None and re.search(r'docassemble.base.data.', self.filepath):
self.package = 'docassemble.base'
return
def set_filepath(self, filepath):
#logmessage("Called set_filepath with " + str(filepath))
self.filepath = filepath
if self.filepath is None:
self.directory = None
else:
self.set_directory(os.path.dirname(self.filepath))
return
def reset_modtime(self):
try:
with open(self.filepath, 'a'):
os.utime(self.filepath, None)
except:
logmessage("InterviewSourceFile: could not reset modification time on interview")
def update(self):
#logmessage("Update: " + str(self.filepath))
try:
with open(self.filepath, 'r', encoding='utf-8') as the_file:
self.set_content(the_file.read())
#sys.stderr.write("Returning true\n")
return True
except Exception as errmess:
#sys.stderr.write("Error: " + str(errmess) + "\n")
pass
return False
def get_modtime(self):
#logmessage("get_modtime called in parse where path is " + str(self.path))
if self.playground is not None:
return self.playground.get_modtime(filename=self.playground_file)
self._modtime = os.path.getmtime(self.filepath)
return(self._modtime)
def append(self, path):
new_file = os.path.join(self.directory, path)
if os.path.isfile(new_file) and os.access(new_file, os.R_OK):
new_source = InterviewSourceFile()
new_source.path = path
new_source.directory = self.directory
new_source.basename = path
new_source.filepath = new_file
new_source.playground = self.playground
if hasattr(self, 'package'):
new_source.package = self.package
if new_source.update():
return(new_source)
return(None)
def dummy_embed_input(status, variable):
return variable
class InterviewStatus:
def __init__(self, current_info=dict(), **kwargs):
self.current_info = current_info
self.attributions = set()
self.seeking = list()
self.tracker = kwargs.get('tracker', -1)
self.maps = list()
self.extra_scripts = list()
self.extra_css = list()
self.using_screen_reader = False
self.can_go_back = True
self.attachments = None
self.linkcounter = 0
#restore this, maybe
#self.next_action = list()
self.embedded = set()
self.extras = dict()
self.followed_mc = False
self.tentatively_answered = set()
self.checkin = False
def get_all_fields_used(self, user_dict):
if 'list_collect' in self.extras:
all_fields = set()
allow_append = self.extras['list_collect_allow_append']
iterator_re = re.compile(r"\[%s\]" % (self.extras['list_iterator'],))
list_len = len(self.extras['list_collect'].elements)
if hasattr(self.extras['list_collect'], 'minimum_number') and self.extras['list_collect'].minimum_number is not None and self.extras['list_collect'].minimum_number > list_len:
list_len = self.extras['list_collect'].minimum_number
if list_len == 0:
list_len = 1
if self.extras['list_collect'].ask_object_type or not allow_append:
extra_amount = 0
else:
extra_amount = get_config('list collect extra count', 15)
for list_indexno in range(list_len + extra_amount):
for field_used in self.question.fields_used:
all_fields.add(re.sub(iterator_re, '[' + str(list_indexno) +']', field_used))
return all_fields
else:
return self.question.fields_used
def get_fields_and_sub_fields_and_collect_fields(self, user_dict):
all_fields = self.question.get_fields_and_sub_fields(user_dict)
if 'list_collect' in self.extras:
allow_append = self.extras['list_collect_allow_append']
iterator_re = re.compile(r"\[%s\]" % (self.extras['list_iterator'],))
if 'sub_fields' in self.extras:
field_list = list()
for field in self.question.fields:
if field.number in self.extras['sub_fields']:
field_list.extend(self.extras['sub_fields'][field.number])
else:
field_list.append(field)
else:
field_list = self.question.fields
list_len = len(self.extras['list_collect'].elements)
if hasattr(self.extras['list_collect'], 'minimum_number') and self.extras['list_collect'].minimum_number is not None and self.extras['list_collect'].minimum_number > list_len:
list_len = self.extras['list_collect'].minimum_number
if list_len == 0:
list_len = 1
if self.extras['list_collect'].ask_object_type or not allow_append:
extra_amount = 0
else:
extra_amount = get_config('list collect extra count', 15)
for list_indexno in range(list_len + extra_amount):
for field in field_list:
the_field = copy.deepcopy(field)
the_field.number = str(list_indexno) + '_' + str(the_field.number)
if hasattr(the_field, 'saveas'):
the_field.saveas = safeid(re.sub(iterator_re, '[' + str(list_indexno) +']', from_safeid(field.saveas)))
all_fields.append(the_field)
return all_fields
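    # A choice-based field whose computed list of choices came back empty is
    # treated as hidden on the screen; get_field_info() uses this to populate
    # its 'hiddens' dictionary.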
def is_empty_mc(self, field):
if hasattr(field, 'choicetype') and not (hasattr(field, 'inputtype') and field.inputtype == 'combobox'):
if field.choicetype in ['compute', 'manual']:
if field.number not in self.selectcompute:
return False
pairlist = list(self.selectcompute[field.number])
else:
logmessage("is_empty_mc: unknown choicetype " + str(field.choicetype))
return False
if len(pairlist) == 0:
return True
return False
def get_field_info(self):
datatypes = dict()
hiddens = dict()
files = list()
ml_info = dict()
checkboxes = dict()
saveas_by_number = dict()
saveas_to_use = dict()
if self.extras.get('list_collect', False) is not False:
list_collect_list = self.extras['list_collect'].instanceName
else:
list_collect_list = None
if self.orig_sought is not None:
orig_sought = self.orig_sought
else:
orig_sought = None
if self.question.question_type == "signature":
signature_saveas = self.question.fields[0].saveas
else:
signature_saveas = None
if hasattr(self.question, 'fields_saveas'):
datatypes[safeid(self.question.fields_saveas)] = "boolean"
fields_saveas = self.question.fields_saveas
else:
fields_saveas = None
if self.question.question_type in ["yesno", "yesnomaybe"]:
datatypes[self.question.fields[0].saveas] = self.question.fields[0].datatype
elif self.question.question_type in ["noyes", "noyesmaybe"]:
datatypes[self.question.fields[0].saveas] = self.question.fields[0].datatype
elif self.question.question_type == "review" and hasattr(self.question, 'review_saveas'):
datatypes[safeid(self.question.review_saveas)] = "boolean"
elif self.question.question_type == "fields":
the_field_list = self.get_field_list()
for field in the_field_list:
if hasattr(field, 'saveas'):
if (hasattr(field, 'extras') and (('show_if_var' in field.extras and 'show_if_val' in self.extras) or 'show_if_js' in field.extras)) or (hasattr(field, 'disableothers') and field.disableothers):
the_saveas = safeid('_field_' + str(field.number))
else:
the_saveas = field.saveas
saveas_to_use[field.saveas] = the_saveas
saveas_by_number[field.number] = the_saveas
for field in the_field_list:
if not self.extras['ok'][field.number]:
continue
if self.is_empty_mc(field):
if hasattr(field, 'datatype'):
hiddens[field.saveas] = field.datatype
else:
hiddens[field.saveas] = True
if hasattr(field, 'datatype'):
datatypes[field.saveas] = field.datatype
if field.datatype in ('object_multiselect', 'object_checkboxes'):
datatypes[safeid(from_safeid(field.saveas) + ".gathered")] = 'boolean'
continue
if hasattr(field, 'extras'):
if 'ml_group' in field.extras or 'ml_train' in field.extras:
ml_info[field.saveas] = dict()
if 'ml_group' in field.extras:
ml_info[field.saveas]['group_id'] = self.extras['ml_group'][field.number]
if 'ml_train' in field.extras:
ml_info[field.saveas]['train'] = self.extras['ml_train'][field.number]
if hasattr(field, 'choicetype'):
vals = set([str(x['key']) for x in self.selectcompute[field.number]])
if len(vals) == 1 and ('True' in vals or 'False' in vals):
datatypes[field.saveas] = 'boolean'
elif len(vals) == 1 and 'None' in vals:
datatypes[field.saveas] = 'threestate'
elif len(vals) == 2 and ('True' in vals and 'False' in vals):
datatypes[field.saveas] = 'boolean'
elif len(vals) == 2 and (('True' in vals and 'None' in vals) or ('False' in vals and 'None' in vals)):
datatypes[field.saveas] = 'threestate'
elif len(vals) == 3 and ('True' in vals and 'False' in vals and 'None' in vals):
datatypes[field.saveas] = 'threestate'
else:
datatypes[field.saveas] = field.datatype
elif hasattr(field, 'datatype') and hasattr(field, 'saveas'):
datatypes[field.saveas] = field.datatype
if hasattr(field, 'datatype') and hasattr(field, 'saveas'):
if (field.datatype in ['files', 'file', 'camera', 'user', 'environment', 'camcorder', 'microphone']):
files.append(saveas_by_number[field.number])
if not hasattr(field, 'choicetype'):
datatypes[field.saveas] = field.datatype
if field.datatype == 'boolean':
if field.sign > 0:
checkboxes[field.saveas] = 'False'
else:
checkboxes[field.saveas] = 'True'
elif field.datatype == 'threestate':
checkboxes[field.saveas] = 'None'
elif field.datatype in ['multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes']:
if field.choicetype in ['compute', 'manual']:
pairlist = list(self.selectcompute[field.number])
else:
pairlist = list()
for pair in pairlist:
if isinstance(pair['key'], str):
checkboxes[safeid(from_safeid(field.saveas) + "[B" + myb64quote(pair['key']) + "]")] = 'False'
else:
checkboxes[safeid(from_safeid(field.saveas) + "[R" + myb64quote(repr(pair['key'])) + "]")] = 'False'
elif not self.extras['required'][field.number]:
checkboxes[field.saveas] = 'None'
if field.datatype in ('object_multiselect', 'object_checkboxes'):
datatypes[safeid(from_safeid(field.saveas) + ".gathered")] = 'boolean'
if self.extras.get('list_collect_is_final', False):
if self.extras['list_collect'].ask_number:
datatypes[safeid(self.extras['list_collect'].instanceName + ".target_number")] = 'integer'
else:
datatypes[safeid(self.extras['list_collect'].instanceName + ".there_is_another")] = 'boolean'
elif self.question.question_type == "settrue":
datatypes[self.question.fields[0].saveas] = "boolean"
elif self.question.question_type == "multiple_choice" and hasattr(self.question.fields[0], 'datatype'):
datatypes[self.question.fields[0].saveas] = self.question.fields[0].datatype
return {'datatypes': datatypes, 'hiddens': hiddens, 'files': files, 'ml_info': ml_info, 'checkboxes': checkboxes, 'list_collect_list': list_collect_list, 'orig_sought': orig_sought, 'fields_saveas': fields_saveas, 'signature_saveas': signature_saveas}
def do_sleep(self):
if hasattr(self.question, 'sleep'):
try:
time.sleep(self.question.sleep)
except:
sys.stderr.write("do_sleep: invalid sleep amount " + repr(self.question.sleep) + "\n")
def get_field_list(self):
if 'sub_fields' in self.extras:
field_list = list()
for field in self.question.fields:
if field.number in self.extras['sub_fields']:
field_list.extend(self.extras['sub_fields'][field.number])
else:
field_list.append(field)
else:
field_list = self.question.fields
if 'list_collect' in self.extras:
full_field_list = list()
allow_append = self.extras['list_collect_allow_append']
iterator_re = re.compile(r"\[%s\]" % (self.extras['list_iterator'],))
list_len = len(self.extras['list_collect'].elements)
if hasattr(self.extras['list_collect'], 'minimum_number') and self.extras['list_collect'].minimum_number is not None and self.extras['list_collect'].minimum_number > list_len:
list_len = self.extras['list_collect'].minimum_number
if list_len == 0:
list_len = 1
if self.extras['list_collect'].ask_object_type or not allow_append:
extra_amount = 0
else:
extra_amount = get_config('list collect extra count', 15)
for list_indexno in range(list_len + extra_amount):
header_field = Field({'type': 'html', 'extras': {'html': TextObject('')}})
if list_indexno >= list_len:
header_field.collect_type = 'extraheader'
elif list_indexno == 0:
header_field.collect_type = 'firstheader'
else:
header_field.collect_type = 'header'
header_field.collect_number = list_indexno
header_field.number = str(list_indexno)
full_field_list.append(header_field)
self.extras['ok'][str(list_indexno)] = True
self.extras['required'][str(list_indexno)] = False
for field in field_list:
the_field = copy.deepcopy(field)
the_field.number = str(list_indexno) + '_' + str(the_field.number)
if hasattr(the_field, 'saveas'):
the_field.saveas = safeid(re.sub(iterator_re, '[' + str(list_indexno) + ']', from_safeid(the_field.saveas)))
if hasattr(the_field, 'disableothers') and the_field.disableothers:
list_of_other_fields = list()
if isinstance(the_field.disableothers, list):
for other_saveas in the_field.disableothers:
list_of_other_fields.append(re.sub(iterator_re, '[' + str(list_indexno) + ']', other_saveas))
else:
for other_field in field_list:
if not hasattr(other_field, 'saveas'):
continue
if other_field.number == field.number:
continue
list_of_other_fields.append(re.sub(iterator_re, '[' + str(list_indexno) +']', from_safeid(other_field.saveas)))
the_field.disableothers = list_of_other_fields
if hasattr(the_field, 'uncheckothers') and the_field.uncheckothers:
list_of_other_fields = list()
if isinstance(the_field.uncheckothers, list):
for other_saveas in the_field.uncheckothers:
list_of_other_fields.append(re.sub(iterator_re, '[' + str(list_indexno) +']', from_safeid(other_saveas)))
else:
for other_field in field_list:
if not hasattr(other_field, 'saveas'):
continue
if other_field.number == field.number or not (hasattr(other_field, 'inputtype') and other_field.inputtype in ['yesno', 'noyes', 'yesnowide', 'noyeswide']):
continue
list_of_other_fields.append(re.sub(iterator_re, '[' + str(list_indexno) +']', from_safeid(other_field.saveas)))
the_field.uncheckothers = list_of_other_fields
if hasattr(the_field, 'extras'):
if 'show_if_var' in the_field.extras:
the_field.extras['show_if_var'] = safeid(re.sub(r'\[' + self.extras['list_iterator'] + r'\]', '[' + str(list_indexno) + ']', from_safeid(the_field.extras['show_if_var'])))
if 'show_if_js' in the_field.extras:
the_field.extras['show_if_js']['expression'].original_text = re.sub(iterator_re, '[' + str(list_indexno) + ']', the_field.extras['show_if_js']['expression'].original_text)
self.extras['show_if_js'][the_field.number]['expression'] = re.sub(iterator_re, '[' + str(list_indexno) + ']', self.extras['show_if_js'][the_field.number]['expression'])
if the_field.extras['show_if_js']['expression'].uses_mako:
the_field.extras['show_if_js']['expression'].template = MakoTemplate(the_field.extras['show_if_js']['expression'].original_text, strict_undefined=True, input_encoding='utf-8')
for ii in range(len(the_field.extras['show_if_js']['vars'])):
the_field.extras['show_if_js']['vars'][ii] = re.sub(iterator_re, '[' + str(list_indexno) + ']', the_field.extras['show_if_js']['vars'][ii])
for ii in range(len(self.extras['show_if_js'][the_field.number]['vars'])):
self.extras['show_if_js'][the_field.number]['vars'][ii] = re.sub(iterator_re, '[' + str(list_indexno) + ']', self.extras['show_if_js'][the_field.number]['vars'][ii])
if list_indexno >= list_len:
the_field.collect_type = 'extra'
else:
the_field.collect_type = 'mid'
the_field.collect_number = list_indexno
full_field_list.append(the_field)
post_header_field = Field({'type': 'html', 'extras': {'html': TextObject('')}})
if extra_amount > 0 and list_indexno == list_len + extra_amount - 1:
post_header_field.collect_type = 'extrafinalpostheader'
elif list_indexno >= list_len:
post_header_field.collect_type = 'extrapostheader'
else:
post_header_field.collect_type = 'postheader'
post_header_field.collect_number = list_indexno
post_header_field.number = str(list_indexno)
full_field_list.append(post_header_field)
return full_field_list
else:
return field_list
def mark_tentative_as_answered(self, the_user_dict):
for question in self.tentatively_answered:
question.mark_as_answered(the_user_dict)
self.tentatively_answered.clear()
def initialize_screen_reader(self):
self.using_screen_reader = True
self.screen_reader_text = dict()
self.screen_reader_links = {'question': [], 'help': []}
def populate(self, question_result):
self.question = question_result['question']
self.questionText = question_result['question_text']
self.subquestionText = question_result['subquestion_text']
self.continueLabel = question_result['continue_label']
self.decorations = question_result['decorations']
self.audiovideo = question_result['audiovideo']
self.helpText = question_result['help_text']
self.attachments = question_result['attachments']
self.selectcompute = question_result['selectcompute']
self.defaults = question_result['defaults']
self.other_defaults = dict()
#self.defined = question_result['defined']
self.hints = question_result['hints']
self.helptexts = question_result['helptexts']
self.extras = question_result['extras']
self.labels = question_result['labels']
self.sought = question_result['sought']
self.orig_sought = question_result['orig_sought']
def set_tracker(self, tracker):
self.tracker = tracker
def get_history(self):
output = {'steps': []}
if self.question.from_source.path != self.question.interview.source.path and self.question.from_source.path is not None:
output['source_file'] = self.question.from_source.path
if hasattr(self.question, 'source_code') and self.question.source_code is not None:
output['source_code'] = self.question.source_code
index = 0
seeking_len = len(self.seeking)
if seeking_len:
starttime = self.seeking[0]['time']
for stage in self.seeking:
index += 1
if index < seeking_len and 'reason' in self.seeking[index] and self.seeking[index]['reason'] in ('asking', 'running') and self.seeking[index]['question'] is stage['question'] and 'question' in stage and 'reason' in stage and stage['reason'] == 'considering':
continue
the_stage = {'time': "%.5fs" % (stage['time'] - starttime), 'index': index}
if 'question' in stage and 'reason' in stage and (index < (seeking_len - 1) or stage['question'] is not self.question):
the_stage['reason'] = stage['reason']
if stage['reason'] == 'initial':
the_stage['reason_text'] = "Ran initial code"
elif stage['reason'] == 'mandatory question':
the_stage['reason_text'] = "Tried to ask mandatory question"
elif stage['reason'] == 'mandatory code':
the_stage['reason_text'] = "Tried to run mandatory code"
elif stage['reason'] == 'asking':
the_stage['reason_text'] = "Tried to ask question"
elif stage['reason'] == 'running':
the_stage['reason_text'] = "Tried to run block"
elif stage['reason'] == 'considering':
the_stage['reason_text'] = "Considered using block"
elif stage['reason'] == 'objects from file':
the_stage['reason_text'] = "Tried to load objects from file"
elif stage['reason'] == 'data':
the_stage['reason_text'] = "Tried to load data"
elif stage['reason'] == 'objects':
the_stage['reason_text'] = "Tried to load objects"
elif stage['reason'] == 'result of multiple choice':
the_stage['reason_text'] = "Followed the result of multiple choice selection"
if stage['question'].from_source.path != self.question.interview.source.path and stage['question'].from_source.path is not None:
the_stage['source_file'] = stage['question'].from_source.path
if (not hasattr(stage['question'], 'source_code')) or stage['question'].source_code is None:
the_stage['embedded'] = True
else:
the_stage['code'] = stage['question'].source_code
elif 'variable' in stage:
the_stage['reason'] = 'needed'
the_stage['reason_text'] = "Needed definition of"
the_stage['variable_name'] = str(stage['variable'])
elif 'done' in stage:
the_stage['reason'] = 'complete'
the_stage['reason_text'] = "Completed processing"
else:
continue
output['steps'].append(the_stage)
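        # Each entry in output['steps'] is a dict such as
        # {'time': '0.00123s', 'index': 3, 'reason': 'asking',
        #  'reason_text': 'Tried to ask question'}, with additional keys like
        # 'variable_name', 'source_file', 'code', or 'embedded' depending on
        # the stage.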
return output
def as_data(self, the_user_dict, encode=True):
result = dict(language=self.question.language)
debug = self.question.interview.debug
if debug:
output = dict(question='', help='')
if 'progress' in the_user_dict['_internal']:
result['progress'] = the_user_dict['_internal']['progress']
if self.question.language in self.question.interview.default_validation_messages:
result['validation_messages'] = copy.copy(self.question.interview.default_validation_messages[self.question.language])
else:
result['validation_messages'] = dict()
if 'reload_after' in self.extras:
result['reload'] = 1000 * int(self.extras['reload_after'])
lang = docassemble.base.functions.get_language()
if len(self.question.terms) or len(self.question.interview.terms):
result['terms'] = dict()
if 'terms' in self.extras:
for term, vals in self.extras['terms'].items():
result['terms'][term] = vals['definition']
if lang in self.question.interview.terms and len(self.question.interview.terms[lang]):
for term, vals in self.question.interview.terms[lang].items():
result['terms'][term] = vals['definition']
elif self.question.language in self.question.interview.terms and len(self.question.interview.terms[self.question.language]):
for term, vals in self.question.interview.terms[self.question.language].items():
result['terms'][term] = vals['definition']
if len(self.question.autoterms) or len(self.question.interview.autoterms):
result['autoterms'] = dict()
if 'autoterms' in self.extras:
for term, vals in self.extras['autoterms'].items():
result['autoterms'][term] = vals['definition']
if lang in self.question.interview.autoterms and len(self.question.interview.autoterms[lang]):
                for term, vals in self.question.interview.autoterms[lang].items():
result['autoterms'][term] = vals['definition']
elif self.question.language in self.question.interview.autoterms and len(self.question.interview.autoterms[self.question.language]):
for term, vals in self.question.interview.autoterms[self.question.language].items():
result['autoterms'][term] = vals['definition']
if self.orig_sought is not None:
result['event_list'] = [self.orig_sought]
if 'action_buttons' in self.extras:
result['additional_buttons'] = []
for item in self.extras['action_buttons']:
new_item = copy.deepcopy(item)
new_item['label'] = docassemble.base.filter.markdown_to_html(item['label'], trim=True, do_terms=False, status=self, verbatim=(not encode))
if debug:
output['question'] += '<p>' + new_item['label'] + '</p>'
for param in ('questionText',):
if hasattr(self, param) and getattr(self, param) is not None:
result[param] = docassemble.base.filter.markdown_to_html(getattr(self, param).rstrip(), trim=True, status=self, verbatim=(not encode))
if debug:
output['question'] += result[param]
if hasattr(self, 'subquestionText') and self.subquestionText is not None:
if self.question.question_type == "fields":
embedder = dummy_embed_input
else:
embedder = None
result['subquestionText'] = docassemble.base.filter.markdown_to_html(self.subquestionText.rstrip(), status=self, verbatim=(not encode), embedder=embedder)
if debug:
output['question'] += result['subquestionText']
for param in ('continueLabel', 'helpLabel'):
if hasattr(self, param) and getattr(self, param) is not None:
result[param] = docassemble.base.filter.markdown_to_html(getattr(self, param).rstrip(), trim=True, do_terms=False, status=self, verbatim=(not encode))
if debug:
output['question'] += '<p>' + result[param] + '</p>'
if 'menu_items' in self.extras and isinstance(self.extras['menu_items'], list):
result['menu_items'] = self.extras['menu_items']
for param in ('cssClass', 'tableCssClass', 'css', 'script'):
if param in self.extras and isinstance(self.extras[param], str):
result[param] = self.extras[param].rstrip()
for param in ('back_button_label',):
if param in self.extras and isinstance(self.extras[param], str):
result[param] = docassemble.base.filter.markdown_to_html(self.extras[param].rstrip(), trim=True, do_terms=False, status=self, verbatim=(not encode))
for param in ('rightText', 'underText'):
if param in self.extras and isinstance(self.extras[param], str):
result[param] = docassemble.base.filter.markdown_to_html(self.extras[param].rstrip(), status=self, verbatim=(not encode))
if debug:
output['question'] += result[param]
if 'continueLabel' not in result:
if self.question.question_type == "review":
result['continueLabel'] = word('Resume')
else:
result['continueLabel'] = word('Continue')
if debug:
output['question'] += '<p>' + result['continueLabel'] + '</p>'
if self.question.question_type == "yesno":
result['yesLabel'] = self.question.yes()
result['noLabel'] = self.question.no()
elif self.question.question_type == "noyes":
result['noLabel'] = self.question.yes()
result['yesLabel'] = self.question.no()
elif self.question.question_type == "yesnomaybe":
result['yesLabel'] = self.question.yes()
result['noLabel'] = self.question.no()
result['maybeLabel'] = self.question.maybe()
elif self.question.question_type == "noyesmaybe":
result['noLabel'] = self.question.yes()
result['yesLabel'] = self.question.no()
result['maybeLabel'] = self.question.maybe()
steps = the_user_dict['_internal']['steps'] - the_user_dict['_internal']['steps_offset']
if self.can_go_back and steps > 1:
result['allow_going_back'] = True
result['backTitle'] = word("Go back to the previous question")
back_button_val = self.extras.get('back_button', None)
if (back_button_val or (back_button_val is None and self.question.interview.question_back_button)):
result['questionBackButton'] = self.back
else:
result['allow_going_back'] = False
if self.question.question_type == "signature":
result['signaturePhrases'] = {
'clear': word('Clear'),
'noSignature': word("You must sign your name to continue."),
'loading': word('Loading. Please wait . . . '),
}
if 'questionMetadata' in self.extras:
result['question_metadata'] = self.extras['questionMetadata']
if 'segment' in self.extras:
result['segment'] = self.extras['segment']
if 'ga_id' in self.extras:
result['ga_id'] = self.extras['ga_id']
if hasattr(self.question, 'id'):
result['id'] = self.question.id
if hasattr(self, 'audiovideo') and self.audiovideo is not None:
audio_result = docassemble.base.filter.get_audio_urls(self.audiovideo)
video_result = docassemble.base.filter.get_video_urls(self.audiovideo)
if len(audio_result) > 0:
result['audio'] = [dict(url=re.sub(r'.*"(http[^"]+)".*', r'\1', x)) if isinstance(x, str) else dict(url=x[0], mime_type=x[1]) for x in audio_result]
if len(video_result) > 0:
result['video'] = [dict(url=re.sub(r'.*"(http[^"]+)".*', r'\1', x)) if isinstance(x, str) else dict(url=x[0], mime_type=x[1]) for x in video_result]
if hasattr(self, 'helpText') and len(self.helpText) > 0:
result['helpText'] = list()
result['helpBackLabel'] = word("Back to question")
for help_text in self.helpText:
the_help = dict()
if 'audiovideo' in help_text and help_text['audiovideo'] is not None:
audio_result = docassemble.base.filter.get_audio_urls(help_text['audiovideo'])
video_result = docassemble.base.filter.get_video_urls(help_text['audiovideo'])
if len(audio_result) > 0:
the_help['audio'] = [dict(url=x[0], mime_type=x[1]) for x in audio_result]
if len(video_result) > 0:
the_help['video'] = [dict(url=x[0], mime_type=x[1]) for x in video_result]
if 'content' in help_text and help_text['content'] is not None:
the_help['content'] = docassemble.base.filter.markdown_to_html(help_text['content'].rstrip(), status=self, verbatim=(not encode))
if debug:
output['help'] += the_help['content']
if 'heading' in help_text and help_text['heading'] is not None:
the_help['heading'] = help_text['heading'].rstrip()
if debug:
output['help'] += '<p>' + the_help['heading'] + '</p>'
elif len(self.helpText) > 1:
the_help['heading'] = word('Help with this question')
result['helpText'].append(the_help)
result['help'] = dict()
if self.helpText[0]['label']:
result['help']['label'] = docassemble.base.filter.markdown_to_html(self.helpText[0]['label'], trim=True, do_terms=False, status=self, verbatim=(not encode))
else:
result['help']['label'] = self.question.help()
result['help']['title'] = word("Help is available for this question")
result['help']['specific'] = False if self.question.helptext is None else True
if 'questionText' not in result and self.question.question_type == "signature":
result['questionText'] = word('Sign Your Name')
if debug:
output['question'] += '<p>' + result['questionText'] + '</p>'
result['questionType'] = self.question.question_type
if hasattr(self.question, 'question_variety'):
result['questionVariety'] = self.question.question_variety
if self.question.is_mandatory or self.question.mandatory_code is not None:
result['mandatory'] = True
if hasattr(self.question, 'name'):
result['_question_name'] = self.question.name
result['_tracker'] = self.tracker
if hasattr(self, 'datatypes'):
result['_datatypes'] = safeid(json.dumps(self.datatypes))
if hasattr(self, 'varnames'):
result['_varnames'] = safeid(json.dumps(self.varnames))
if len(self.question.fields) > 0:
result['fields'] = list()
if hasattr(self.question, 'review_saveas'):
result['question_variable_name'] = self.question.review_saveas
if hasattr(self.question, 'fields_saveas'):
result['question_variable_name'] = self.question.fields_saveas
if self.decorations is not None:
width_value = get_config('decoration size', 2.0)
width_units = get_config('decoration units', 'em')
for decoration in self.decorations:
if 'image' in decoration:
result['decoration'] = {}
the_image = self.question.interview.images.get(decoration['image'], None)
if the_image is not None:
the_url = docassemble.base.functions.server.url_finder(str(the_image.package) + ':' + str(the_image.filename))
                        width = str(width_value) + str(width_units)
                        height = 'auto'  # default when the image is not an SVG with known dimensions
filename = docassemble.base.functions.server.file_finder(str(the_image.package) + ':' + str(the_image.filename))
if 'extension' in filename and filename['extension'] == 'svg' and 'width' in filename:
if filename['width'] and filename['height']:
height = str(width_value * (filename['height']/filename['width'])) + str(width_units)
else:
height = 'auto'
if the_url is not None:
result['decoration']['url'] = the_url
result['decoration']['size'] = {"width": width, "height": height}
if the_image.attribution is not None:
self.attributions.add(the_image.attribution)
break
elif get_config('default icons', None) in ('material icons', 'font awesome'):
result['decoration']['name'] = decoration['image']
result['decoration']['size'] = str(width_value) + str(width_units)
break
if len(self.attachments) > 0:
result['attachments'] = list()
if self.current_info['user']['is_authenticated'] and self.current_info['user']['email']:
result['default_email'] = self.current_info['user']['email']
for attachment in self.attachments:
the_attachment = dict(url=dict(), number=dict(), filename_with_extension=dict())
if 'orig_variable_name' in attachment and attachment['orig_variable_name']:
the_attachment['variable_name'] = attachment['orig_variable_name']
if 'name' in attachment:
if attachment['name']:
the_attachment['name'] = docassemble.base.filter.markdown_to_html(attachment['name'], trim=True, status=self, verbatim=(not encode))
if debug:
output['question'] += '<p>' + the_attachment['name'] + '</p>'
if 'description' in attachment:
if attachment['description']:
the_attachment['description'] = docassemble.base.filter.markdown_to_html(attachment['description'], status=self, verbatim=(not encode))
if debug:
output['question'] += the_attachment['description']
for key in ('valid_formats', 'filename', 'content', 'markdown', 'raw'):
if key in attachment:
if attachment[key]:
the_attachment[key] = attachment[key]
for the_format in attachment['file']:
the_attachment['url'][the_format] = docassemble.base.functions.server.url_finder(attachment['file'][the_format], filename=attachment['filename'] + '.' + extension_of_doc_format[the_format])
the_attachment['number'][the_format] = attachment['file'][the_format]
the_attachment['filename_with_extension'][the_format] = attachment['filename'] + '.' + extension_of_doc_format[the_format]
result['attachments'].append(the_attachment)
if self.extras.get('list_collect', False) is not False:
result['listCollect'] = {
'deleteLabel': word('Delete'),
'addAnotherLabel': self.extras['list_collect_add_another_label'] if self.extras['list_collect_add_another_label'] else word("Add another"),
'deletedLabel': word("(Deleted)"),
'undeleteLabel': word("Undelete"),
}
validation_rules_used = set()
file_fields = list()
for field in self.question.fields:
the_field = dict()
the_field['number'] = field.number
if hasattr(field, 'saveas'):
the_field['variable_name'] = from_safeid(field.saveas)
if encode:
the_field['variable_name_encoded'] = field.saveas
the_field['validation_messages'] = dict()
if self.question.question_type == 'multiple_choice' and self.question.question_variety in ["radio", "dropdown", "combobox"]:
if self.question.question_variety == 'combobox':
the_field['validation_messages']['required'] = field.validation_message('combobox required', self, word("You need to select one or type in a new value."))
else:
the_field['validation_messages']['required'] = field.validation_message('multiple choice required', self, word("You need to select one."))
elif not (hasattr(field, 'datatype') and field.datatype in ['multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes']):
if hasattr(field, 'inputtype') and field.inputtype == 'combobox':
the_field['validation_messages']['required'] = field.validation_message('combobox required', self, word("You need to select one or type in a new value."))
elif hasattr(field, 'inputtype') and field.inputtype == 'ajax':
the_field['validation_messages']['required'] = field.validation_message('combobox required', self, word("You need to select one."))
elif hasattr(field, 'datatype') and (field.datatype == 'object_radio' or (hasattr(field, 'inputtype') and field.inputtype in ('yesnoradio', 'noyesradio', 'radio', 'dropdown'))):
the_field['validation_messages']['required'] = field.validation_message('multiple choice required', self, word("You need to select one."))
else:
the_field['validation_messages']['required'] = field.validation_message('required', self, word("This field is required."))
if hasattr(field, 'inputtype') and field.inputtype in ['yesno', 'noyes', 'yesnowide', 'noyeswide'] and hasattr(field, 'uncheckothers') and field.uncheckothers is not False:
the_field['validation_messages']['uncheckothers'] = field.validation_message('checkboxes required', self, word("Check at least one option, or check “%s”"), parameters=tuple([strip_tags(self.labels[field.number])]))
if hasattr(field, 'datatype') and field.datatype not in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes'):
for key in ('minlength', 'maxlength'):
if hasattr(field, 'extras') and key in field.extras and key in self.extras:
if key == 'minlength':
the_field['validation_messages'][key] = field.validation_message(key, self, word("You must type at least %s characters."), parameters=tuple([self.extras[key][field.number]]))
elif key == 'maxlength':
the_field['validation_messages'][key] = field.validation_message(key, self, word("You cannot type more than %s characters."), parameters=tuple([self.extras[key][field.number]]))
if hasattr(field, 'datatype'):
if field.datatype in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes') and ((hasattr(field, 'nota') and self.extras['nota'][field.number] is not False) or (hasattr(field, 'extras') and (('minlength' in field.extras and 'minlength' in self.extras) or ('maxlength' in field.extras and 'maxlength' in self.extras)))):
if field.datatype.endswith('checkboxes'):
d_type = 'checkbox'
else:
d_type = 'multiselect'
if hasattr(field, 'extras') and (('minlength' in field.extras and 'minlength' in self.extras) or ('maxlength' in field.extras and 'maxlength' in self.extras)):
checkbox_messages = dict()
if 'minlength' in field.extras and 'minlength' in self.extras and 'maxlength' in field.extras and 'maxlength' in self.extras and self.extras['minlength'][field.number] == self.extras['maxlength'][field.number] and self.extras['minlength'][field.number] > 0:
if 'nota' not in self.extras:
self.extras['nota'] = dict()
self.extras['nota'][field.number] = False
if d_type == 'checkbox':
checkbox_messages['checkexactly'] = field.validation_message(d_type + ' minmaxlength', self, word("Please select exactly %s."), parameters=tuple([self.extras['maxlength'][field.number]]))
else:
checkbox_messages['selectexactly'] = field.validation_message(d_type + ' minmaxlength', self, word("Please select exactly %s."), parameters=tuple([self.extras['maxlength'][field.number]]))
else:
if 'minlength' in field.extras and 'minlength' in self.extras:
if d_type == 'checkbox':
if self.extras['minlength'][field.number] == 1:
checkbox_messages['checkatleast'] = field.validation_message('checkbox minlength', self, word("Please select one."))
else:
checkbox_messages['checkatleast'] = field.validation_message('checkbox minlength', self, word("Please select at least %s."), parameters=tuple([self.extras['minlength'][field.number]]))
if int(float(self.extras['minlength'][field.number])) > 0:
if 'nota' not in self.extras:
self.extras['nota'] = dict()
self.extras['nota'][field.number] = False
else:
if self.extras['minlength'][field.number] == 1:
checkbox_messages['minlength'] = field.validation_message(d_type + ' minlength', self, word("Please select one."))
else:
checkbox_messages['minlength'] = field.validation_message(d_type + ' minlength', self, word("Please select at least %s."), parameters=tuple([self.extras['minlength'][field.number]]))
if 'maxlength' in field.extras and 'maxlength' in self.extras:
if d_type == 'checkbox':
checkbox_messages['checkatmost'] = field.validation_message(d_type + ' maxlength', self, word("Please select no more than %s."), parameters=tuple([self.extras['maxlength'][field.number]]))
else:
checkbox_messages['maxlength'] = field.validation_message(d_type + ' maxlength', self, word("Please select no more than %s."), parameters=tuple([self.extras['maxlength'][field.number]]))
the_field['validation_messages'].update(checkbox_messages)
if d_type == 'checkbox':
if hasattr(field, 'nota') and self.extras['nota'][field.number] is not False:
the_field['validation_messages']['checkatleast'] = field.validation_message('checkboxes required', self, word("Check at least one option, or check “%s”"), parameters=tuple([self.extras['nota'][field.number]]))
if field.datatype == 'date':
the_field['validation_messages']['date'] = field.validation_message('date', self, word("You need to enter a valid date."))
if hasattr(field, 'extras') and 'min' in field.extras and 'min' in self.extras and 'max' in field.extras and 'max' in self.extras and field.number in self.extras['min'] and field.number in self.extras['max']:
the_field['validation_messages']['minmax'] = field.validation_message('date minmax', self, word("You need to enter a date between %s and %s."), parameters=(docassemble.base.util.format_date(self.extras['min'][field.number], format='medium'), docassemble.base.util.format_date(self.extras['max'][field.number], format='medium')))
else:
was_defined = dict()
for key in ['min', 'max']:
if hasattr(field, 'extras') and key in field.extras and key in self.extras and field.number in self.extras[key]:
was_defined[key] = True
if key == 'min':
the_field['validation_messages']['min'] = field.validation_message('date min', self, word("You need to enter a date on or after %s."), parameters=tuple([docassemble.base.util.format_date(self.extras[key][field.number], format='medium')]))
elif key == 'max':
the_field['validation_messages']['max'] = field.validation_message('date max', self, word("You need to enter a date on or before %s."), parameters=tuple([docassemble.base.util.format_date(self.extras[key][field.number], format='medium')]))
if len(was_defined) == 0 and 'default date min' in self.question.interview.options and 'default date max' in self.question.interview.options:
the_field['min'] = docassemble.base.util.format_date(self.question.interview.options['default date min'], format='yyyy-MM-dd')
the_field['max'] = docassemble.base.util.format_date(self.question.interview.options['default date max'], format='yyyy-MM-dd')
the_field['validation_messages']['minmax'] = field.validation_message('date minmax', self, word("You need to enter a date between %s and %s."), parameters=(docassemble.base.util.format_date(self.question.interview.options['default date min'], format='medium'), docassemble.base.util.format_date(self.question.interview.options['default date max'], format='medium')))
elif 'max' not in was_defined and 'default date max' in self.question.interview.options:
the_field['max'] = docassemble.base.util.format_date(self.question.interview.options['default date max'], format='yyyy-MM-dd')
the_field['validation_messages']['max'] = field.validation_message('date max', self, word("You need to enter a date on or before %s."), parameters=tuple([docassemble.base.util.format_date(self.question.interview.options['default date max'], format='medium')]))
elif 'min' not in was_defined and 'default date min' in self.question.interview.options:
the_field['min'] = docassemble.base.util.format_date(self.question.interview.options['default date min'], format='yyyy-MM-dd')
the_field['validation_messages']['min'] = field.validation_message('date min', self, word("You need to enter a date on or after %s."), parameters=tuple([docassemble.base.util.format_date(self.question.interview.options['default date min'], format='medium')]))
if field.datatype == 'time':
the_field['validation_messages']['time'] = field.validation_message('time', self, word("You need to enter a valid time."))
if field.datatype in ['datetime', 'datetime-local']:
the_field['validation_messages']['datetime'] = field.validation_message('datetime', self, word("You need to enter a valid date and time."))
if field.datatype == 'email':
the_field['validation_messages']['email'] = field.validation_message('email', self, word("You need to enter a complete e-mail address."))
if field.datatype in ['number', 'currency', 'float', 'integer']:
the_field['validation_messages']['number'] = field.validation_message('number', self, word("You need to enter a number."))
if field.datatype == 'integer' and not ('step' in self.extras and field.number in self.extras['step']):
the_field['validation_messages']['step'] = field.validation_message('integer', self, word("Please enter a whole number."))
elif 'step' in self.extras and field.number in self.extras['step']:
the_field['validation_messages']['step'] = field.validation_message('step', self, word("Please enter a multiple of {0}."))
for key in ['min', 'max']:
if hasattr(field, 'extras') and key in field.extras and key in self.extras and field.number in self.extras[key]:
if key == 'min':
the_field['validation_messages'][key] = field.validation_message('min', self, word("You need to enter a number that is at least %s."), parameters=tuple([self.extras[key][field.number]]))
elif key == 'max':
the_field['validation_messages'][key] = field.validation_message('max', self, word("You need to enter a number that is at most %s."), parameters=tuple([self.extras[key][field.number]]))
if (field.datatype in ['files', 'file', 'camera', 'user', 'environment', 'camcorder', 'microphone']):
file_fields.append(field)
the_field['validation_messages']['required'] = field.validation_message('file required', self, word("You must provide a file."))
if 'accept' in self.extras and field.number in self.extras['accept']:
the_field['validation_messages']['accept'] = field.validation_message('accept', self, word("Please upload a file with a valid file format."))
if get_config('maximum content length') is not None:
the_field['max'] = get_config('maximum content length')
the_field['validation_messages']['max'] = field.validation_message('maxuploadsize', self, word("Your file upload is larger than the server can accept. Please reduce the size of your file upload."))
for param in ('datatype', 'fieldtype', 'sign', 'inputtype', 'address_autocomplete'):
if hasattr(field, param):
the_field[param] = getattr(field, param)
if hasattr(field, 'shuffle') and field.shuffle is not False:
the_field['shuffle'] = True
if hasattr(field, 'disableothers') and field.disableothers and hasattr(field, 'saveas'):
the_field['disable_others'] = True
if hasattr(field, 'uncheckothers') and field.uncheckothers is not False:
the_field['uncheck_others'] = True
for key in ('minlength', 'maxlength', 'min', 'max', 'step', 'scale', 'inline', 'inline width', 'rows', 'accept', 'currency symbol', 'field metadata'):
if key in self.extras and field.number in self.extras[key]:
if key in ('minlength', 'maxlength', 'min', 'max', 'step'):
validation_rules_used.add(key)
the_field[key] = self.extras[key][field.number]
if hasattr(field, 'saveas') and field.saveas in self.embedded:
the_field['embedded'] = True
if hasattr(self, 'shuffle'):
the_field['shuffle'] = self.shuffle
if field.number in self.defaults:
the_default = self.defaults[field.number]
if isinstance(the_default, (str, int, bool, float)):
the_field['default'] = the_default
else:
the_default = None
if self.question.question_type == 'multiple_choice' or hasattr(field, 'choicetype') or (hasattr(field, 'datatype') and field.datatype in ('object', 'multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes', 'object_radio')):
the_field['choices'] = self.get_choices_data(field, the_default, the_user_dict, encode=encode)
if hasattr(field, 'nota'):
the_field['none_of_the_above'] = docassemble.base.filter.markdown_to_html(self.extras['nota'][field.number], do_terms=False, status=self, verbatim=(not encode))
the_field['active'] = self.extras['ok'][field.number]
if field.number in self.extras['required']:
the_field['required'] = self.extras['required'][field.number]
if the_field['required']:
validation_rules_used.add('required')
if 'validation messages' in self.extras and field.number in self.extras['validation messages']:
the_field['validation_messages'].update(self.extras['validation messages'][field.number])
if 'permissions' in self.extras:
the_field['permissions'] = self.extras['permissions'][field.number]
if hasattr(field, 'datatype') and field.datatype in ('file', 'files', 'camera', 'user', 'environment') and 'max_image_size' in self.extras and self.extras['max_image_size']:
the_field['max_image_size'] = self.extras['max_image_size']
if hasattr(field, 'datatype') and field.datatype in ('file', 'files', 'camera', 'user', 'environment') and 'image_type' in self.extras and self.extras['image_type']:
the_field['image_type'] = self.extras['image_type']
if hasattr(field, 'extras'):
if 'ml_group' in field.extras or 'ml_train' in field.extras:
the_field['ml_info'] = dict()
if 'ml_group' in field.extras:
the_field['ml_info']['group_id'] = self.extras['ml_group'][field.number]
if 'ml_train' in field.extras:
the_field['ml_info']['train'] = self.extras['ml_train'][field.number]
if 'show_if_var' in field.extras and 'show_if_val' in self.extras:
the_field['show_if_sign'] = field.extras['show_if_sign']
the_field['show_if_var'] = from_safeid(field.extras['show_if_var'])
the_field['show_if_val'] = self.extras['show_if_val'][field.number]
if 'show_if_js' in field.extras:
the_field['show_if_js'] = dict(expression=field.extras['show_if_js']['expression'].text(the_user_dict), vars=field.extras['show_if_js']['vars'], sign=field.extras['show_if_js']['sign'], mode=field.extras['show_if_js']['mode'])
if 'note' in self.extras and field.number in self.extras['note']:
the_field['note'] = docassemble.base.filter.markdown_to_html(self.extras['note'][field.number], status=self, verbatim=(not encode))
if 'html' in self.extras and field.number in self.extras['html']:
the_field['html'] = self.extras['html'][field.number]
if field.number in self.hints:
the_field['hint'] = self.hints[field.number]
if debug:
output['question'] += '<p>' + the_field['hint'] + '</p>'
if field.number in self.labels:
the_field['label'] = docassemble.base.filter.markdown_to_html(self.labels[field.number], trim=True, status=self, verbatim=(not encode))
if debug:
output['question'] += '<p>' + the_field['label'] + '</p>'
if field.number in self.helptexts:
the_field['helptext'] = docassemble.base.filter.markdown_to_html(self.helptexts[field.number], status=self, verbatim=(not encode))
if debug:
output['question'] += the_field['helptext']
if self.question.question_type in ("yesno", "yesnomaybe"):
the_field['true_label'] = docassemble.base.filter.markdown_to_html(self.question.yes(), trim=True, do_terms=False, status=self, verbatim=(not encode))
the_field['false_label'] = docassemble.base.filter.markdown_to_html(self.question.no(), trim=True, do_terms=False, status=self, verbatim=(not encode))
if debug:
output['question'] += '<p>' + the_field['true_label'] + '</p>'
output['question'] += '<p>' + the_field['false_label'] + '</p>'
if self.question.question_type == 'yesnomaybe':
the_field['maybe_label'] = docassemble.base.filter.markdown_to_html(self.question.maybe(), trim=True, do_terms=False, status=self, verbatim=(not encode))
if debug:
output['question'] += '<p>' + the_field['maybe_label'] + '</p>'
result['fields'].append(the_field)
if len(self.attributions):
result['attributions'] = [x.rstrip() for x in self.attributions]
if 'track_location' in self.extras and self.extras['track_location']:
result['track_location'] = True
if 'inverse navbar' in self.question.interview.options:
if self.question.interview.options['inverse navbar']:
result['navbarVariant'] = 'dark'
else:
result['navbarVariant'] = 'light'
elif get_config('inverse navbar', True):
result['navbarVariant'] = 'dark'
else:
result['navbarVariant'] = 'light'
if debug:
readability = dict()
for question_type in ('question', 'help'):
if question_type not in output:
continue
phrase = docassemble.base.functions.server.to_text(output[question_type])
if (not phrase) or len(phrase) < 10:
phrase = "The sky is blue."
phrase = re.sub(r'[^A-Za-z 0-9\.\,\?\#\!\%\&\(\)]', r' ', phrase)
readability[question_type] = [('Flesch Reading Ease', textstat.flesch_reading_ease(phrase)),
('Flesch-Kincaid Grade Level', textstat.flesch_kincaid_grade(phrase)),
('Gunning FOG Scale', textstat.gunning_fog(phrase)),
('SMOG Index', textstat.smog_index(phrase)),
('Automated Readability Index', textstat.automated_readability_index(phrase)),
('Coleman-Liau Index', textstat.coleman_liau_index(phrase)),
('Linsear Write Formula', textstat.linsear_write_formula(phrase)),
('Dale-Chall Readability Score', textstat.dale_chall_readability_score(phrase)),
('Readability Consensus', textstat.text_standard(phrase))]
result['source'] = {'label': word("Source"), 'title': word("How this question came to be asked"), 'history': self.get_history(), 'readability': readability}
return result
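    # Returns a list of [label, variable name, value] triples describing the
    # selectable choices for a field (presumably consumed when rendering
    # buttons and multiple-choice controls).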
def get_choices(self, field, the_user_dict):
question = self.question
choice_list = list()
if hasattr(field, 'saveas') and field.saveas is not None:
saveas = from_safeid(field.saveas)
if self.question.question_type == "multiple_choice":
#if hasattr(field, 'has_code') and field.has_code:
pairlist = list(self.selectcompute[field.number])
for pair in pairlist:
choice_list.append([pair['label'], saveas, pair['key']])
elif hasattr(field, 'choicetype'):
if field.choicetype in ('compute', 'manual'):
pairlist = list(self.selectcompute[field.number])
elif field.datatype in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes'):
pairlist = list()
if field.datatype in ('object_multiselect', 'object_checkboxes'):
for pair in pairlist:
choice_list.append([pair['label'], saveas, from_safeid(pair['key'])])
elif field.datatype in ('object', 'object_radio'):
for pair in pairlist:
choice_list.append([pair['label'], saveas, from_safeid(pair['key'])])
elif field.datatype in ('multiselect', 'checkboxes'):
for pair in pairlist:
choice_list.append([pair['label'], saveas + "[" + repr(pair['key']) + "]", True])
else:
for pair in pairlist:
choice_list.append([pair['label'], saveas, pair['key']])
if hasattr(field, 'nota') and (field.datatype.endswith('checkboxes') and self.extras['nota'][field.number] is not False): #or (field.datatype.endswith('multiselect') and self.extras['nota'][field.number] is True)
if self.extras['nota'][field.number] is True:
formatted_item = word("None of the above")
else:
formatted_item = self.extras['nota'][field.number]
choice_list.append([formatted_item, None, None])
else:
indexno = 0
for choice in self.selectcompute[field.number]:
choice_list.append([choice['label'], '_internal["answers"][' + repr(question.extended_question_name(the_user_dict)) + ']', indexno])
indexno += 1
return choice_list
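    # Looks up a decoration/image by name in the interview's image registry,
    # records any attribution, and returns a URL (or None if not found).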
def icon_url(self, name):
the_image = self.question.interview.images.get(name, None)
if the_image is None:
return None
if the_image.attribution is not None:
self.attributions.add(the_image.attribution)
url = docassemble.base.functions.server.url_finder(str(the_image.package) + ':' + str(the_image.filename))
return url
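    # Like get_choices(), but returns dictionaries (label, value, selected,
    # help, image, ...) suitable for serializing to the client, marking the
    # default selection based on defaultvalue.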
def get_choices_data(self, field, defaultvalue, the_user_dict, encode=True):
question = self.question
choice_list = list()
if hasattr(field, 'saveas') and field.saveas is not None:
saveas = from_safeid(field.saveas)
if self.question.question_type == "multiple_choice":
pairlist = list(self.selectcompute[field.number])
for pair in pairlist:
item = dict(label=docassemble.base.filter.markdown_to_html(pair['label'], trim=True, do_terms=False, status=self, verbatim=encode), value=pair['key'])
if 'help' in pair:
item['help'] = docassemble.base.filter.markdown_to_html(pair['help'].rstrip(), trim=True, do_terms=False, status=self, verbatim=encode)
if 'default' in pair:
item['default'] = pair['default']
if 'image' in pair:
if isinstance(pair['image'], dict):
if pair['image']['type'] == 'url':
item['image'] = pair['image']['value']
else:
item['image'] = self.icon_url(pair['image']['value'])
else:
item['image'] = self.icon_url(pair['image'])
choice_list.append(item)
elif hasattr(field, 'choicetype'):
if field.choicetype in ('compute', 'manual'):
pairlist = list(self.selectcompute[field.number])
elif field.datatype in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes'):
pairlist = list()
if field.datatype in ('object_multiselect', 'object_checkboxes'):
for pair in pairlist:
item = dict(label=docassemble.base.filter.markdown_to_html(pair['label'], trim=True, do_terms=False, status=self, verbatim=encode), value=from_safeid(pair['key']))
if ('default' in pair and pair['default']) or (defaultvalue is not None and isinstance(defaultvalue, (list, set)) and str(pair['key']) in defaultvalue) or (isinstance(defaultvalue, dict) and str(pair['key']) in defaultvalue and defaultvalue[str(pair['key'])]) or (isinstance(defaultvalue, (str, int, bool, float)) and str(pair['key']) == str(defaultvalue)):
item['selected'] = True
if 'help' in pair:
item['help'] = pair['help']
choice_list.append(item)
elif field.datatype in ('object', 'object_radio'):
for pair in pairlist:
item = dict(label=docassemble.base.filter.markdown_to_html(pair['label'], trim=True, do_terms=False, status=self, verbatim=encode), value=from_safeid(pair['key']))
if ('default' in pair and pair['default']) or (defaultvalue is not None and isinstance(defaultvalue, (str, int, bool, float)) and str(pair['key']) == str(defaultvalue)):
item['selected'] = True
if 'default' in pair:
item['default'] = str(pair['default'])
if 'help' in pair:
item['help'] = pair['help']
choice_list.append(item)
elif field.datatype in ('multiselect', 'checkboxes'):
for pair in pairlist:
item = dict(label=docassemble.base.filter.markdown_to_html(pair['label'], trim=True, do_terms=False, status=self, verbatim=encode), variable_name=saveas + "[" + repr(pair['key']) + "]", value=True)
if encode:
item['variable_name_encoded'] = safeid(saveas + "[" + repr(pair['key']) + "]")
if ('default' in pair and pair['default']) or (defaultvalue is not None and isinstance(defaultvalue, (list, set)) and str(pair['key']) in defaultvalue) or (isinstance(defaultvalue, dict) and str(pair['key']) in defaultvalue and defaultvalue[str(pair['key'])]) or (isinstance(defaultvalue, (str, int, bool, float)) and str(pair['key']) == str(defaultvalue)):
item['selected'] = True
if 'help' in pair:
item['help'] = pair['help']
choice_list.append(item)
else:
for pair in pairlist:
item = dict(label=docassemble.base.filter.markdown_to_html(pair['label'], trim=True, do_terms=False, status=self, verbatim=encode), value=pair['key'])
if ('default' in pair and pair['default']) or (defaultvalue is not None and isinstance(defaultvalue, (str, int, bool, float)) and str(pair['key']) == str(defaultvalue)):
item['selected'] = True
choice_list.append(item)
if hasattr(field, 'nota') and self.extras['nota'][field.number] is not False:
if self.extras['nota'][field.number] is True:
formatted_item = word("None of the above")
else:
formatted_item = self.extras['nota'][field.number]
choice_list.append(dict(label=docassemble.base.filter.markdown_to_html(formatted_item, trim=True, do_terms=False, status=self, verbatim=encode)))
else:
indexno = 0
for choice in self.selectcompute[field.number]:
item = dict(label=docassemble.base.filter.markdown_to_html(choice['label'], trim=True, do_terms=False, status=self, verbatim=encode), variable_name='_internal["answers"][' + repr(question.extended_question_name(the_user_dict)) + ']', value=indexno)
if encode:
item['variable_name_encoded'] = safeid('_internal["answers"][' + repr(question.extended_question_name(the_user_dict)) + ']')
if 'image' in choice:
the_image = self.icon_url(choice['image'])
if the_image:
item['image'] = the_image
if 'help' in choice:
item['help'] = choice['help']
if 'default' in choice:
item['default'] = choice['default']
choice_list.append(item)
indexno += 1
return choice_list
# def new_counter(initial_value=0):
# d = {'counter': initial_value}
# def f():
# return_value = d['counter']
# d['counter'] += 1
# return(return_value)
# return f
# increment_question_counter = new_counter()
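# A TextObject wraps a piece of interview text.  If the text contains Mako
# markup it is compiled once here and rendered against the user dictionary in
# text(); translations, where available, are stored per target language.
#
# A minimal sketch (assuming a plain, non-Mako string and no translations):
#
#     label = TextObject("Hello, world")
#     label.text({})   # -> "Hello, world"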
class TextObject:
def __deepcopy__(self, memo):
return TextObject(self.original_text)
def __init__(self, x, question=None, translate=True):
self.original_text = x
self.other_lang = dict()
if translate and question is not None and question.interview.source.translating and isinstance(x, str) and re.search(r'[^\s0-9]', self.original_text) and not re.search(r'\<%doc\>\s*do not translate', self.original_text, re.IGNORECASE) and self.original_text != 'no label':
if not hasattr(question, 'translations'):
question.translations = list()
if self.original_text not in question.translations:
question.translations.append(self.original_text)
if isinstance(x, str) and match_mako.search(x):
if question is None:
names_used = set()
else:
names_used = question.names_used
self.template = MakoTemplate(x, strict_undefined=True, input_encoding='utf-8')
for x in self.template.names_used - self.template.names_set:
names_used.add(x)
self.uses_mako = True
else:
self.uses_mako = False
if translate and question is not None and len(question.interview.translations) and isinstance(x, str):
if self.original_text in question.interview.translation_dict:
if question.language == '*':
self.language = docassemble.base.functions.server.default_language
else:
self.language = question.language
for orig_lang in question.interview.translation_dict[self.original_text]:
if orig_lang == question.language or (question.language == '*' and orig_lang == docassemble.base.functions.server.default_language):
for target_lang in question.interview.translation_dict[self.original_text][orig_lang]:
if self.uses_mako:
self.other_lang[target_lang] = (question.interview.translation_dict[self.original_text][orig_lang][target_lang], MakoTemplate(question.interview.translation_dict[self.original_text][orig_lang][target_lang], strict_undefined=True, input_encoding='utf-8'))
else:
self.other_lang[target_lang] = (question.interview.translation_dict[self.original_text][orig_lang][target_lang],)
def text(self, the_user_dict):
if len(self.other_lang):
target_lang = docassemble.base.functions.get_language()
if self.language != target_lang and target_lang in self.other_lang:
if self.uses_mako:
return(self.other_lang[target_lang][1].render(**the_user_dict))
else:
return(self.other_lang[target_lang][0])
if self.uses_mako:
return(self.template.render(**the_user_dict))
else:
return(self.original_text)
def myb64quote(text):
return "'" + re.sub(r'[\n=]', '', codecs.encode(text.encode('utf8'), 'base64').decode()) + "'"
def safeid(text):
return re.sub(r'[\n=]', '', codecs.encode(text.encode('utf8'), 'base64').decode())
def from_safeid(text):
return(codecs.decode(repad(bytearray(text, encoding='utf-8')), 'base64').decode('utf8'))
def repad(text):
return text + (equals_byte * ((4 - len(text) % 4) % 4))
class Field:
def __init__(self, data):
if 'number' in data:
self.number = data['number']
else:
self.number = 0
if 'saveas' in data:
self.saveas = safeid(data['saveas'])
if 'saveas_code' in data:
self.saveas_code = data['saveas_code']
if 'showif_code' in data:
self.showif_code = data['showif_code']
if 'action' in data:
self.action = data['action']
if 'label' in data:
self.label = data['label']
if 'type' in data:
self.datatype = data['type']
if 'choicetype' in data:
self.choicetype = data['choicetype']
if 'disable others' in data:
self.disableothers = data['disable others']
if 'uncheck others' in data:
self.uncheckothers = data['uncheck others']
if 'default' in data:
self.default = data['default']
if 'combobox action' in data:
self.combobox_action = data['combobox action']
if 'hint' in data:
self.hint = data['hint']
if 'data' in data:
self.data = data['data']
if 'help' in data:
self.helptext = data['help']
if 'validate' in data:
self.validate = data['validate']
if 'validation messages' in data:
self.validation_messages = data['validation messages']
if 'address_autocomplete' in data:
self.address_autocomplete = data['address_autocomplete']
if 'max_image_size' in data:
self.max_image_size = data['max_image_size']
if 'image_type' in data:
self.image_type = data['image_type']
if 'accept' in data:
self.accept = data['accept']
if 'persistent' in data or 'private' in data or 'allow_users' in data or 'allow_privileges' in data:
self.permissions = dict(persistent=data.get('persistent', None), private=data.get('private', None), allow_users=data.get('allow_users', None), allow_privileges=data.get('allow_privileges', None))
if 'rows' in data:
self.rows = data['rows']
if 'object_labeler' in data:
self.object_labeler = data['object_labeler']
if 'help_generator' in data:
self.help_generator = data['help_generator']
if 'image_generator' in data:
self.image_generator = data['image_generator']
if 'extras' in data:
self.extras = data['extras']
if 'selections' in data:
self.selections = data['selections']
if 'boolean' in data:
self.datatype = 'boolean'
self.sign = data['boolean']
if 'type' in data:
self.inputtype = data['type']
if 'threestate' in data:
self.datatype = 'threestate'
self.sign = data['threestate']
if 'type' in data:
self.inputtype = data['type']
if 'choices' in data:
self.fieldtype = 'multiple_choice'
self.choices = data['choices']
if 'inputtype' in data:
self.inputtype = data['inputtype']
if 'has_code' in data:
self.has_code = True
# if 'script' in data:
# self.script = data['script']
# if 'css' in data:
# self.css = data['css']
if 'shuffle' in data:
self.shuffle = data['shuffle']
if 'nota' in data:
self.nota = data['nota']
if 'required' in data:
self.required = data['required']
else:
self.required = True
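    # Resolves the validation message to show for this field: a message defined
    # on the field itself wins, then any interview-wide default for the
    # question's language, then default_message; %-style parameters are
    # interpolated when supplied.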
def validation_message(self, validation_type, status, default_message, parameters=None):
message = None
if 'validation messages' in status.extras and self.number in status.extras['validation messages']:
validation_type_tail = re.sub(r'.* ', '', validation_type)
if validation_type in status.extras['validation messages'][self.number]:
message = status.extras['validation messages'][self.number][validation_type]
elif validation_type != validation_type_tail and validation_type_tail in status.extras['validation messages'][self.number]:
message = status.extras['validation messages'][self.number][validation_type_tail]
if message is None and status.question.language in status.question.interview.default_validation_messages and validation_type in status.question.interview.default_validation_messages[status.question.language]:
message = status.question.interview.default_validation_messages[status.question.language][validation_type]
if message is None:
message = default_message
if parameters is not None and len(parameters) > 0:
try:
message = message % parameters
except TypeError:
pass
return message
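# The recursive_* helpers below walk nested dicts, lists, and sets (including
# DA objects that expose .elements), either wrapping leaf values as TextObjects
# or evaluating leaf TextObjects/compiled code against the user dictionary.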
def recursive_eval_dataobject(target, the_user_dict):
if isinstance(target, dict) or (hasattr(target, 'elements') and isinstance(target.elements, dict)):
new_dict = dict()
for key, val in target.items():
new_dict[key] = recursive_eval_dataobject(val, the_user_dict)
return new_dict
if isinstance(target, list) or (hasattr(target, 'elements') and isinstance(target.elements, list)):
new_list = list()
for val in target.__iter__():
new_list.append(recursive_eval_dataobject(val, the_user_dict))
return new_list
if isinstance(target, set) or (hasattr(target, 'elements') and isinstance(target.elements, set)):
new_set = set()
for val in target.__iter__():
new_set.add(recursive_eval_dataobject(val, the_user_dict))
return new_set
if isinstance(target, (bool, float, int, NoneType)):
return target
if isinstance(target, TextObject):
return target.text(the_user_dict)
else:
raise DAError("recursive_eval_dataobject: expected a TextObject, but found a " + str(type(target)))
def recursive_eval_data_from_code(target, the_user_dict):
if isinstance(target, dict):
new_dict = dict()
for key, val in target.items():
new_dict[key] = recursive_eval_data_from_code(val, the_user_dict)
return new_dict
if isinstance(target, list):
new_list = list()
for val in target:
new_list.append(recursive_eval_data_from_code(val, the_user_dict))
return new_list
if isinstance(target, set):
new_set = set()
for val in target:
new_set.add(recursive_eval_data_from_code(val, the_user_dict))
return new_set
if isinstance(target, CodeType):
return eval(target, the_user_dict)
else:
return target
def recursive_textobject(target, question):
if isinstance(target, dict) or (hasattr(target, 'elements') and isinstance(target.elements, dict)):
new_dict = dict()
for key, val in target.items():
new_dict[key] = recursive_textobject(val, question)
return new_dict
if isinstance(target, list) or (hasattr(target, 'elements') and isinstance(target.elements, list)):
new_list = list()
for val in target.__iter__():
new_list.append(recursive_textobject(val, question))
return new_list
if isinstance(target, set) or (hasattr(target, 'elements') and isinstance(target.elements, set)):
new_set = set()
for val in target.__iter__():
new_set.add(recursive_textobject(val, question))
return new_set
return TextObject(str(target), question=question)
def recursive_eval_textobject(target, the_user_dict, question, tpl, skip_undefined):
if isinstance(target, dict) or (hasattr(target, 'elements') and isinstance(target.elements, dict)):
new_dict = dict()
for key, val in target.items():
new_dict[key] = recursive_eval_textobject(val, the_user_dict, question, tpl, skip_undefined)
return new_dict
if isinstance(target, list) or (hasattr(target, 'elements') and isinstance(target.elements, list)):
new_list = list()
for val in target.__iter__():
new_list.append(recursive_eval_textobject(val, the_user_dict, question, tpl, skip_undefined))
return new_list
if isinstance(target, set) or (hasattr(target, 'elements') and isinstance(target.elements, set)):
new_set = set()
for val in target.__iter__():
new_set.add(recursive_eval_textobject(val, the_user_dict, question, tpl, skip_undefined))
return new_set
if isinstance(target, (bool, NoneType)):
return target
if isinstance(target, TextObject):
if skip_undefined:
try:
text = target.text(the_user_dict)
except:
text = ''
else:
text = target.text(the_user_dict)
return docassemble.base.file_docx.transform_for_docx(text, question, tpl)
else:
raise DAError("recursive_eval_textobject: expected a TextObject, but found a " + str(type(target)))
def recursive_textobject_or_primitive(target, question):
if isinstance(target, dict) or (hasattr(target, 'elements') and isinstance(target.elements, dict)):
new_dict = dict()
for key, val in target.items():
new_dict[key] = recursive_textobject_or_primitive(val, question)
return new_dict
if isinstance(target, list) or (hasattr(target, 'elements') and isinstance(target.elements, list)):
new_list = list()
for val in target.__iter__():
new_list.append(recursive_textobject_or_primitive(val, question))
return new_list
if isinstance(target, set) or (hasattr(target, 'elements') and isinstance(target.elements, set)):
new_set = set()
for val in target.__iter__():
new_set.add(recursive_textobject_or_primitive(val, question))
return new_set
if isinstance(target, (int, bool, float, NoneType)):
return target
return TextObject(str(target), question=question)
def recursive_eval_textobject_or_primitive(target, the_user_dict):
if isinstance(target, dict) or (hasattr(target, 'elements') and isinstance(target.elements, dict)):
new_dict = dict()
for key, val in target.items():
new_dict[key] = recursive_eval_textobject_or_primitive(val, the_user_dict)
return new_dict
if isinstance(target, list) or (hasattr(target, 'elements') and isinstance(target.elements, list)):
new_list = list()
for val in target.__iter__():
new_list.append(recursive_eval_textobject_or_primitive(val, the_user_dict))
return new_list
if isinstance(target, set) or (hasattr(target, 'elements') and isinstance(target.elements, set)):
new_set = set()
for val in target.__iter__():
new_set.add(recursive_eval_textobject_or_primitive(val, the_user_dict))
return new_set
if isinstance(target, (bool, int, float, NoneType)):
return target
if isinstance(target, TextObject):
return target.text(the_user_dict)
else:
raise DAError("recursive_eval_textobject_or_primitive: expected a TextObject, but found a " + str(type(target)))
def fix_quotes(match):
instring = match.group(1)
n = len(instring)
output = ''
i = 0
while i < n:
if instring[i] == '\u201c' or instring[i] == '\u201d':
output += '"'
elif instring[i] == '\u2018' or instring[i] == '\u2019':
output += "'"
        elif instring[i] == '&' and i + 4 < n and instring[i:i+5] == '&amp;':
output += '&'
i += 4
else:
output += instring[i]
i += 1
return output
def docx_variable_fix(variable):
variable = re.sub(r'\\', '', variable)
variable = re.sub(r'^([A-Za-z\_][A-Za-z\_0-9]*).*', r'\1', variable)
return variable
def url_sanitize(url):
return re.sub(r'\s', ' ', url)
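# FileInPackage represents a template file reference that may be given either
# as a literal path within a package or as a dictionary with 'code' that is
# evaluated at runtime to produce a DAFile, DAFileList, DAStaticFile, URL, or
# package path.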
class FileInPackage:
def __init__(self, fileref, area, package):
if area == 'template' and not isinstance(fileref, dict):
docassemble.base.functions.package_template_filename(fileref, package=package)
self.fileref = fileref
if isinstance(self.fileref, dict):
self.is_code = True
if 'code' not in self.fileref:
raise DAError("A docx or pdf template file expressed in the form of a dictionary must have 'code' as the key" + str(self.fileref))
self.code = compile(self.fileref['code'], '<template file code>', 'eval')
else:
self.is_code = False
self.area = area
self.package = package
def path(self, the_user_dict=dict()):
if self.area == 'template':
if self.is_code:
if len(the_user_dict) == 0:
raise Exception("FileInPackage.path: called with empty dict")
the_file_ref = eval(self.code, the_user_dict)
if isinstance(the_file_ref, list) and len(the_file_ref):
the_file_ref = the_file_ref[0]
if the_file_ref.__class__.__name__ == 'DAFile':
the_file_ref = the_file_ref.path()
elif the_file_ref.__class__.__name__ == 'DAFileList' and len(the_file_ref.elements) > 0:
the_file_ref = the_file_ref.elements[0].path()
elif the_file_ref.__class__.__name__ == 'DAStaticFile':
the_file_ref = the_file_ref.path()
elif re.search(r'^https?://', str(the_file_ref)):
temp_template_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", delete=False)
try:
urlretrieve(url_sanitize(str(the_file_ref)), temp_template_file.name)
except Exception as err:
raise DAError("FileInPackage: error downloading " + str(the_file_ref) + ": " + str(err))
the_file_ref = temp_template_file.name
if not str(the_file_ref).startswith('/'):
the_file_ref = docassemble.base.functions.package_template_filename(str(the_file_ref), package=self.package)
return the_file_ref
else:
return docassemble.base.functions.package_template_filename(self.fileref, package=self.package)
def paths(self, the_user_dict=dict()):
if self.area == 'template':
result = []
if self.is_code:
if len(the_user_dict) == 0:
raise Exception("FileInPackage.path: called with empty dict")
the_file_refs = eval(self.code, the_user_dict)
if not isinstance(the_file_refs, list):
the_file_refs = [the_file_refs]
for the_file_ref in the_file_refs:
if the_file_ref.__class__.__name__ == 'DAFile':
result.append(the_file_ref.path())
elif the_file_ref.__class__.__name__ == 'DAFileList' and len(the_file_ref.elements) > 0:
for item in the_file_ref.elements:
result.append(item.path())
elif the_file_ref.__class__.__name__ == 'DAStaticFile':
result.append(the_file_ref.path())
elif re.search(r'^https?://', str(the_file_ref)):
temp_template_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", delete=False)
try:
urlretrieve(url_sanitize(str(the_file_ref)), temp_template_file.name)
except Exception as err:
raise DAError("FileInPackage: error downloading " + str(the_file_ref) + ": " + str(err))
result.append(temp_template_file.name)
else:
result.append(the_file_ref)
else:
result.append(docassemble.base.functions.package_template_filename(self.fileref, package=self.package))
final_result = []
for the_file_ref in result:
if not str(the_file_ref).startswith('/'):
final_result.append(docassemble.base.functions.package_template_filename(str(the_file_ref), package=self.package))
else:
final_result.append(the_file_ref)
return final_result
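# FileOnServer resolves a file reference to an absolute path via the server's
# file finder, raising a DAError if the file cannot be located.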
class FileOnServer:
def __init__(self, fileref, question):
self.fileref = fileref
self.question = question
def path(self):
info = docassemble.base.functions.server.file_finder(self.fileref, question=self.question)
if 'fullpath' in info and info['fullpath']:
return info['fullpath']
raise DAError("Could not find the file " + str(self.fileref))
class Question:
def idebug(self, data):
if hasattr(self, 'from_source') and hasattr(self, 'package'):
return "\nIn file " + str(self.from_source.path) + " from package " + str(self.package) + ":\n\n" + yaml.dump(data)
else:
return yaml.dump(data)
def __init__(self, orig_data, caller, **kwargs):
if not isinstance(orig_data, dict):
raise DAError("A block must be in the form of a dictionary." + self.idebug(orig_data))
data = dict()
for key, value in orig_data.items():
data[key.lower()] = value
should_append = True
if 'register_target' in kwargs:
register_target = kwargs['register_target']
main_list = False
else:
register_target = self
main_list = True
self.from_source = kwargs.get('source', None)
self.package = kwargs.get('package', None)
self.interview = caller
if self.interview.debug:
self.source_code = kwargs.get('source_code', None)
self.fields = []
self.attachments = []
self.is_generic = False
self.name = None
self.role = list()
self.condition = list()
self.terms = dict()
self.autoterms = dict()
self.need = None
self.need_post = None
self.scan_for_variables = True
self.embeds = False
self.helptext = None
self.subcontent = None
self.reload_after = None
self.continuelabel = None
self.backbuttonlabel = None
self.cornerbackbuttonlabel = None
self.helplabel = None
self.progress = None
self.section = None
self.script = None
self.css = None
self.checkin = None
self.target = None
self.decorations = None
self.audiovideo = None
self.compute_attachment = None
self.can_go_back = True
self.other_fields_used = set()
self.fields_used = set()
self.fields_for_invalidation = set()
self.fields_for_onchange = set()
self.names_used = set()
self.mako_names = set()
self.reconsider = list()
self.undefine = list()
self.action_buttons = list()
self.validation_code = None
num_directives = 0
for directive in ('yesno', 'noyes', 'yesnomaybe', 'noyesmaybe', 'fields', 'buttons', 'choices', 'dropdown', 'combobox', 'signature', 'review'):
if directive in data:
num_directives += 1
if num_directives > 1:
raise DAError("There can only be one directive in a question. You had more than one.\nThe directives are yesno, noyes, yesnomaybe, noyesmaybe, fields, buttons, choices, dropdown, combobox, and signature." + self.idebug(data))
if num_directives > 0 and 'question' not in data:
raise DAError("This block is missing a 'question' directive." + self.idebug(data))
if self.interview.debug:
for key in data:
if key not in ('features', 'scan for variables', 'only sets', 'question', 'code', 'event', 'translations', 'default language', 'on change', 'sections', 'progressive', 'auto open', 'section', 'machine learning storage', 'language', 'prevent going back', 'back button', 'usedefs', 'continue button label', 'resume button label', 'back button label', 'corner back button label', 'skip undefined', 'list collect', 'mandatory', 'attachment options', 'script', 'css', 'initial', 'default role', 'command', 'objects from file', 'use objects', 'data', 'variable name', 'data from code', 'objects', 'id', 'ga id', 'segment id', 'segment', 'supersedes', 'order', 'image sets', 'images', 'def', 'mako', 'interview help', 'default screen parts', 'default validation messages', 'generic object', 'generic list object', 'comment', 'metadata', 'modules', 'reset', 'imports', 'terms', 'auto terms', 'role', 'include', 'action buttons', 'if', 'validation code', 'require', 'orelse', 'attachment', 'attachments', 'attachment code', 'attachments code', 'allow emailing', 'allow downloading', 'email subject', 'email body', 'email address default', 'progress', 'zip filename', 'action', 'backgroundresponse', 'response', 'binaryresponse', 'all_variables', 'response filename', 'content type', 'redirect url', 'null response', 'sleep', 'include_internal', 'css class', 'table css class', 'response code', 'subquestion', 'reload', 'help', 'audio', 'video', 'decoration', 'signature', 'under', 'pre', 'post', 'right', 'check in', 'yesno', 'noyes', 'yesnomaybe', 'noyesmaybe', 'sets', 'event', 'choices', 'buttons', 'dropdown', 'combobox', 'field', 'shuffle', 'review', 'need', 'depends on', 'target', 'table', 'rows', 'columns', 'require gathered', 'allow reordering', 'edit', 'delete buttons', 'confirm', 'read only', 'edit header', 'confirm', 'show if empty', 'template', 'content file', 'content', 'subject', 'reconsider', 'undefine', 'continue button field', 'fields', 'indent', 'url', 'default', 'datatype', 'extras', 'allowed to set', 'show incomplete', 'not available label', 'required', 'always include editable files', 'question metadata', 'include attachment notice', 'include download tab', 'manual attachment list'):
logmessage("Ignoring unknown dictionary key '" + key + "'." + self.idebug(data))
if 'features' in data:
should_append = False
if not isinstance(data['features'], dict):
raise DAError("A features section must be a dictionary." + self.idebug(data))
if data['features'].get('use catchall', False):
self.interview.options['use catchall'] = True
if 'table width' in data['features']:
if not isinstance(data['features']['table width'], int):
raise DAError("Table width in features must be an integer." + self.idebug(data))
self.interview.table_width = data['features']['table width']
if 'progress bar' in data['features']:
self.interview.use_progress_bar = True if data['features']['progress bar'] else False
if 'progress can go backwards' in data['features'] and data['features']['progress can go backwards']:
self.interview.options['strict progress'] = True
if 'show progress bar percentage' in data['features'] and data['features']['show progress bar percentage']:
self.interview.show_progress_bar_percentage = True
if 'progress bar method' in data['features'] and isinstance(data['features']['progress bar method'], str):
self.interview.progress_bar_method = data['features']['progress bar method']
if 'progress bar multiplier' in data['features'] and isinstance(data['features']['progress bar multiplier'], (int, float)):
if data['features']['progress bar multiplier'] <= 0.0 or data['features']['progress bar multiplier'] >= 1.0:
raise DAError("progress bar multiplier in features must be between 0 and 1." + self.idebug(data))
                self.interview.progress_bar_multiplier = data['features']['progress bar multiplier']
if 'question back button' in data['features']:
self.interview.question_back_button = True if data['features']['question back button'] else False
if 'question help button' in data['features']:
self.interview.question_help_button = True if data['features']['question help button'] else False
if 'navigation back button' in data['features']:
self.interview.navigation_back_button = True if data['features']['navigation back button'] else False
if 'go full screen' in data['features'] and data['features']['go full screen']:
self.interview.force_fullscreen = data['features']['go full screen']
if 'navigation' in data['features'] and data['features']['navigation']:
self.interview.use_navigation = data['features']['navigation']
if 'small screen navigation' in data['features']:
if data['features']['small screen navigation'] == 'dropdown':
self.interview.use_navigation_on_small_screens = 'dropdown'
else:
if not data['features']['small screen navigation']:
self.interview.use_navigation_on_small_screens = False
if 'centered' in data['features'] and not data['features']['centered']:
self.interview.flush_left = True
if 'maximum image size' in data['features']:
self.interview.max_image_size = eval(str(data['features']['maximum image size']))
if 'image upload type' in data['features']:
self.interview.image_type = str(data['features']['image upload type'])
if 'debug' in data['features'] and isinstance(data['features']['debug'], bool):
self.interview.debug = data['features']['debug']
if 'cache documents' in data['features']:
self.interview.cache_documents = data['features']['cache documents']
if 'loop limit' in data['features']:
self.interview.loop_limit = data['features']['loop limit']
if 'recursion limit' in data['features']:
self.interview.recursion_limit = data['features']['recursion limit']
if 'pdf/a' in data['features'] and data['features']['pdf/a'] in (True, False):
self.interview.use_pdf_a = data['features']['pdf/a']
if 'tagged pdf' in data['features'] and data['features']['tagged pdf'] in (True, False):
self.interview.use_tagged_pdf = data['features']['tagged pdf']
if 'bootstrap theme' in data['features'] and data['features']['bootstrap theme']:
self.interview.bootstrap_theme = data['features']['bootstrap theme']
if 'inverse navbar' in data['features']:
self.interview.options['inverse navbar'] = data['features']['inverse navbar']
if 'popover trigger' in data['features']:
self.interview.options['popover trigger'] = data['features']['popover trigger']
if 'review button color' in data['features']:
self.interview.options['review button color'] = data['features']['review button color']
if 'review button icon' in data['features']:
self.interview.options['review button icon'] = data['features']['review button icon']
if 'disable analytics' in data['features'] and data['features']['disable analytics']:
                self.interview.options['analytics on'] = False
if 'hide navbar' in data['features']:
self.interview.options['hide navbar'] = data['features']['hide navbar']
if 'hide standard menu' in data['features']:
self.interview.options['hide standard menu'] = data['features']['hide standard menu']
if 'labels above fields' in data['features']:
self.interview.options['labels above'] = True if data['features']['labels above fields'] else False
if 'send question data' in data['features']:
self.interview.options['send question data'] = True if data['features']['send question data'] else False
if 'checkin interval' in data['features']:
if not isinstance(data['features']['checkin interval'], int):
raise DAError("A features section checkin interval entry must be an integer." + self.idebug(data))
if data['features']['checkin interval'] > 0 and data['features']['checkin interval'] < 1000:
raise DAError("A features section checkin interval entry must be at least 1000, if not 0." + self.idebug(data))
self.interview.options['checkin interval'] = data['features']['checkin interval']
for key in ('javascript', 'css'):
if key in data['features']:
if isinstance(data['features'][key], list):
the_list = data['features'][key]
elif isinstance(data['features'][key], dict):
raise DAError("A features section " + key + " entry must be a list or plain text." + self.idebug(data))
else:
the_list = [data['features'][key]]
for the_file in the_list:
if key not in self.interview.external_files:
self.interview.external_files[key] = list()
self.interview.external_files[key].append((self.from_source.get_package(), the_file))
for key in ('default date min', 'default date max'):
if key in data['features']:
if not isinstance(data['features'][key], str):
raise DAError("A features section " + key + " entry must be plain text." + self.idebug(data))
try:
self.interview.options[key] = pytz.timezone(docassemble.base.functions.get_default_timezone()).localize(dateutil.parser.parse(data['features'][key]))
except:
raise DAError("The " + key + " in features did not contain a valid date." + self.idebug(data))
if 'field' in data and not ('yesno' in data or 'noyes' in data or 'yesnomaybe' in data or 'noyesmaybe' in data or 'buttons' in data or 'choices' in data or 'dropdown' in data or 'combobox' in data):
data['continue button field'] = data['field']
del data['field']
if 'scan for variables' in data:
if data['scan for variables']:
self.scan_for_variables = True
else:
self.scan_for_variables = False
if 'only sets' in data:
if isinstance(data['only sets'], str):
self.fields_used.add(data['only sets'])
elif isinstance(data['only sets'], list):
for key in data['only sets']:
self.fields_used.add(key)
else:
raise DAError("An only sets phrase must be text or a list." + self.idebug(data))
self.scan_for_variables = False
if 'question' in data and 'code' in data:
raise DAError("A block can be a question block or a code block but cannot be both at the same time." + self.idebug(data))
if 'event' in data:
if 'field' in data or 'fields' in data or 'yesno' in data or 'noyes' in data:
raise DAError("The 'event' designator is for special screens that do not gather information and can only be used with 'buttons' or with no other controls." + self.idebug(data))
if 'translations' in data:
should_append = False
if not isinstance(data['translations'], list):
raise DAError("A 'translations' block must be a list" + self.idebug(data))
tr_todo = list()
for item in data['translations']:
if not isinstance(item, str):
raise DAError("A 'translations' block must be a list of text items" + self.idebug(data))
if not (item.endswith('.xlsx') or item.endswith('.xlf') or item.endswith('.xliff')):
raise DAError("Invalid translations entry '" + item + "'. A translations entry must refer to a file ending in .xlsx, .xlf, or .xliff." + self.idebug(data))
parts = item.split(":")
if len(parts) == 1:
item = re.sub(r'^data/sources/', '', item)
the_package = self.from_source.get_package()
if the_package is not None:
item = self.from_source.get_package() + ':data/sources/' + item
tr_todo.append(item)
elif len(parts) == 2 and parts[0].startswith('docassemble.') and parts[1].startswith('data/sources/'):
tr_todo.append(item)
else:
raise DAError("Invalid translations entry: " + item + ". A translations entry must refer to a data sources file" + self.idebug(data))
for item in tr_todo:
self.interview.translations.append(item)
if item.endswith(".xlsx"):
the_xlsx_file = docassemble.base.functions.package_data_filename(item)
if not os.path.isfile(the_xlsx_file):
raise DAError("The translations file " + the_xlsx_file + " could not be found")
df = pandas.read_excel(the_xlsx_file)
for column_name in ('interview', 'question_id', 'index_num', 'hash', 'orig_lang', 'tr_lang', 'orig_text', 'tr_text'):
if column_name not in df.columns:
raise DAError("Invalid translations file " + os.path.basename(the_xlsx_file) + ": column " + column_name + " is missing")
for indexno in df.index:
if not isinstance(df['tr_text'][indexno], str) or df['tr_text'][indexno] == '':
continue
if df['orig_text'][indexno] not in self.interview.translation_dict:
self.interview.translation_dict[df['orig_text'][indexno]] = dict()
if df['orig_lang'][indexno] not in self.interview.translation_dict[df['orig_text'][indexno]]:
self.interview.translation_dict[df['orig_text'][indexno]][df['orig_lang'][indexno]] = dict()
self.interview.translation_dict[df['orig_text'][indexno]][df['orig_lang'][indexno]][df['tr_lang'][indexno]] = df['tr_text'][indexno]
elif item.endswith(".xlf") or item.endswith(".xliff"):
the_xlf_file = docassemble.base.functions.package_data_filename(item)
if not os.path.isfile(the_xlf_file):
continue
tree = ET.parse(the_xlf_file)
root = tree.getroot()
indexno = 1
if root.attrib['version'] == "1.2":
for the_file in root.iter('{urn:oasis:names:tc:xliff:document:1.2}file'):
source_lang = the_file.attrib.get('source-language', 'en')
target_lang = the_file.attrib.get('target-language', 'en')
for transunit in the_file.iter('{urn:oasis:names:tc:xliff:document:1.2}trans-unit'):
orig_text = ''
tr_text = ''
for source in transunit.iter('{urn:oasis:names:tc:xliff:document:1.2}source'):
if source.text:
orig_text += source.text
for mrk in source:
orig_text += mrk.text
if mrk.tail:
orig_text += mrk.tail
for target in transunit.iter('{urn:oasis:names:tc:xliff:document:1.2}target'):
if target.text:
tr_text += target.text
for mrk in target:
tr_text += mrk.text
if mrk.tail:
tr_text += mrk.tail
if orig_text == '' or tr_text == '':
continue
if orig_text not in self.interview.translation_dict:
self.interview.translation_dict[orig_text] = dict()
if source_lang not in self.interview.translation_dict[orig_text]:
self.interview.translation_dict[orig_text][source_lang] = dict()
self.interview.translation_dict[orig_text][source_lang][target_lang] = tr_text
elif root.attrib['version'] == "2.0":
source_lang = root.attrib.get('srcLang', 'en')
target_lang = root.attrib.get('trgLang', 'en')
for segment in root.iter('{urn:oasis:names:tc:xliff:document:2.0}segment'):
orig_text = ''
tr_text = ''
for source in segment.iter('{urn:oasis:names:tc:xliff:document:2.0}source'):
if source.text:
orig_text += source.text
for mrk in source:
orig_text += mrk.text
if mrk.tail:
orig_text += mrk.tail
for target in segment.iter('{urn:oasis:names:tc:xliff:document:2.0}target'):
if target.text:
tr_text += target.text
for mrk in target:
tr_text += mrk.text
if mrk.tail:
tr_text += mrk.tail
if orig_text == '' or tr_text == '':
continue
if orig_text not in self.interview.translation_dict:
self.interview.translation_dict[orig_text] = dict()
if source_lang not in self.interview.translation_dict[orig_text]:
self.interview.translation_dict[orig_text][source_lang] = dict()
self.interview.translation_dict[orig_text][source_lang][target_lang] = tr_text
if 'default language' in data:
should_append = False
self.from_source.set_language(data['default language'])
if 'on change' in data:
should_append = False
self.scan_for_variables = False
if not isinstance(data['on change'], dict):
raise DAError("An on change block must be a dictionary." + self.idebug(data))
if len(data) > 1:
raise DAError("An on change block must not contain any other keys." + self.idebug(data))
for key, val in data['on change'].items():
if not (isinstance(key, str) and isinstance(val, str)):
raise DAError("An on change block must be a dictionary where the keys are field names and the values are Python code." + self.idebug(data))
if key not in self.interview.onchange:
self.interview.onchange[key] = list()
self.interview.onchange[key].append(compile(val, '<on change code>', 'exec'))
self.find_fields_in(val)
if 'sections' in data:
should_append = False
if not isinstance(data['sections'], list):
raise DAError("A sections list must be a list." + self.idebug(data))
if 'language' in data:
the_language = data['language']
else:
the_language = '*'
self.interview.sections[the_language] = data['sections']
if 'progressive' in data:
if 'sections' not in data:
raise DAError("A progressive directive can only be used with sections." + self.idebug(data))
if not isinstance(data['progressive'], bool):
raise DAError("A progressive directive can only be true or false." + self.idebug(data))
self.interview.sections_progressive = data['progressive']
if 'auto open' in data:
if 'sections' not in data:
raise DAError("An auto open directive can only be used with sections." + self.idebug(data))
if not isinstance(data['auto open'], bool):
raise DAError("An auto open directive can only be true or false." + self.idebug(data))
self.interview.sections_auto_open = data['auto open']
if 'section' in data:
if 'question' not in data:
raise DAError("You can only set the section from a question." + self.idebug(data))
self.section = data['section']
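        # 'machine learning storage' must name a .json file; a bare filename is resolved relative to the current package's data/sources directory.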
if 'machine learning storage' in data:
should_append = False
new_storage = data['machine learning storage']
if not new_storage.endswith('.json'):
raise DAError("Invalid machine learning storage entry '" + str(data['machine learning storage']) + ".' A machine learning storage entry must refer to a file ending in .json." + self.idebug(data))
parts = new_storage.split(":")
if len(parts) == 1:
new_storage = re.sub(r'^data/sources/', '', new_storage)
the_package = self.from_source.get_package()
if the_package is not None:
new_storage = self.from_source.get_package() + ':data/sources/' + new_storage
self.interview.set_ml_store(new_storage)
elif len(parts) == 2 and parts[0].startswith('docassemble.') and parts[1].startswith('data/sources/'):
self.interview.set_ml_store(data['machine learning storage'])
else:
raise DAError("Invalid machine learning storage entry: " + str(data['machine learning storage']) + self.idebug(data))
if 'language' in data:
self.language = data['language']
else:
self.language = self.from_source.get_language()
if 'prevent going back' in data and data['prevent going back']:
self.can_go_back = False
if 'back button' in data:
if isinstance(data['back button'], (bool, NoneType)):
self.back_button = data['back button']
else:
self.back_button = compile(data['back button'], '<back button>', 'eval')
else:
self.back_button = None
if 'allowed to set' in data:
if isinstance(data['allowed to set'], list):
for item in data['allowed to set']:
if not isinstance(item, str):
raise DAError("When allowed to set is a list, it must be a list of text items." + self.idebug(data))
self.allowed_to_set = data['allowed to set']
elif isinstance(data['allowed to set'], str):
self.allowed_to_set = compile(data['allowed to set'], '<allowed to set>', 'eval')
self.find_fields_in(data['allowed to set'])
else:
raise DAError("When allowed to set is not a list, it must be plain text." + self.idebug(data))
if 'usedefs' in data:
defs = list()
if isinstance(data['usedefs'], list):
usedefs = data['usedefs']
else:
usedefs = [data['usedefs']]
for usedef in usedefs:
if isinstance(usedef, (dict, list, set, bool)):
raise DAError("A usedefs section must consist of a list of strings or a single string." + self.idebug(data))
if usedef not in self.interview.defs:
raise DAError('Referred to a non-existent def "' + usedef + '." All defs must be defined before they are used.' + self.idebug(data))
defs.extend(self.interview.defs[usedef])
definitions = "\n".join(defs) + "\n";
else:
definitions = "";
if 'continue button label' in data:
if 'yesno' in data or 'noyes' in data or 'yesnomaybe' in data or 'noyesmaybe' in data or 'buttons' in data:
raise DAError("You cannot set a continue button label if the type of question is yesno, noyes, yesnomaybe, noyesmaybe, or buttons." + self.idebug(data))
self.continuelabel = TextObject(definitions + str(data['continue button label']), question=self)
if 'resume button label' in data:
if 'review' not in data:
raise DAError("You cannot set a resume button label if the type of question is not review." + self.idebug(data))
self.continuelabel = TextObject(definitions + str(data['resume button label']), question=self)
if 'back button label' in data:
self.backbuttonlabel = TextObject(definitions + str(data['back button label']), question=self)
if 'corner back button label' in data:
self.cornerbackbuttonlabel = TextObject(definitions + str(data['corner back button label']), question=self)
if 'skip undefined' in data:
if 'review' not in data:
raise DAError("You cannot set the skip undefined directive if the type of question is not review." + self.idebug(data))
if not data['skip undefined']:
self.skip_undefined = False
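        # 'list collect' requires 'fields' and can be given as a boolean/expression or as a dictionary with keys such as 'enable', 'label', 'is final', 'allow append', 'allow delete', and 'add another label'.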
if 'list collect' in data:
if 'fields' not in data:
raise DAError("You cannot set list collect without a fields specifier." + self.idebug(data))
if isinstance(data['list collect'], (str, bool)):
self.list_collect = compile(str(data['list collect']), '<list collect code>', 'eval')
elif isinstance(data['list collect'], dict):
if 'enable' in data['list collect']:
self.list_collect = compile(str(data['list collect']['enable']), '<list collect code>', 'eval')
else:
self.list_collect = compile('True', '<list collect code>', 'eval')
if 'label' in data['list collect']:
self.list_collect_label = TextObject(definitions + str(data['list collect']['label']), question=self)
if 'is final' in data['list collect']:
self.list_collect_is_final = compile(str(data['list collect']['is final']), '<list collect final code>', 'eval')
if 'allow append' in data['list collect']:
self.list_collect_allow_append = compile(str(data['list collect']['allow append']), '<list collect allow append code>', 'eval')
if 'allow delete' in data['list collect']:
self.list_collect_allow_delete = compile(str(data['list collect']['allow delete']), '<list collect allow delete code>', 'eval')
if 'add another label' in data['list collect']:
self.list_collect_add_another_label = TextObject(definitions + str(data['list collect']['add another label']), question=self)
else:
raise DAError("Invalid data under list collect." + self.idebug(data))
if 'mandatory' in data:
if 'initial' in data:
raise DAError("You cannot use the mandatory modifier and the initial modifier at the same time." + self.idebug(data))
if 'id' not in data and self.interview.debug and self.interview.source.package.startswith('docassemble.playground'):
self.interview.issue['mandatory_id'] = True
if 'question' not in data and 'code' not in data and 'objects' not in data and 'attachment' not in data and 'data' not in data and 'data from code' not in data:
raise DAError("You cannot use the mandatory modifier on this type of block." + self.idebug(data))
if data['mandatory'] is True:
self.is_mandatory = True
self.mandatory_code = None
elif data['mandatory'] in (False, None):
self.is_mandatory = False
self.mandatory_code = None
else:
self.is_mandatory = False
if isinstance(data['mandatory'], str):
self.mandatory_code = compile(data['mandatory'], '<mandatory code>', 'eval')
self.find_fields_in(data['mandatory'])
else:
self.mandatory_code = None
else:
self.is_mandatory = False
self.mandatory_code = None
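        # 'attachment options' sets interview-wide defaults for attachments, such as template files and initial/additional YAML files.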
if 'attachment options' in data:
should_append = False
if not isinstance(data['attachment options'], list):
data['attachment options'] = [data['attachment options']]
for attachment_option in data['attachment options']:
if not isinstance(attachment_option, dict):
raise DAError("An attachment option must a dictionary." + self.idebug(data))
for key in attachment_option:
value = attachment_option[key]
if key == 'initial yaml':
if 'initial_yaml' not in self.interview.attachment_options:
self.interview.attachment_options['initial_yaml'] = list()
if isinstance(value, list):
the_list = value
else:
the_list = [value]
for yaml_file in the_list:
if not isinstance(yaml_file, str):
raise DAError('An initial yaml file must be a string.' + self.idebug(data))
self.interview.attachment_options['initial_yaml'].append(FileInPackage(yaml_file, 'template', self.package))
elif key == 'additional yaml':
if 'additional_yaml' not in self.interview.attachment_options:
self.interview.attachment_options['additional_yaml'] = list()
if isinstance(value, list):
the_list = value
else:
the_list = [value]
for yaml_file in the_list:
if not isinstance(yaml_file, str):
raise DAError('An additional yaml file must be a string.' + self.idebug(data))
self.interview.attachment_options['additional_yaml'].append(FileInPackage(yaml_file, 'template', self.package))
elif key == 'template file':
if not isinstance(value, str):
raise DAError('The template file must be a string.' + self.idebug(data))
self.interview.attachment_options['template_file'] = FileInPackage(value, 'template', self.package)
elif key == 'rtf template file':
if not isinstance(value, str):
raise DAError('The rtf template file must be a string.' + self.idebug(data))
self.interview.attachment_options['rtf_template_file'] = FileInPackage(value, 'template', self.package)
elif key == 'docx reference file':
if not isinstance(value, str):
raise DAError('The docx reference file must be a string.' + self.idebug(data))
self.interview.attachment_options['docx_reference_file'] = FileInPackage(value, 'template', self.package)
if 'script' in data:
if not isinstance(data['script'], str):
raise DAError("A script section must be plain text." + self.idebug(data))
self.script = TextObject(definitions + do_not_translate + str(data['script']), question=self)
if 'css' in data:
if not isinstance(data['css'], str):
raise DAError("A css section must be plain text." + self.idebug(data))
self.css = TextObject(definitions + do_not_translate + str(data['css']), question=self)
if 'initial' in data and 'code' not in data:
raise DAError("Only a code block can be marked as initial." + self.idebug(data))
if 'initial' in data or 'default role' in data:
if 'default role' in data or data['initial'] is True:
self.is_initial = True
self.initial_code = None
elif data['initial'] in (False, None):
self.is_initial = False
self.initial_code = None
else:
self.is_initial = False
if isinstance(data['initial'], str):
self.initial_code = compile(data['initial'], '<initial code>', 'eval')
self.find_fields_in(data['initial'])
else:
self.initial_code = None
else:
self.is_initial = False
self.initial_code = None
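        # A 'command' block (exit, restart, leave, etc.) sets the question type and returns immediately, skipping the rest of the parsing.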
if 'command' in data and data['command'] in ('exit', 'logout', 'exit_logout', 'continue', 'restart', 'leave', 'refresh', 'signin', 'register', 'new_session'):
self.question_type = data['command']
self.content = TextObject(data.get('url', ''), question=self)
return
if 'objects from file' in data:
if not isinstance(data['objects from file'], list):
data['objects from file'] = [data['objects from file']]
if 'use objects' in data and data['use objects']:
self.question_type = 'objects_from_file_da'
else:
self.question_type = 'objects_from_file'
self.objects_from_file = data['objects from file']
for item in data['objects from file']:
if isinstance(item, dict):
for key in item:
self.fields.append(Field({'saveas': key, 'type': 'object_from_file', 'file': item[key]}))
if self.scan_for_variables:
self.fields_used.add(key)
else:
self.other_fields_used.add(key)
else:
raise DAError("An objects section cannot contain a nested list." + self.idebug(data))
if 'data' in data and 'variable name' in data:
if not isinstance(data['variable name'], str):
raise DAError("A data block variable name must be plain text." + self.idebug(data))
if self.scan_for_variables:
self.fields_used.add(data['variable name'].strip())
else:
self.other_fields_used.add(data['variable name'].strip())
if 'use objects' in data and data['use objects']:
self.question_type = 'data_da'
else:
self.question_type = 'data'
self.fields.append(Field({'saveas': data['variable name'].strip(), 'type': 'data', 'data': self.recursive_dataobject(data['data'])}))
if 'data from code' in data and 'variable name' in data:
if not isinstance(data['variable name'], str):
raise DAError("A data from code block variable name must be plain text." + self.idebug(data))
if self.scan_for_variables:
self.fields_used.add(data['variable name'])
else:
self.other_fields_used.add(data['variable name'])
if 'use objects' in data and data['use objects']:
self.question_type = 'data_from_code_da'
else:
self.question_type = 'data_from_code'
self.fields.append(Field({'saveas': data['variable name'], 'type': 'data_from_code', 'data': self.recursive_data_from_code(data['data from code'])}))
if 'objects' in data:
if not isinstance(data['objects'], list):
data['objects'] = [data['objects']]
#raise DAError("An objects section must be organized as a list." + self.idebug(data))
self.question_type = 'objects'
self.objects = data['objects']
for item in data['objects']:
if isinstance(item, dict):
for key in item:
self.fields.append(Field({'saveas': key, 'type': 'object', 'objecttype': item[key]}))
if self.scan_for_variables:
self.fields_used.add(key)
else:
self.other_fields_used.add(key)
else:
raise DAError("An objects section cannot contain a nested list." + self.idebug(data))
if 'id' in data:
# if str(data['id']) in self.interview.ids_in_use:
# raise DAError("The id " + str(data['id']) + " is already in use by another block. Id names must be unique." + self.idebug(data))
self.id = str(data['id']).strip()
if self.interview.debug and self.interview.source.package.startswith('docassemble.playground') and self.id in self.interview.ids_in_use:
self.interview.issue['id_collision'] = self.id
self.interview.ids_in_use.add(self.id)
self.interview.questions_by_id[self.id] = self
if 'ga id' in data:
if not isinstance(data['ga id'], str):
raise DAError("A 'ga id' must refer to text." + self.idebug(data))
self.ga_id = TextObject(definitions + str(data['ga id']), question=self)
if 'segment id' in data:
if not isinstance(data['segment id'], str):
raise DAError("A 'segment id' must refer to text." + self.idebug(data))
if not hasattr(self, 'segment'):
self.segment = dict(arguments=dict())
self.segment['id'] = TextObject(definitions + str(data['segment id']), question=self)
if 'segment' in data:
if not isinstance(data['segment'], dict):
raise DAError("A 'segment' must refer to a dictionary." + self.idebug(data))
if 'id' in data['segment']:
if not isinstance(data['segment']['id'], str):
raise DAError("An 'id' under 'segment' must refer to text." + self.idebug(data))
if not hasattr(self, 'segment'):
self.segment = dict(arguments=dict())
self.segment['id'] = TextObject(definitions + str(data['segment']['id']), question=self)
if 'arguments' in data['segment']:
if not isinstance(data['segment']['arguments'], dict):
raise DAError("An 'arguments' under 'segment' must refer to a dictionary." + self.idebug(data))
if not hasattr(self, 'segment'):
self.segment = dict(arguments=dict())
for key, val in data['segment']['arguments'].items():
if not isinstance(val, (str, int, float, bool)):
raise DAError("Each item under 'arguments' in a 'segment' must be plain text." + self.idebug(data))
self.segment['arguments'][key] = TextObject(definitions + str(val), question=self)
if 'supersedes' in data:
if not isinstance(data['supersedes'], list):
supersedes_list = [str(data['supersedes'])]
else:
supersedes_list = [str(x) for x in data['supersedes']]
self.interview.id_orderings.append(dict(type="supersedes", question=self, supersedes=supersedes_list))
if 'order' in data:
should_append = False
if 'question' in data or 'code' in data or 'attachment' in data or 'attachments' in data or 'template' in data:
raise DAError("An 'order' block cannot be combined with another type of block." + self.idebug(data))
if not isinstance(data['order'], list):
raise DAError("An 'order' block must be a list." + self.idebug(data))
self.interview.id_orderings.append(dict(type="order", order=[str(x) for x in data['order']]))
for key in ('image sets', 'images'):
if key not in data:
continue
should_append = False
if not isinstance(data[key], dict):
raise DAError("The '" + key + "' section needs to be a dictionary, not a list or text." + self.idebug(data))
if key == 'images':
data[key] = {'unspecified': {'images': data[key]}}
elif 'images' in data[key] and 'attribution' in data[key]:
data[key] = {'unspecified': data[key]}
for setname, image_set in data[key].items():
if not isinstance(image_set, dict):
if key == 'image sets':
raise DAError("Each item in the 'image sets' section needs to be a dictionary, not a list. Each dictionary item should have an 'images' definition (which can be a dictionary or list) and an optional 'attribution' definition (which must be text)." + self.idebug(data))
else:
raise DAError("Each item in the 'images' section needs to be a dictionary, not a list." + self.idebug(data))
if 'attribution' in image_set:
if not isinstance(image_set['attribution'], str):
raise DAError("An attribution in an 'image set' section cannot be a dictionary or a list." + self.idebug(data))
attribution = re.sub(r'\n', ' ', image_set['attribution'].strip())
else:
attribution = None
if 'images' in image_set:
if isinstance(image_set['images'], list):
image_list = image_set['images']
elif isinstance(image_set['images'], dict):
image_list = [image_set['images']]
else:
                        if key == 'image sets':
raise DAError("An 'images' definition in an 'image set' item must be a dictionary or a list." + self.idebug(data))
else:
raise DAError("An 'images' section must be a dictionary or a list." + self.idebug(data))
for image in image_list:
if not isinstance(image, dict):
the_image = {str(image): str(image)}
else:
the_image = image
for key, value in the_image.items():
self.interview.images[key] = PackageImage(filename=value, attribution=attribution, setname=setname, package=self.package)
if 'def' in data:
should_append = False
if not isinstance(data['def'], str):
raise DAError("A def name must be a string." + self.idebug(data))
if data['def'] not in self.interview.defs:
self.interview.defs[data['def']] = list()
if 'mako' in data:
if isinstance(data['mako'], str):
list_of_defs = [data['mako']]
elif isinstance(data['mako'], list):
list_of_defs = data['mako']
else:
raise DAError("A mako template definition must be a string or a list of strings." + self.idebug(data))
for definition in list_of_defs:
if not isinstance(definition, str):
raise DAError("A mako template definition must be a string." + self.idebug(data))
self.interview.defs[data['def']].append(definition)
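        # An 'interview help' block adds help content (with optional heading, label, audio, and video) to the interview for the current language.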
if 'interview help' in data:
should_append = False
if isinstance(data['interview help'], list):
raise DAError("An interview help section must not be in the form of a list." + self.idebug(data))
elif not isinstance(data['interview help'], dict):
data['interview help'] = {'content': str(data['interview help'])}
audiovideo = list()
if 'label' in data['interview help']:
data['interview help']['label'] = str(data['interview help']['label'])
if 'audio' in data['interview help']:
if not isinstance(data['interview help']['audio'], list):
the_list = [data['interview help']['audio']]
else:
the_list = data['interview help']['audio']
audiovideo = list()
for the_item in the_list:
if isinstance(the_item, (list, dict)):
raise DAError("An interview help audio section must be in the form of a text item or a list of text items." + self.idebug(data))
                    audiovideo.append({'text': TextObject(definitions + str(the_item), question=self), 'package': self.package, 'type': 'audio'})
if 'video' in data['interview help']:
if not isinstance(data['interview help']['video'], list):
the_list = [data['interview help']['video']]
else:
the_list = data['interview help']['video']
for the_item in the_list:
if isinstance(the_item, (list, dict)):
raise DAError("An interview help video section must be in the form of a text item or a list of text items." + self.idebug(data))
                    audiovideo.append({'text': TextObject(definitions + str(the_item), question=self), 'package': self.package, 'type': 'video'})
if 'video' not in data['interview help'] and 'audio' not in data['interview help']:
audiovideo = None
if 'heading' in data['interview help']:
if not isinstance(data['interview help']['heading'], (dict, list)):
help_heading = TextObject(definitions + str(data['interview help']['heading']), question=self)
else:
raise DAError("A heading within an interview help section must be text, not a list or a dictionary." + self.idebug(data))
else:
help_heading = None
if 'content' in data['interview help']:
if not isinstance(data['interview help']['content'], (dict, list)):
help_content = TextObject(definitions + str(data['interview help']['content']), question=self)
else:
raise DAError("Help content must be text, not a list or a dictionary." + self.idebug(data))
else:
raise DAError("No content section was found in an interview help section." + self.idebug(data))
if 'label' in data['interview help']:
if not isinstance(data['interview help']['label'], (dict, list)):
help_label = TextObject(definitions + str(data['interview help']['label']), question=self)
else:
raise DAError("Help label must be text, not a list or a dictionary." + self.idebug(data))
else:
help_label = None
if self.language not in self.interview.helptext:
self.interview.helptext[self.language] = list()
self.interview.helptext[self.language].append({'content': help_content, 'heading': help_heading, 'audiovideo': audiovideo, 'label': help_label, 'from': 'interview'})
if 'default screen parts' in data:
should_append = False
if not isinstance(data['default screen parts'], dict):
raise DAError("A default screen parts block must be in the form of a dictionary." + self.idebug(data))
if self.language not in self.interview.default_screen_parts:
self.interview.default_screen_parts[self.language] = dict()
for key, content in data['default screen parts'].items():
if content is None:
if key in self.interview.default_screen_parts[self.language]:
del self.interview.default_screen_parts[self.language][key]
else:
if not (isinstance(key, str) and isinstance(content, str)):
raise DAError("A default screen parts block must be a dictionary of text keys and text values." + self.idebug(data))
self.interview.default_screen_parts[self.language][key] = TextObject(definitions + str(content.strip()), question=self)
if 'default validation messages' in data:
should_append = False
if not isinstance(data['default validation messages'], dict):
raise DAError("A default validation messages block must be in the form of a dictionary." + self.idebug(data))
if self.language not in self.interview.default_validation_messages:
self.interview.default_validation_messages[self.language] = dict()
for validation_key, validation_message in data['default validation messages'].items():
if not (isinstance(validation_key, str) and isinstance(validation_message, str)):
raise DAError("A validation messages block must be a dictionary of text keys and text values." + self.idebug(data))
self.interview.default_validation_messages[self.language][validation_key] = validation_message.strip()
if 'generic object' in data:
self.is_generic = True
#self.is_generic_list = False
self.generic_object = data['generic object']
elif 'generic list object' in data:
self.is_generic = True
#self.is_generic_list = True
self.generic_object = data['generic list object']
else:
self.is_generic = False
if 'comment' in data and len(data) == 1:
should_append = False
if 'metadata' in data:
for key in data:
if key not in ('metadata', 'comment'):
raise DAError("A metadata directive cannot be mixed with other directives." + self.idebug(data))
should_append = False
if isinstance(data['metadata'], dict):
data['metadata']['_origin_path'] = self.from_source.path
data['metadata']['_origin_package'] = self.from_source.get_package()
self.interview.metadata.append(data['metadata'])
else:
raise DAError("A metadata section must be organized as a dictionary." + self.idebug(data))
if 'modules' in data:
if isinstance(data['modules'], str):
data['modules'] = [data['modules']]
if isinstance(data['modules'], list):
if 'docassemble.base.util' in data['modules'] or 'docassemble.base.legal' in data['modules']:
# logmessage("setting imports_util to true")
self.interview.imports_util = True
# else:
# logmessage("not setting imports_util to true")
self.question_type = 'modules'
self.module_list = data['modules']
else:
raise DAError("A modules section must be organized as a list." + self.idebug(data))
if 'reset' in data:
#logmessage("Found a reset")
if isinstance(data['reset'], str):
data['reset'] = [data['reset']]
if isinstance(data['reset'], list):
self.question_type = 'reset'
self.reset_list = data['reset']
else:
raise DAError("A reset section must be organized as a list." + self.idebug(data))
if 'imports' in data:
if isinstance(data['imports'], str):
data['imports'] = [data['imports']]
if isinstance(data['imports'], list):
self.question_type = 'imports'
self.module_list = data['imports']
else:
raise DAError("An imports section must be organized as a list." + self.idebug(data))
if 'terms' in data and 'question' in data:
if not isinstance(data['terms'], (dict, list)):
raise DAError("Terms must be organized as a dictionary or a list." + self.idebug(data))
if isinstance(data['terms'], dict):
data['terms'] = [data['terms']]
for termitem in data['terms']:
if not isinstance(termitem, dict):
raise DAError("A terms section organized as a list must be a list of dictionary items." + self.idebug(data))
if len(termitem) == 2 and 'phrases' in termitem and isinstance(termitem['phrases'], list) and 'definition' in termitem:
termitems = [(phrase, termitem['definition']) for phrase in termitem['phrases']]
else:
termitems = termitem.items()
for term, definition in termitems:
lower_term = re.sub(r'\s+', ' ', term.lower())
term_textobject = TextObject(str(lower_term), question=self)
alt_terms = dict()
re_dict = dict()
re_dict[self.language] = re.compile(r"{(?i)(%s)(\|[^\}]*)?}" % (re.sub(r'\s', '\\\s+', lower_term),), re.IGNORECASE | re.DOTALL)
for lang, tr_tuple in term_textobject.other_lang.items():
lower_other = re.sub(r'\s+', ' ', tr_tuple[0].lower())
re_dict[lang] = re.compile(r"{(?i)(%s)(\|[^\}]*)?}" % (re.sub(r'\s', '\\\s+', lower_other),), re.IGNORECASE | re.DOTALL)
alt_terms[lang] = tr_tuple[0]
self.terms[lower_term] = {'definition': TextObject(definitions + str(definition), question=self), 're': re_dict, 'alt_terms': alt_terms}
if 'auto terms' in data and 'question' in data:
if not isinstance(data['auto terms'], (dict, list)):
raise DAError("Terms must be organized as a dictionary or a list." + self.idebug(data))
if isinstance(data['auto terms'], dict):
data['auto terms'] = [data['auto terms']]
for termitem in data['auto terms']:
if not isinstance(termitem, dict):
raise DAError("A terms section organized as a list must be a list of dictionary items." + self.idebug(data))
if len(termitem) == 2 and 'phrases' in termitem and isinstance(termitem['phrases'], list) and 'definition' in termitem:
termitems = [(phrase, termitem['definition']) for phrase in termitem['phrases']]
else:
termitems = termitem.items()
for term, definition in termitems:
lower_term = re.sub(r'\s+', ' ', term.lower())
term_textobject = TextObject(str(lower_term), question=self)
alt_terms = dict()
re_dict = dict()
re_dict[self.language] = re.compile(r"{?(?i)\b(%s)\b}?" % (re.sub(r'\s', '\\\s+', lower_term),), re.IGNORECASE | re.DOTALL)
for lang, tr_tuple in term_textobject.other_lang.items():
lower_other = re.sub(r'\s+', ' ', tr_tuple[0].lower())
re_dict[lang] = re.compile(r"{?(?i)\b(%s)\b}?" % (re.sub(r'\s', '\\\s+', lower_other),), re.IGNORECASE | re.DOTALL)
alt_terms[lang] = tr_tuple[0]
self.autoterms[lower_term] = {'definition': TextObject(definitions + str(definition), question=self), 're': re_dict, 'alt_terms': alt_terms}
if 'terms' in data and 'question' not in data:
should_append = False
if self.language not in self.interview.terms:
self.interview.terms[self.language] = dict()
if isinstance(data['terms'], list):
for termitem in data['terms']:
if isinstance(termitem, dict):
if len(termitem) == 2 and 'phrases' in termitem and isinstance(termitem['phrases'], list) and 'definition' in termitem:
termitems = [(phrase, termitem['definition']) for phrase in termitem['phrases']]
else:
termitems = termitem.items()
for term, definition in termitems:
lower_term = re.sub(r'\s+', ' ', term.lower())
term_textobject = TextObject(str(lower_term), question=self)
definition_textobject = TextObject(str(definition), question=self)
self.interview.terms[self.language][lower_term] = {'definition': str(definition), 're': re.compile(r"{(?i)(%s)(\|[^\}]*)?}" % (re.sub(r'\s', '\\\s+', lower_term),), re.IGNORECASE | re.DOTALL)}
for lang, tr_tuple in term_textobject.other_lang.items():
if lang not in self.interview.terms:
self.interview.terms[lang] = dict()
if tr_tuple[0] not in self.interview.terms[lang]:
if lang in definition_textobject.other_lang:
lower_other = re.sub(r'\s+', ' ', tr_tuple[0].lower())
self.interview.terms[lang][tr_tuple[0]] = {'definition': definition_textobject.other_lang[lang][0], 're': re.compile(r"{(?i)(%s)(\|[^\}]*)?}" % (re.sub(r'\s', '\\\s+', lower_other),), re.IGNORECASE | re.DOTALL)}
else:
raise DAError("A terms section organized as a list must be a list of dictionary items." + self.idebug(data))
elif isinstance(data['terms'], dict):
for term in data['terms']:
lower_term = re.sub(r'\s+', ' ', term.lower())
term_textobject = TextObject(str(lower_term), question=self)
definition_textobject = TextObject(str(data['terms'][term]), question=self)
self.interview.terms[self.language][lower_term] = {'definition': str(data['terms'][term]), 're': re.compile(r"{(?i)(%s)(\|[^\}]*)?}" % (re.sub(r'\s', '\\\s+', lower_term),), re.IGNORECASE | re.DOTALL)}
for lang, tr_tuple in term_textobject.other_lang.items():
if lang not in self.interview.terms:
self.interview.terms[lang] = dict()
if tr_tuple[0] not in self.interview.terms[lang]:
if lang in definition_textobject.other_lang:
lower_other = re.sub(r'\s+', ' ', tr_tuple[0].lower())
self.interview.terms[lang][tr_tuple[0]] = {'definition': definition_textobject.other_lang[lang][0], 're': re.compile(r"{(?i)(%s)(\|[^\}]*)?}" % (re.sub(r'\s', '\\\s+', lower_other),), re.IGNORECASE | re.DOTALL)}
else:
raise DAError("A terms section must be organized as a dictionary or a list." + self.idebug(data))
if 'auto terms' in data and 'question' not in data:
should_append = False
if self.language not in self.interview.autoterms:
self.interview.autoterms[self.language] = dict()
if isinstance(data['auto terms'], list):
for termitem in data['auto terms']:
if isinstance(termitem, dict):
if len(termitem) == 2 and 'phrases' in termitem and isinstance(termitem['phrases'], list) and 'definition' in termitem:
termitems = [(phrase, termitem['definition']) for phrase in termitem['phrases']]
else:
termitems = termitem.items()
for term, definition in termitems:
lower_term = re.sub(r'\s+', ' ', term.lower())
term_textobject = TextObject(str(lower_term), question=self)
definition_textobject = TextObject(str(definition), question=self)
self.interview.autoterms[self.language][lower_term] = {'definition': str(definition), 're': re.compile(r"{?(?i)\b(%s)\b}?" % (re.sub(r'\s', '\\\s+', lower_term),), re.IGNORECASE | re.DOTALL)}
for lang, tr_tuple in term_textobject.other_lang.items():
if lang not in self.interview.autoterms:
self.interview.autoterms[lang] = dict()
if tr_tuple[0] not in self.interview.autoterms[lang]:
if lang in definition_textobject.other_lang:
lower_other = re.sub(r'\s+', ' ', tr_tuple[0].lower())
self.interview.autoterms[lang][tr_tuple[0]] = {'definition': definition_textobject.other_lang[lang][0], 're': re.compile(r"{?(?i)\b(%s)\b}?" % (re.sub(r'\s', '\\\s+', lower_other),), re.IGNORECASE | re.DOTALL)}
else:
raise DAError("An auto terms section organized as a list must be a list of dictionary items." + self.idebug(data))
elif isinstance(data['auto terms'], dict):
for term in data['auto terms']:
lower_term = re.sub(r'\s+', ' ', term.lower())
term_textobject = TextObject(str(lower_term), question=self)
definition_textobject = TextObject(str(data['auto terms'][term]), question=self)
self.interview.autoterms[self.language][lower_term] = {'definition': str(data['auto terms'][term]), 're': re.compile(r"{?(?i)\b(%s)\b}?" % (re.sub(r'\s', '\\\s+', lower_term),), re.IGNORECASE | re.DOTALL)}
for lang, tr_tuple in term_textobject.other_lang.items():
if lang not in self.interview.autoterms:
self.interview.autoterms[lang] = dict()
if tr_tuple[0] not in self.interview.autoterms[lang]:
if lang in definition_textobject.other_lang:
lower_other = re.sub(r'\s+', ' ', tr_tuple[0].lower())
self.interview.autoterms[lang][tr_tuple[0]] = {'definition': definition_textobject.other_lang[lang][0], 're': re.compile(r"{?(?i)\b(%s)\b}?" % (re.sub(r'\s', '\\\s+', lower_other),), re.IGNORECASE | re.DOTALL)}
else:
raise DAError("An auto terms section must be organized as a dictionary or a list." + self.idebug(data))
if 'default role' in data:
if 'code' not in data:
should_append = False
if isinstance(data['default role'], str):
self.interview.default_role = [data['default role']]
elif isinstance(data['default role'], list):
self.interview.default_role = data['default role']
else:
raise DAError("A default role must be a list or a string." + self.idebug(data))
if 'role' in data:
if isinstance(data['role'], str):
if data['role'] not in self.role:
self.role.append(data['role'])
elif isinstance(data['role'], list):
for rolename in data['role']:
                    if rolename not in self.role:
self.role.append(rolename)
else:
raise DAError("The role of a question must be a string or a list." + self.idebug(data))
else:
self.role = list()
if 'include' in data:
should_append = False
if isinstance(data['include'], str):
data['include'] = [data['include']]
if isinstance(data['include'], list):
for questionPath in data['include']:
if ':' in questionPath:
self.interview.read_from(interview_source_from_string(questionPath))
else:
new_source = self.from_source.append(questionPath)
if new_source is None:
new_source = interview_source_from_string('docassemble.base:data/questions/' + re.sub(r'^data/questions/', '', questionPath))
if new_source is None:
raise DANotFoundError('Question file ' + questionPath + ' not found')
self.interview.read_from(new_source)
else:
raise DAError("An include section must be organized as a list." + self.idebug(data))
if 'action buttons' in data:
if isinstance(data['action buttons'], dict) and len(data['action buttons']) == 1 and 'code' in data['action buttons']:
self.action_buttons.append(compile(data['action buttons']['code'], '<action buttons code>', 'eval'))
else:
if not isinstance(data['action buttons'], list):
raise DAError("An action buttons specifier must be a list." + self.idebug(data))
for item in data['action buttons']:
if not isinstance(item, dict):
raise DAError("An action buttons item must be a dictionary." + self.idebug(data))
action = item.get('action', None)
target = item.get('new window', None)
if target is True:
target = '_blank'
elif target is False:
target = None
label = item.get('label', None)
color = item.get('color', 'primary')
icon = item.get('icon', None)
placement = item.get('placement', None)
forget_prior = item.get('forget prior', False)
given_arguments = item.get('arguments', dict())
if not isinstance(action, str):
raise DAError("An action buttons item must contain an action in plain text." + self.idebug(data))
if not isinstance(target, (str, NoneType)):
raise DAError("The new window specifier in an action buttons item must refer to True or plain text." + self.idebug(data))
if not isinstance(given_arguments, dict):
raise DAError("The arguments specifier in an action buttons item must refer to a dictionary." + self.idebug(data))
if not isinstance(label, str):
raise DAError("An action buttons item must contain a label in plain text." + self.idebug(data))
if not isinstance(color, str):
raise DAError("The color specifier in an action buttons item must refer to plain text." + self.idebug(data))
if not isinstance(icon, (str, NoneType)):
raise DAError("The icon specifier in an action buttons item must refer to plain text." + self.idebug(data))
if not isinstance(placement, (str, NoneType)):
raise DAError("The placement specifier in an action buttons item must refer to plain text." + self.idebug(data))
if not isinstance(forget_prior, bool):
raise DAError("The forget prior specifier in an action buttons item must refer to true or false." + self.idebug(data))
button = dict(action=TextObject(definitions + action, question=self), label=TextObject(definitions + label, question=self), color=TextObject(definitions + color, question=self))
if target is not None:
button['target'] = TextObject(definitions + target, question=self)
else:
button['target'] = None
if icon is not None:
button['icon'] = TextObject(definitions + icon, question=self)
else:
button['icon'] = None
if placement is not None:
button['placement'] = TextObject(definitions + placement, question=self)
else:
button['placement'] = None
if forget_prior:
button['forget_prior'] = True
else:
button['forget_prior'] = False
button['arguments'] = dict()
for key, val in given_arguments.items():
if isinstance(val, (list, dict)):
raise DAError("The arguments specifier in an action buttons item must refer to plain items." + self.idebug(data))
if isinstance(val, str):
button['arguments'][key] = TextObject(definitions + val, question=self)
else:
button['arguments'][key] = val
self.action_buttons.append(button)
if 'if' in data:
if isinstance(data['if'], str):
self.condition = [compile(data['if'], '<if code>', 'eval')]
self.find_fields_in(data['if'])
elif isinstance(data['if'], list):
self.condition = [compile(x, '<if code>', 'eval') for x in data['if']]
for x in data['if']:
self.find_fields_in(x)
else:
raise DAError("An if statement must either be text or a list." + self.idebug(data))
if 'validation code' in data:
if not isinstance(data['validation code'], str):
raise DAError("A validation code statement must be text." + self.idebug(data))
self.validation_code = compile(data['validation code'], '<code block>', 'exec')
self.find_fields_in(data['validation code'])
if 'require' in data:
if isinstance(data['require'], list):
self.question_type = 'require'
try:
self.require_list = list(map((lambda x: compile(x, '<require code>', 'eval')), data['require']))
for x in data['require']:
self.find_fields_in(x)
except:
logmessage("Compile error in require:\n" + str(data['require']) + "\n" + str(sys.exc_info()[0]))
raise
if 'orelse' in data:
if isinstance(data['orelse'], dict):
self.or_else_question = Question(data['orelse'], self.interview, register_target=register_target, source=self.from_source, package=self.package)
else:
raise DAError("The orelse part of a require section must be organized as a dictionary." + self.idebug(data))
else:
raise DAError("A require section must have an orelse part." + self.idebug(data))
else:
raise DAError("A require section must be organized as a list." + self.idebug(data))
if 'attachment' in data:
self.attachments = self.process_attachment_list(data['attachment'])
elif 'attachments' in data:
self.attachments = self.process_attachment_list(data['attachments'])
elif 'attachment code' in data:
self.process_attachment_code(data['attachment code'])
elif 'attachments code' in data:
self.process_attachment_code(data['attachments code'])
if 'allow emailing' in data:
self.allow_emailing = data['allow emailing']
if 'allow downloading' in data:
self.allow_downloading = data['allow downloading']
if 'email subject' in data:
self.email_subject = TextObject(definitions + str(data['email subject']), question=self)
if 'email body' in data:
self.email_body = TextObject(definitions + str(data['email body']), question=self)
if 'email template' in data:
self.email_template = compile(data['email template'], '<email template>', 'eval')
self.find_fields_in(data['email template'])
if 'email address default' in data:
self.email_default = TextObject(definitions + str(data['email address default']), question=self)
if 'always include editable files' in data:
self.always_include_editable_files = data['always include editable files']
if 'include attachment notice' in data:
self.attachment_notice = data['include attachment notice']
if 'include download tab' in data:
self.download_tab = data['include download tab']
if 'manual attachment list' in data:
self.manual_attachment_list = data['manual attachment list']
# if 'role' in data:
# if isinstance(data['role'], list):
# for rolename in data['role']:
# if rolename not in self.role:
# self.role.append(rolename)
# elif isinstance(data['role'], str) and data['role'] not in self.role:
# self.role.append(data['role'])
# else:
# raise DAError("A role section must be text or a list." + self.idebug(data))
if 'progress' in data:
if data['progress'] is None:
self.progress = -1
else:
try:
self.progress = int(data['progress'])
self.interview.progress_points.add(self.progress)
except:
logmessage("Invalid progress number " + repr(data['progress']))
if 'zip filename' in data:
self.zip_filename = TextObject(definitions + str(data['zip filename']), question=self)
if 'action' in data:
self.question_type = 'backgroundresponseaction'
self.content = TextObject('action')
self.action = data['action']
if 'backgroundresponse' in data:
self.question_type = 'backgroundresponse'
self.content = TextObject('backgroundresponse')
self.backgroundresponse = data['backgroundresponse']
if 'response' in data:
self.content = TextObject(definitions + str(data['response']), question=self)
self.question_type = 'response'
elif 'binaryresponse' in data:
self.question_type = 'response'
self.content = TextObject('binary')
self.binaryresponse = data['binaryresponse']
if 'response' not in data:
self.content = TextObject('')
elif 'all_variables' in data:
self.question_type = 'response'
self.all_variables = True
if 'include_internal' in data:
self.include_internal = data['include_internal']
self.content = TextObject('all_variables')
elif 'response filename' in data:
self.question_type = 'sendfile'
if data['response filename'].__class__.__name__ == 'DAFile':
self.response_file = data['response filename']
if hasattr(data['response filename'], 'mimetype') and data['response filename'].mimetype:
self.content_type = TextObject(data['response filename'].mimetype)
else:
info = docassemble.base.functions.server.file_finder(data['response filename'], question=self)
if 'fullpath' in info and info['fullpath']:
self.response_file = FileOnServer(data['response filename'], self) #info['fullpath']
else:
self.response_file = None
if 'mimetype' in info and info['mimetype']:
self.content_type = TextObject(info['mimetype'])
else:
self.content_type = TextObject('text/plain; charset=utf-8')
self.content = TextObject('')
if 'content type' in data:
self.content_type = TextObject(definitions + str(data['content type']), question=self)
elif not (hasattr(self, 'content_type') and self.content_type):
if self.response_file is not None:
self.content_type = TextObject(get_mimetype(self.response_file.path()))
else:
self.content_type = TextObject('text/plain; charset=utf-8')
elif 'redirect url' in data:
self.question_type = 'redirect'
self.content = TextObject(definitions + str(data['redirect url']), question=self)
elif 'null response' in data:
self.content = TextObject('null')
self.question_type = 'response'
if 'sleep' in data:
self.sleep = data['sleep']
        if 'response' in data or 'binaryresponse' in data or 'all_variables' in data or 'null response' in data:
if 'include_internal' in data:
self.include_internal = data['include_internal']
if 'content type' in data:
self.content_type = TextObject(definitions + str(data['content type']), question=self)
else:
self.content_type = TextObject('text/plain; charset=utf-8')
if 'response code' in data:
self.response_code = data['response code']
if 'css class' in data:
if 'question' not in data:
raise DAError("A css class can only accompany a question." + self.idebug(data))
self.css_class = TextObject(definitions + str(data['css class']), question=self)
if 'table css class' in data:
if 'question' not in data:
raise DAError("A table css class can only accompany a question." + self.idebug(data))
self.table_css_class = TextObject(definitions + str(data['table css class']), question=self)
if 'question' in data:
self.content = TextObject(definitions + str(data['question']), question=self)
if 'subquestion' in data:
self.subcontent = TextObject(definitions + str(data['subquestion']), question=self)
if 'reload' in data and data['reload']:
self.reload_after = TextObject(definitions + str(data['reload']), question=self)
if 'help' in data:
if isinstance(data['help'], dict):
for key, value in data['help'].items():
if key == 'label':
self.helplabel = TextObject(definitions + str(value), question=self)
if key == 'audio':
if not isinstance(value, list):
the_list = [value]
else:
the_list = value
for list_item in the_list:
if isinstance(list_item, (dict, list, set)):
raise DAError("An audio declaration in a help block can only contain a text item or a list of text items." + self.idebug(data))
if self.audiovideo is None:
self.audiovideo = dict()
if 'help' not in self.audiovideo:
self.audiovideo['help'] = list()
self.audiovideo['help'].append({'text': TextObject(definitions + str(list_item.strip()), question=self), 'package': self.package, 'type': 'audio'})
if key == 'video':
if not isinstance(value, list):
the_list = [value]
else:
the_list = value
for list_item in the_list:
if isinstance(list_item, (dict, list, set)):
raise DAError("A video declaration in a help block can only contain a text item or a list of text items." + self.idebug(data))
if self.audiovideo is None:
self.audiovideo = dict()
if 'help' not in self.audiovideo:
self.audiovideo['help'] = list()
self.audiovideo['help'].append({'text': TextObject(definitions + str(list_item.strip()), question=self), 'package': self.package, 'type': 'video'})
if key == 'content':
if isinstance(value, (dict, list, set)):
raise DAError("A content declaration in a help block can only contain text." + self.idebug(data))
self.helptext = TextObject(definitions + str(value), question=self)
else:
self.helptext = TextObject(definitions + str(data['help']), question=self)
if 'audio' in data:
if not isinstance(data['audio'], list):
the_list = [data['audio']]
else:
the_list = data['audio']
for list_item in the_list:
if isinstance(list_item, (dict, list, set)):
raise DAError("An audio declaration can only contain a text item or a list of text items." + self.idebug(data))
if self.audiovideo is None:
self.audiovideo = dict()
if 'question' not in self.audiovideo:
self.audiovideo['question'] = list()
self.audiovideo['question'].append({'text': TextObject(definitions + str(list_item.strip()), question=self), 'package': self.package, 'type': 'audio'})
if 'video' in data:
if not isinstance(data['video'], list):
the_list = [data['video']]
else:
the_list = data['video']
for list_item in the_list:
if isinstance(list_item, (dict, list, set)):
raise DAError("A video declaration can only contain a text item or a list of text items." + self.idebug(data))
if self.audiovideo is None:
self.audiovideo = dict()
if 'question' not in self.audiovideo:
self.audiovideo['question'] = list()
self.audiovideo['question'].append({'text': TextObject(definitions + str(list_item.strip()), question=self), 'package': self.package, 'type': 'video'})
if 'decoration' in data:
if isinstance(data['decoration'], dict):
decoration_list = [data['decoration']]
elif isinstance(data['decoration'], list):
decoration_list = data['decoration']
else:
decoration_list = [{'image': str(data['decoration'])}]
processed_decoration_list = []
for item in decoration_list:
if isinstance(item, dict):
the_item = item
else:
the_item = {'image': str(item.rstrip())}
item_to_add = dict()
for key, value in the_item.items():
item_to_add[key] = TextObject(do_not_translate + value, question=self)
processed_decoration_list.append(item_to_add)
self.decorations = processed_decoration_list
if 'signature' in data:
self.question_type = 'signature'
if 'required' in data:
if isinstance(data['required'], bool):
is_required = data['required']
else:
is_required = {'compute': compile(data['required'], '<required code>', 'eval'), 'sourcecode': data['required']}
self.find_fields_in(data['required'])
self.fields.append(Field({'saveas': data['signature'], 'required': is_required}))
else:
self.fields.append(Field({'saveas': data['signature']}))
if self.scan_for_variables:
self.fields_used.add(data['signature'])
else:
self.other_fields_used.add(data['signature'])
elif 'required' in data:
raise DAError("The required modifier can only be used on a signature block" + self.idebug(data))
if 'question metadata' in data:
self.question_metadata = recursive_textobject_or_primitive(data['question metadata'], self)
if 'under' in data:
self.undertext = TextObject(definitions + str(data['under']), question=self)
if 'pre' in data:
self.pretext = TextObject(definitions + str(data['pre']), question=self)
if 'post' in data:
self.posttext = TextObject(definitions + str(data['post']), question=self)
if 'right' in data:
self.righttext = TextObject(definitions + str(data['right']), question=self)
if 'check in' in data:
self.interview.uses_action = True
if isinstance(data['check in'], (dict, list, set)):
raise DAError("A check in event must be text or a list." + self.idebug(data))
self.checkin = str(data['check in'])
self.names_used.add(str(data['check in']))
if 'yesno' in data:
if not isinstance(data['yesno'], str):
raise DAError("A yesno must refer to text." + self.idebug(data))
self.fields.append(Field({'saveas': data['yesno'], 'boolean': 1}))
if self.scan_for_variables:
self.fields_used.add(data['yesno'])
else:
self.other_fields_used.add(data['yesno'])
self.question_type = 'yesno'
if 'noyes' in data:
if not isinstance(data['noyes'], str):
raise DAError("A noyes must refer to text." + self.idebug(data))
self.fields.append(Field({'saveas': data['noyes'], 'boolean': -1}))
if self.scan_for_variables:
self.fields_used.add(data['noyes'])
else:
self.other_fields_used.add(data['noyes'])
self.question_type = 'noyes'
if 'yesnomaybe' in data:
if not isinstance(data['yesnomaybe'], str):
raise DAError("A yesnomaybe must refer to text." + self.idebug(data))
self.fields.append(Field({'saveas': data['yesnomaybe'], 'threestate': 1}))
if self.scan_for_variables:
self.fields_used.add(data['yesnomaybe'])
else:
self.other_fields_used.add(data['yesnomaybe'])
self.question_type = 'yesnomaybe'
if 'noyesmaybe' in data:
if not isinstance(data['noyesmaybe'], str):
raise DAError("A noyesmaybe must refer to text." + self.idebug(data))
self.fields.append(Field({'saveas': data['noyesmaybe'], 'threestate': -1}))
if self.scan_for_variables:
self.fields_used.add(data['noyesmaybe'])
else:
self.other_fields_used.add(data['noyesmaybe'])
self.question_type = 'noyesmaybe'
if 'sets' in data:
if isinstance(data['sets'], str):
self.fields_used.add(data['sets'])
elif isinstance(data['sets'], list):
for key in data['sets']:
self.fields_used.add(key)
else:
raise DAError("A sets phrase must be text or a list." + self.idebug(data))
if 'event' in data:
self.interview.uses_action = True
if isinstance(data['event'], str):
self.fields_used.add(data['event'])
elif isinstance(data['event'], list):
for key in data['event']:
self.fields_used.add(key)
else:
raise DAError("An event phrase must be text or a list." + self.idebug(data))
if 'choices' in data or 'buttons' in data or 'dropdown' in data or 'combobox' in data:
if 'field' in data:
uses_field = True
data['field'] = data['field'].strip()
else:
uses_field = False
if 'shuffle' in data and data['shuffle']:
shuffle = True
else:
shuffle = False
if 'choices' in data or 'dropdown' in data or 'combobox' in data:
if 'choices' in data:
has_code, choices = self.parse_fields(data['choices'], register_target, uses_field)
self.question_variety = 'radio'
elif 'combobox' in data:
has_code, choices = self.parse_fields(data['combobox'], register_target, uses_field)
self.question_variety = 'combobox'
else:
has_code, choices = self.parse_fields(data['dropdown'], register_target, uses_field)
self.question_variety = 'dropdown'
field_data = {'choices': choices, 'shuffle': shuffle}
if has_code:
field_data['has_code'] = True
if 'default' in data:
field_data['default'] = TextObject(definitions + str(data['default']), question=self)
elif 'buttons' in data:
has_code, choices = self.parse_fields(data['buttons'], register_target, uses_field)
field_data = {'choices': choices, 'shuffle': shuffle}
if has_code:
field_data['has_code'] = True
self.question_variety = 'buttons'
if 'validation messages' in data:
if not isinstance(data['validation messages'], dict):
raise DAError("A validation messages indicator must be a dictionary." + self.idebug(data))
field_data['validation messages'] = dict()
for validation_key, validation_message in data['validation messages'].items():
if not (isinstance(validation_key, str) and isinstance(validation_message, str)):
raise DAError("A validation messages indicator must be a dictionary of text keys and text values." + self.idebug(data))
field_data['validation messages'][validation_key] = TextObject(definitions + str(validation_message).strip(), question=self)
if uses_field:
data['field'] = data['field'].strip()
if invalid_variable_name(data['field']):
raise DAError("Missing or invalid variable name " + repr(data['field']) + "." + self.idebug(data))
if self.scan_for_variables:
self.fields_used.add(data['field'])
else:
self.other_fields_used.add(data['field'])
field_data['saveas'] = data['field']
if 'datatype' in data and 'type' not in field_data:
field_data['type'] = data['datatype']
elif is_boolean(field_data):
field_data['type'] = 'boolean'
elif is_threestate(field_data):
field_data['type'] = 'threestate'
self.fields.append(Field(field_data))
self.question_type = 'multiple_choice'
elif 'continue button field' in data and 'fields' not in data and 'yesno' not in data and 'noyes' not in data and 'yesnomaybe' not in data and 'noyesmaybe' not in data and 'signature' not in data:
if not isinstance(data['continue button field'], str):
raise DAError("A continue button field must be plain text." + self.idebug(data))
if self.scan_for_variables:
self.fields_used.add(data['continue button field'])
else:
self.other_fields_used.add(data['continue button field'])
if 'review' in data:
self.review_saveas = data['continue button field']
else:
field_data = {'saveas': data['continue button field']}
self.fields.append(Field(field_data))
self.question_type = 'settrue'
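        # A 'need' block lists expressions, or dictionaries with 'pre' and 'post' lists, which are compiled into pre-need and post-need expressions for this question.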
if 'need' in data:
if isinstance(data['need'], (str, dict)):
need_list = [data['need']]
elif isinstance(data['need'], list):
need_list = data['need']
else:
raise DAError("A need phrase must be text or a list." + self.idebug(data))
pre_need_list = []
post_need_list = []
for item in need_list:
if isinstance(item, dict):
if not (('pre' in item and len(item) == 1) or ('post' in item and len(item) == 1) or ('pre' in item and 'post' in item and len(item) == 2)):
raise DAError("If 'need' contains a dictionary it can only include keys 'pre' or 'post'." + self.idebug(data))
if 'post' in item:
if isinstance(item['post'], str):
post_need_list.append(item['post'])
elif isinstance(item['post'], list):
post_need_list.extend(item['post'])
else:
raise DAError("A need post phrase must be text or a list." + self.idebug(data))
if 'pre' in item:
if isinstance(item['pre'], str):
pre_need_list.append(item['pre'])
elif isinstance(item['pre'], list):
pre_need_list.extend(item['pre'])
else:
raise DAError("A need pre phrase must be text or a list." + self.idebug(data))
else:
pre_need_list.append(item)
for sub_item in pre_need_list + post_need_list:
if not isinstance(sub_item, str):
raise DAError("In 'need', the items must be text strings." + self.idebug(data))
if len(pre_need_list):
try:
self.need = list(map((lambda x: compile(x, '<need expression>', 'eval')), pre_need_list))
for x in pre_need_list:
self.find_fields_in(x)
except:
logmessage("Question: compile error in need code:\n" + str(data['need']) + "\n" + str(sys.exc_info()[0]))
raise
if len(post_need_list):
try:
self.need_post = list(map((lambda x: compile(x, '<post need expression>', 'eval')), post_need_list))
for x in post_need_list:
self.find_fields_in(x)
except:
logmessage("Question: compile error in need code:\n" + str(data['need']) + "\n" + str(sys.exc_info()[0]))
raise
if 'depends on' in data:
if not isinstance(data['depends on'], list):
depends_list = [str(data['depends on'])]
else:
depends_list = [str(x) for x in data['depends on']]
# if len(depends_list):
# if self.need is None:
# self.need = list()
# self.need += list(map((lambda x: compile(x, '<depends expression>', 'exec')), depends_list))
else:
depends_list = []
if 'target' in data:
self.interview.uses_action = True
if isinstance(data['target'], (list, dict, set, bool, int, float)):
raise DAError("The target of a template must be plain text." + self.idebug(data))
if 'template' not in data:
raise DAError("A target directive can only be used with a template." + self.idebug(data))
self.target = data['target']
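        # A 'table' block requires 'table', 'rows' (Python code), and 'columns' (a list of dictionaries); 'edit', 'delete buttons', 'confirm', 'read only', and 'allow reordering' control the actions column.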
if 'table' in data or 'rows' in data or 'columns' in data:
if 'table' not in data or 'rows' not in data or 'columns' not in data:
raise DAError("A table definition must have definitions for table, row, and column." + self.idebug(data))
if isinstance(data['rows'], (list, dict, set, bool, int, float)):
raise DAError("The row part of a table definition must be plain Python code." + self.idebug(data))
data['rows'] = data['rows'].strip()
if not isinstance(data['columns'], list):
raise DAError("The column part of a table definition must be a list." + self.idebug(data))
row = compile(data['rows'], '<row code>', 'eval')
self.find_fields_in(data['rows'])
header = list()
column = list()
read_only = dict(edit=True, delete=True)
is_editable = False
if 'require gathered' in data and data['require gathered'] is False:
require_gathered = False
else:
require_gathered = True
if 'show incomplete' in data and data['show incomplete'] is True:
show_incomplete = True
else:
show_incomplete = False
if show_incomplete is True or require_gathered is False:
ensure_complete = False
else:
ensure_complete = True
if 'not available label' in data and isinstance(data['not available label'], str):
not_available_label = data['not available label'].strip()
else:
# word('n/a')
not_available_label = 'n/a'
for col in data['columns']:
if not isinstance(col, dict):
raise DAError("The column items in a table definition must be dictionaries." + self.idebug(data))
if len(col) == 0:
raise DAError("A column item in a table definition cannot be empty." + self.idebug(data))
if 'header' in col and 'cell' in col:
header_text = col['header']
cell_text = str(col['cell']).strip()
else:
for key, val in col.items():
header_text = key
cell_text = str(val).strip()
break
if header_text == '':
header.append(TextObject(' '))
else:
header.append(TextObject(definitions + str(header_text), question=self))
self.find_fields_in(cell_text)
column.append(compile(cell_text, '<column code>', 'eval'))
if 'allow reordering' in data and data['allow reordering'] is not False:
reorder = True
else:
reorder = False
if 'edit' in data and data['edit'] is not False:
is_editable = True
if isinstance(data['edit'], list):
if len(data['edit']) == 0:
raise DAError("The edit directive must be a list of attributes, or True or False" + self.idebug(data))
for attribute_name in data['edit']:
if not isinstance(attribute_name, str):
raise DAError("The edit directive must be a list of attribute names" + self.idebug(data))
elif not isinstance(data['edit'], bool):
raise DAError("The edit directive must be a list of attributes, or True or False" + self.idebug(data))
keyword_args = ''
if 'delete buttons' in data and not data['delete buttons']:
keyword_args += ', delete=False'
if 'confirm' in data and data['confirm']:
keyword_args += ', confirm=True'
if 'read only' in data:
if not isinstance(data['read only'], str):
raise DAError("The read only directive must be plain text referring to an attribute" + self.idebug(data))
keyword_args += ', read_only_attribute=' + repr(data['read only'].strip())
if isinstance(data['edit'], list):
column.append(compile('(' + data['rows'] + ').item_actions(row_item, row_index, ' + ', '.join([repr(y) for y in data['edit']]) + keyword_args + ', reorder=' + repr(reorder) + ', ensure_complete=' + repr(ensure_complete) + ')', '<edit code>', 'eval'))
else:
column.append(compile('(' + data['rows'] + ').item_actions(row_item, row_index' + keyword_args + ', reorder=' + repr(reorder) + ', ensure_complete=' + repr(ensure_complete) + ')', '<edit code>', 'eval'))
if 'edit header' in data:
if not isinstance(data['edit header'], str):
raise DAError("The edit header directive must be text" + self.idebug(data))
if data['edit header'] == '':
header.append(TextObject(' '))
else:
header.append(TextObject(definitions + str(data['edit header']), question=self))
else:
header.append(TextObject(word("Actions")))
elif ('delete buttons' in data and data['delete buttons']) or reorder:
is_editable = True
keyword_args = ''
if 'read only' in data:
if not isinstance(data['read only'], str):
raise DAError("The read only directive must be plain text referring to an attribute" + self.idebug(data))
keyword_args += ', read_only_attribute=' + repr(data['read only'].strip())
if 'confirm' in data and data['confirm']:
keyword_args += ', confirm=True'
if 'delete buttons' in data and data['delete buttons']:
column.append(compile('(' + data['rows'] + ').item_actions(row_item, row_index, edit=False' + keyword_args + ', reorder=' + repr(reorder) + ', ensure_complete=' + repr(ensure_complete) + ')', '<delete button code>', 'eval'))
else:
column.append(compile('(' + data['rows'] + ').item_actions(row_item, row_index, edit=False' + keyword_args + ', delete=False, reorder=' + repr(reorder) + ', ensure_complete=' + repr(ensure_complete) + ')', '<reorder buttons code>', 'eval'))
if 'edit header' in data:
if not isinstance(data['edit header'], str):
raise DAError("The edit header directive must be text" + self.idebug(data))
if data['edit header'] == '':
header.append(TextObject(' '))
else:
header.append(TextObject(definitions + str(data['edit header']), question=self))
else:
header.append(TextObject(word("Actions")))
if self.scan_for_variables:
self.fields_used.add(data['table'])
else:
self.other_fields_used.add(data['table'])
empty_message = data.get('show if empty', True)
if empty_message not in (True, False, None):
empty_message = TextObject(definitions + str(empty_message), question=self)
field_data = {'saveas': data['table'], 'extras': dict(header=header, row=row, column=column, empty_message=empty_message, indent=data.get('indent', False), is_editable=is_editable, require_gathered=require_gathered, show_incomplete=show_incomplete, not_available_label=not_available_label)}
self.fields.append(Field(field_data))
self.content = TextObject('')
self.subcontent = TextObject('')
self.question_type = 'table'
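        # 'template' blocks: content can come from inline 'content', from one or
        # more 'content file' entries in the package, or from a 'content file'
        # dict with 'code' that is evaluated at assembly time.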
if 'template' in data and 'content file' in data:
if isinstance(data['content file'], dict):
if len(data['content file']) == 1 and 'code' in data['content file'] and isinstance(data['content file']['code'], str):
if self.scan_for_variables:
self.fields_used.add(data['template'])
else:
self.other_fields_used.add(data['template'])
field_data = {'saveas': data['template']}
self.fields.append(Field(field_data))
self.compute = compile(data['content file']['code'], '<content file code>', 'eval')
self.sourcecode = data['content file']['code']
self.find_fields_in(data['content file']['code'])
self.question_type = 'template_code'
else:
raise DAError('A content file must be specified as text, as a list of text filenames, or as a dictionary with code as the key' + self.idebug(data))
else:
if not isinstance(data['content file'], list):
data['content file'] = [data['content file']]
data['content'] = ''
for content_file in data['content file']:
if not isinstance(content_file, str):
raise DAError('A content file must be specified as text, as a list of text filenames, or as a dictionary with code as the key' + self.idebug(data))
file_to_read = docassemble.base.functions.package_template_filename(content_file, package=self.package)
#if file_to_read is not None and get_mimetype(file_to_read) != 'text/markdown':
# raise DAError('The content file ' + str(data['content file']) + ' is not a markdown file ' + str(file_to_read) + self.idebug(data))
if file_to_read is not None and os.path.isfile(file_to_read) and os.access(file_to_read, os.R_OK):
with open(file_to_read, 'r', encoding='utf-8') as the_file:
data['content'] += the_file.read()
else:
raise DAError('Unable to read content file ' + str(data['content file']) + ' after trying to find it at ' + str(file_to_read) + self.idebug(data))
if 'template' in data and 'content' in data:
if isinstance(data['template'], (list, dict)):
raise DAError("A template must designate a single variable expressed as text." + self.idebug(data))
if isinstance(data['content'], (list, dict)):
raise DAError("The content of a template must be expressed as text." + self.idebug(data))
if self.scan_for_variables:
self.fields_used.add(data['template'])
else:
self.other_fields_used.add(data['template'])
field_data = {'saveas': data['template']}
self.fields.append(Field(field_data))
self.content = TextObject(definitions + str(data['content']), question=self)
#logmessage("keys are: " + str(self.mako_names))
if 'subject' in data:
self.subcontent = TextObject(definitions + str(data['subject']), question=self)
else:
self.subcontent = TextObject("")
self.question_type = 'template'
#if self.scan_for_variables:
# self.reset_list = self.fields_used
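        # 'code' blocks are compiled in 'exec' mode; pairing 'code' with 'event'
        # marks the block as event code and turns off variable scanning.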
if 'code' in data:
if 'event' in data:
self.question_type = 'event_code'
self.scan_for_variables = False
else:
self.question_type = 'code'
if isinstance(data['code'], str):
if not self.interview.calls_process_action and match_process_action.search(data['code']):
self.interview.calls_process_action = True
try:
self.compute = compile(data['code'], '<code block>', 'exec')
self.sourcecode = data['code']
except:
logmessage("Question: compile error in code:\n" + str(data['code']) + "\n" + str(sys.exc_info()[0]))
raise
self.find_fields_in(data['code'])
else:
raise DAError("A code section must be text, not a list or a dictionary." + self.idebug(data))
if 'reconsider' in data:
#if not isinstance(data['reconsider'], bool):
# raise DAError("A reconsider directive must be true or false." + self.idebug(data))
if isinstance(data['reconsider'], bool):
if data['reconsider']:
if self.is_generic:
if self.generic_object not in self.interview.reconsider_generic:
self.interview.reconsider_generic[self.generic_object] = set()
self.interview.reconsider_generic[self.generic_object].update(self.fields_used)
else:
self.interview.reconsider.update(self.fields_used)
else:
if isinstance(data['reconsider'], str):
fields = [data['reconsider']]
elif isinstance(data['reconsider'], list):
fields = data['reconsider']
else:
raise DAError("A reconsider directive must be true, false, a single variable or a list." + self.idebug(data))
for the_field in fields:
if not isinstance(the_field, str):
raise DAError("A reconsider directive must refer to variable names expressed as text." + self.idebug(data))
self.find_fields_in(the_field)
self.reconsider.append(the_field)
if 'undefine' in data:
if isinstance(data['undefine'], str):
fields = [data['undefine']]
elif isinstance(data['undefine'], list):
fields = data['undefine']
else:
raise DAError("A undefine directive must a single variable or a list." + self.idebug(data))
for the_field in fields:
if not isinstance(the_field, str):
raise DAError("A undefine directive must refer to variable names expressed as text." + self.idebug(data))
self.find_fields_in(the_field)
self.undefine.append(the_field)
if 'continue button field' in data and 'question' in data and ('field' in data or 'fields' in data or 'yesno' in data or 'noyes' in data or 'yesnomaybe' in data or 'noyesmaybe' in data or 'signature' in data):
if not isinstance(data['continue button field'], str):
raise DAError("A continue button field must be plain text." + self.idebug(data))
if self.scan_for_variables:
self.fields_used.add(data['continue button field'])
else:
self.other_fields_used.add(data['continue button field'])
self.fields_saveas = data['continue button field']
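        # 'fields' blocks: each list item becomes a Field object.  Illustrative
        # (hypothetical) YAML input:
        #   question: Tell me about yourself
        #   fields:
        #     - Name: user_name
        #     - Age: user_age
        #       datatype: integer
        #     - note: Thanks for answering.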
if 'fields' in data:
self.question_type = 'fields'
if isinstance(data['fields'], dict):
data['fields'] = [data['fields']]
if not isinstance(data['fields'], list):
raise DAError("The fields must be written in the form of a list." + self.idebug(data))
else:
field_number = 0
for field in data['fields']:
docassemble.base.functions.this_thread.misc['current_field'] = field_number
if isinstance(field, dict):
manual_keys = set()
field_info = {'type': 'text', 'number': field_number}
if 'datatype' in field:
if field['datatype'] in ('radio', 'combobox', 'pulldown', 'ajax'):
field['input type'] = field['datatype']
field['datatype'] = 'text'
if field['datatype'] == 'mlarea':
field['input type'] = 'area'
field['datatype'] = 'ml'
if field['datatype'] == 'area':
field['input type'] = 'area'
field['datatype'] = 'text'
if 'input type' in field and field['input type'] == 'ajax':
if 'action' not in field:
raise DAError("An ajax field must have an associated action." + self.idebug(data))
if 'choices' in field or 'code' in field:
raise DAError("An ajax field cannot contain a list of choices except through an action." + self.idebug(data))
if len(field) == 1 and 'code' in field:
field_info['type'] = 'fields_code'
self.find_fields_in(field['code'])
field_info['extras'] = dict(fields_code=compile(field['code'], '<fields code>', 'eval'))
self.fields.append(Field(field_info))
field_number += 1
if 'current_field' in docassemble.base.functions.this_thread.misc:
del docassemble.base.functions.this_thread.misc['current_field']
continue
if 'datatype' in field and field['datatype'] in ('object', 'object_radio', 'multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes') and not ('choices' in field or 'code' in field):
raise DAError("A multiple choice field must refer to a list of choices." + self.idebug(data))
if 'input type' in field and field['input type'] in ('radio', 'combobox', 'pulldown') and not ('choices' in field or 'code' in field):
raise DAError("A multiple choice field must refer to a list of choices." + self.idebug(data))
if 'object labeler' in field and ('datatype' not in field or not field['datatype'].startswith('object')):
raise DAError("An object labeler can only be used with an object data type")
if 'note' in field and 'html' in field:
raise DAError("You cannot include both note and html in a field." + self.idebug(data))
for key in field:
if key == 'default' and 'datatype' in field and field['datatype'] in ('object', 'object_radio', 'object_multiselect', 'object_checkboxes'):
continue
if key == 'input type':
field_info['inputtype'] = field[key]
elif 'datatype' in field and field['datatype'] in ('ml', 'mlarea') and key in ('using', 'keep for training'):
if key == 'using':
if 'extras' not in field_info:
field_info['extras'] = dict()
field_info['extras']['ml_group'] = TextObject(definitions + str(field[key]), question=self)
if key == 'keep for training':
if 'extras' not in field_info:
field_info['extras'] = dict()
if isinstance(field[key], bool):
field_info['extras']['ml_train'] = field[key]
else:
field_info['extras']['ml_train'] = {'compute': compile(field[key], '<keep for training code>', 'eval'), 'sourcecode': field[key]}
self.find_fields_in(field[key])
elif key == 'validation messages':
if not isinstance(field[key], dict):
raise DAError("A validation messages indicator must be a dictionary." + self.idebug(data))
field_info['validation messages'] = dict()
for validation_key, validation_message in field[key].items():
if not (isinstance(validation_key, str) and isinstance(validation_message, str)):
raise DAError("A validation messages indicator must be a dictionary of text keys and text values." + self.idebug(data))
field_info['validation messages'][validation_key] = TextObject(definitions + str(validation_message).strip(), question=self)
elif key == 'validate':
field_info['validate'] = {'compute': compile(field[key], '<validate code>', 'eval'), 'sourcecode': field[key]}
self.find_fields_in(field[key])
elif key == 'rows' and (('input type' in field and field['input type'] == 'area') or ('datatype' in field and field['datatype'] in ('multiselect', 'object_multiselect'))):
field_info['rows'] = {'compute': compile(str(field[key]), '<rows code>', 'eval'), 'sourcecode': str(field[key])}
self.find_fields_in(field[key])
elif key == 'maximum image size' and 'datatype' in field and field['datatype'] in ('file', 'files', 'camera', 'user', 'environment'):
field_info['max_image_size'] = {'compute': compile(str(field[key]), '<maximum image size code>', 'eval'), 'sourcecode': str(field[key])}
self.find_fields_in(field[key])
elif key == 'image upload type' and 'datatype' in field and field['datatype'] in ('file', 'files', 'camera', 'user', 'environment'):
if field[key].lower().strip() in ('jpeg', 'jpg', 'bmp', 'png'):
field_info['image_type'] = {'compute': compile(repr(field[key]), '<image upload type code>', 'eval'), 'sourcecode': repr(field[key])}
else:
field_info['image_type'] = {'compute': compile(str(field[key]), '<image upload type code>', 'eval'), 'sourcecode': str(field[key])}
elif key == 'accept' and 'datatype' in field and field['datatype'] in ('file', 'files', 'camera', 'user', 'environment'):
field_info['accept'] = {'compute': compile(field[key], '<accept code>', 'eval'), 'sourcecode': field[key]}
self.find_fields_in(field[key])
elif key == 'allow privileges' and 'datatype' in field and field['datatype'] in ('file', 'files', 'camera', 'user', 'environment'):
if isinstance(field[key], list):
for item in field[key]:
if not isinstance(item, str):
raise DAError("An allow privileges specifier must be a list of plain text items or code." + self.idebug(data))
field_info['allow_privileges'] = field[key]
elif isinstance(field[key], str):
field_info['allow_privileges'] = [field[key]]
elif isinstance(field[key], dict) and len(field[key]) == 1 and 'code' in field[key]:
field_info['allow_privileges'] = {'compute': compile(field[key]['code'], '<allow privileges code>', 'eval'), 'sourcecode': field[key]['code']}
self.find_fields_in(field[key]['code'])
else:
raise DAError("An allow privileges specifier must be a list of plain text items or code." + self.idebug(data))
elif key == 'allow users' and 'datatype' in field and field['datatype'] in ('file', 'files', 'camera', 'user', 'environment'):
if isinstance(field[key], list):
for item in field[key]:
if not isinstance(item, (str, int)):
raise DAError("An allow users specifier must be a list of integers and plain text items or code." + self.idebug(data))
field_info['allow_users'] = field[key]
elif isinstance(field[key], str):
field_info['allow_users'] = [field[key]]
elif isinstance(field[key], dict) and len(field[key]) == 1 and 'code' in field[key]:
field_info['allow_users'] = {'compute': compile(field[key]['code'], '<allow users code>', 'eval'), 'sourcecode': field[key]['code']}
self.find_fields_in(field[key]['code'])
else:
raise DAError("An allow users specifier must be a list of integers and plain text items or code." + self.idebug(data))
elif key == 'persistent' and 'datatype' in field and field['datatype'] in ('file', 'files', 'camera', 'user', 'environment'):
if isinstance(field[key], bool):
field_info['persistent'] = field[key]
else:
field_info['persistent'] = {'compute': compile(field[key], '<persistent code>', 'eval'), 'sourcecode': field[key]}
self.find_fields_in(field[key])
elif key == 'private' and 'datatype' in field and field['datatype'] in ('file', 'files', 'camera', 'user', 'environment'):
if isinstance(field[key], bool):
field_info['private'] = field[key]
else:
field_info['private'] = {'compute': compile(field[key], '<public code>', 'eval'), 'sourcecode': field[key]}
self.find_fields_in(field[key])
elif key == 'object labeler':
field_info['object_labeler'] = {'compute': compile(str(field[key]), '<object labeler code>', 'eval'), 'sourcecode': str(field[key])}
self.find_fields_in(field[key])
elif key == 'help generator':
field_info['help_generator'] = {'compute': compile(str(field[key]), '<help generator code>', 'eval'), 'sourcecode': str(field[key])}
self.find_fields_in(field[key])
elif key == 'image generator':
field_info['image_generator'] = {'compute': compile(str(field[key]), '<image generator code>', 'eval'), 'sourcecode': str(field[key])}
self.find_fields_in(field[key])
elif key == 'required':
if isinstance(field[key], bool):
field_info['required'] = field[key]
else:
field_info['required'] = {'compute': compile(field[key], '<required code>', 'eval'), 'sourcecode': field[key]}
self.find_fields_in(field[key])
elif key == 'js show if' or key == 'js hide if':
if not isinstance(field[key], str):
raise DAError("A js show if or js hide if expression must be a string" + self.idebug(data))
js_info = dict()
if key == 'js show if':
js_info['sign'] = True
else:
js_info['sign'] = False
js_info['mode'] = 0
js_info['expression'] = TextObject(definitions + str(field[key]).strip(), question=self, translate=False)
js_info['vars'] = list(set(re.findall(r'val\(\'([^\)]+)\'\)', field[key]) + re.findall(r'val\("([^\)]+)"\)', field[key])))
if 'extras' not in field_info:
field_info['extras'] = dict()
field_info['extras']['show_if_js'] = js_info
elif key == 'js disable if' or key == 'js enable if':
if not isinstance(field[key], str):
raise DAError("A js disable if or js enable if expression must be a string" + self.idebug(data))
js_info = dict()
if key == 'js enable if':
js_info['sign'] = True
else:
js_info['sign'] = False
js_info['mode'] = 1
js_info['expression'] = TextObject(definitions + str(field[key]).strip(), question=self, translate=False)
js_info['vars'] = list(set(re.findall(r'val\(\'([^\)]+)\'\)', field[key]) + re.findall(r'val\("([^\)]+)"\)', field[key])))
if 'extras' not in field_info:
field_info['extras'] = dict()
field_info['extras']['show_if_js'] = js_info
elif key == 'show if' or key == 'hide if':
if 'extras' not in field_info:
field_info['extras'] = dict()
if isinstance(field[key], dict):
showif_valid = False
if 'variable' in field[key] and 'is' in field[key]:
if 'js show if' in field or 'js hide if' in field:
raise DAError("You cannot mix js show if and non-js show if" + self.idebug(data))
if 'js disable if' in field or 'js enable if' in field:
raise DAError("You cannot mix js disable if and non-js show if" + self.idebug(data))
field_info['extras']['show_if_var'] = safeid(field[key]['variable'].strip())
if isinstance(field[key]['is'], str):
field_info['extras']['show_if_val'] = TextObject(definitions + str(field[key]['is']).strip(), question=self)
else:
field_info['extras']['show_if_val'] = TextObject(str(field[key]['is']))
showif_valid = True
if 'code' in field[key]:
field_info['showif_code'] = compile(field[key]['code'], '<show if code>', 'eval')
self.find_fields_in(field[key]['code'])
showif_valid = True
if not showif_valid:
raise DAError("The keys of '" + key + "' must be 'variable' and 'is,' or 'code.'" + self.idebug(data))
elif isinstance(field[key], list):
raise DAError("The keys of '" + key + "' cannot be a list" + self.idebug(data))
elif isinstance(field[key], str):
field_info['extras']['show_if_var'] = safeid(field[key].strip())
field_info['extras']['show_if_val'] = TextObject('True')
else:
raise DAError("Invalid variable name in show if/hide if")
exclusive = False
if isinstance(field[key], dict) and 'code' in field[key]:
if len(field[key]) == 1:
exclusive = True
if key == 'show if':
field_info['extras']['show_if_sign_code'] = 1
else:
field_info['extras']['show_if_sign_code'] = 0
if not exclusive:
if key == 'show if':
field_info['extras']['show_if_sign'] = 1
else:
field_info['extras']['show_if_sign'] = 0
field_info['extras']['show_if_mode'] = 0
elif key == 'disable if' or key == 'enable if':
if 'extras' not in field_info:
field_info['extras'] = dict()
if isinstance(field[key], dict):
showif_valid = False
if 'variable' in field[key] and 'is' in field[key]:
if 'js show if' in field or 'js hide if' in field:
raise DAError("You cannot mix js show if and non-js disable if" + self.idebug(data))
if 'js disable if' in field or 'js enable if' in field:
raise DAError("You cannot mix js disable if and non-js disable if" + self.idebug(data))
field_info['extras']['show_if_var'] = safeid(field[key]['variable'].strip())
if isinstance(field[key]['is'], str):
field_info['extras']['show_if_val'] = TextObject(definitions + str(field[key]['is']).strip(), question=self)
else:
field_info['extras']['show_if_val'] = TextObject(str(field[key]['is']))
showif_valid = True
if 'code' in field[key]:
field_info['showif_code'] = compile(field[key]['code'], '<show if code>', 'eval')
self.find_fields_in(field[key]['code'])
showif_valid = True
if not showif_valid:
raise DAError("The keys of '" + key + "' must be 'variable' and 'is,' or 'code.'" + self.idebug(data))
elif isinstance(field[key], list):
raise DAError("The keys of '" + key + "' cannot be a list" + self.idebug(data))
elif isinstance(field[key], str):
field_info['extras']['show_if_var'] = safeid(field[key].strip())
field_info['extras']['show_if_val'] = TextObject('True')
else:
raise DAError("Invalid variable name in disable if/enable if")
exclusive = False
if isinstance(field[key], dict) and 'code' in field[key]:
if len(field[key]) == 1:
exclusive = True
if key == 'enable if':
field_info['extras']['show_if_sign_code'] = 1
else:
field_info['extras']['show_if_sign_code'] = 0
if not exclusive:
if key == 'enable if':
field_info['extras']['show_if_sign'] = 1
else:
field_info['extras']['show_if_sign'] = 0
field_info['extras']['show_if_mode'] = 1
elif key == 'default' or key == 'hint' or key == 'help':
if not isinstance(field[key], dict) and not isinstance(field[key], list):
field_info[key] = TextObject(definitions + str(field[key]), question=self)
if key == 'default':
if isinstance(field[key], dict) and 'code' in field[key]:
if 'extras' not in field_info:
field_info['extras'] = dict()
field_info['extras']['default'] = {'compute': compile(field[key]['code'], '<default code>', 'eval'), 'sourcecode': field[key]['code']}
self.find_fields_in(field[key]['code'])
else:
if isinstance(field[key], (dict, list)):
field_info[key] = field[key]
if 'datatype' not in field and 'code' not in field and 'choices' not in field:
auto_determine_type(field_info, the_value=field[key])
elif key == 'disable others':
if 'datatype' in field and field['datatype'] in ('file', 'files', 'range', 'multiselect', 'checkboxes', 'camera', 'user', 'environment', 'camcorder', 'microphone', 'object_multiselect', 'object_checkboxes'): #'yesno', 'yesnowide', 'noyes', 'noyeswide',
raise DAError("A 'disable others' directive cannot be used with this data type." + self.idebug(data))
if not isinstance(field[key], (list, bool)):
raise DAError("A 'disable others' directive must be True, False, or a list of variable names." + self.idebug(data))
field_info['disable others'] = field[key]
if field[key] is not False:
field_info['required'] = False
elif key == 'uncheck others' and 'datatype' in field and field['datatype'] in ('yesno', 'yesnowide', 'noyes', 'noyeswide'):
if not isinstance(field[key], (list, bool)):
raise DAError("An 'uncheck others' directive must be True, False, or a list of variable names." + self.idebug(data))
field_info['uncheck others'] = field[key]
elif key == 'datatype':
field_info['type'] = field[key]
if field[key] in ('yesno', 'yesnowide', 'noyes', 'noyeswide') and 'required' not in field_info:
field_info['required'] = False
if field[key] == 'range' and 'required' not in field_info:
field_info['required'] = False
if field[key] == 'range' and not ('min' in field and 'max' in field):
raise DAError("If the datatype of a field is 'range', you must provide a min and a max." + self.idebug(data))
if field[key] in ('yesno', 'yesnowide', 'yesnoradio'):
field_info['boolean'] = 1
elif field[key] in ('noyes', 'noyeswide', 'noyesradio'):
field_info['boolean'] = -1
elif field[key] == 'yesnomaybe':
field_info['threestate'] = 1
elif field[key] == 'noyesmaybe':
field_info['threestate'] = -1
elif key == 'code':
self.find_fields_in(field[key])
field_info['choicetype'] = 'compute'
field_info['selections'] = {'compute': compile(field[key], '<choices code>', 'eval'), 'sourcecode': field[key]}
self.find_fields_in(field[key])
if 'exclude' in field:
if isinstance(field['exclude'], dict):
raise DAError("An exclude entry cannot be a dictionary." + self.idebug(data))
if not isinstance(field['exclude'], list):
field_info['selections']['exclude'] = [compile(field['exclude'], '<expression>', 'eval')]
self.find_fields_in(field['exclude'])
else:
field_info['selections']['exclude'] = list()
for x in field['exclude']:
field_info['selections']['exclude'].append(compile(x, '<expression>', 'eval'))
self.find_fields_in(x)
elif key == 'address autocomplete':
field_info['address_autocomplete'] = True
elif key == 'action' and 'input type' in field and field['input type'] == 'ajax':
if not isinstance(field[key], str):
raise DAError("An action must be plain text" + self.idebug(data))
if 'combobox action' not in field_info:
field_info['combobox action'] = dict(trig=4)
field_info['combobox action']['action'] = field[key]
elif key == 'trigger at' and 'action' in field and 'input type' in field and field['input type'] == 'ajax':
if (not isinstance(field[key], int)) or field[key] < 2:
raise DAError("A trigger at must an integer greater than one" + self.idebug(data))
if 'combobox action' not in field_info:
field_info['combobox action'] = dict()
field_info['combobox action']['trig'] = field[key]
elif key == 'exclude':
pass
elif key == 'choices':
if 'datatype' in field and field['datatype'] in ('object', 'object_radio', 'object_multiselect', 'object_checkboxes'):
field_info['choicetype'] = 'compute'
if not isinstance(field[key], (list, str)):
raise DAError("choices is not in appropriate format" + self.idebug(data))
field_info['selections'] = dict()
else:
field_info['choicetype'] = 'manual'
field_info['selections'] = dict(values=self.process_selections_manual(field[key]))
if 'datatype' not in field:
auto_determine_type(field_info)
for item in field_info['selections']['values']:
if isinstance(item['key'], TextObject):
if not item['key'].uses_mako:
manual_keys.add(item['key'].original_text)
else:
manual_keys.add(item['key'])
if 'exclude' in field:
if isinstance(field['exclude'], dict):
raise DAError("An exclude entry cannot be a dictionary." + self.idebug(data))
if not isinstance(field['exclude'], list):
self.find_fields_in(field['exclude'])
field_info['selections']['exclude'] = [compile(field['exclude'].strip(), '<expression>', 'eval')]
else:
field_info['selections']['exclude'] = list()
for x in field['exclude']:
self.find_fields_in(x)
field_info['selections']['exclude'].append(compile(x, '<expression>', 'eval'))
elif key in ('note', 'html'):
if 'extras' not in field_info:
field_info['extras'] = dict()
field_info['extras'][key] = TextObject(definitions + str(field[key]), question=self)
elif key == 'field metadata':
if 'extras' not in field_info:
field_info['extras'] = dict()
field_info['extras'][key] = recursive_textobject_or_primitive(field[key], self)
elif key in ('min', 'max', 'minlength', 'maxlength', 'step', 'scale', 'inline', 'inline width', 'currency symbol'):
if 'extras' not in field_info:
field_info['extras'] = dict()
field_info['extras'][key] = TextObject(definitions + str(field[key]), question=self)
# elif key in ('css', 'script'):
# if 'extras' not in field_info:
# field_info['extras'] = dict()
# if field_info['type'] == 'text':
# field_info['type'] = key
# field_info['extras'][key] = TextObject(definitions + str(field[key]), question=self)
elif key == 'shuffle':
field_info['shuffle'] = field[key]
elif key == 'none of the above' and 'datatype' in field and field['datatype'] in ('checkboxes', 'object_checkboxes', 'object_radio'):
if isinstance(field[key], bool):
field_info['nota'] = field[key]
else:
field_info['nota'] = TextObject(definitions + interpret_label(field[key]), question=self)
elif key == 'field':
if 'label' not in field:
raise DAError("If you use 'field' to indicate a variable in a 'fields' section, you must also include a 'label.'" + self.idebug(data))
if not isinstance(field[key], str):
raise DAError("Fields in a 'field' section must be plain text." + self.idebug(data))
field[key] = field[key].strip()
if invalid_variable_name(field[key]):
raise DAError("Missing or invalid variable name " + repr(field[key]) + "." + self.idebug(data))
field_info['saveas'] = field[key]
elif key == 'label':
if 'field' not in field:
raise DAError("If you use 'label' to label a field in a 'fields' section, you must also include a 'field.'" + self.idebug(data))
field_info['label'] = TextObject(definitions + interpret_label(field[key]), question=self)
else:
if 'label' in field_info:
raise DAError("Syntax error: field label '" + str(key) + "' overwrites previous label, '" + str(field_info['label'].original_text) + "'" + self.idebug(data))
field_info['label'] = TextObject(definitions + interpret_label(key), question=self)
if not isinstance(field[key], str):
raise DAError("Fields in a 'field' section must be plain text." + self.idebug(data))
field[key] = field[key].strip()
if invalid_variable_name(field[key]):
raise DAError("Missing or invalid variable name " + repr(field[key]) + " for key " + repr(key) + "." + self.idebug(data))
field_info['saveas'] = field[key]
if 'type' in field_info:
if field_info['type'] in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes') and 'nota' not in field_info:
field_info['nota'] = True
if field_info['type'] == 'object_radio' and 'nota' not in field_info:
field_info['nota'] = False
if 'choicetype' in field_info and field_info['choicetype'] == 'compute' and 'type' in field_info and field_info['type'] in ('object', 'object_radio', 'object_multiselect', 'object_checkboxes'):
if 'choices' not in field:
raise DAError("You need to have a choices element if you want to set a variable to an object." + self.idebug(data))
if not isinstance(field['choices'], list):
select_list = [str(field['choices'])]
else:
select_list = field['choices']
if 'exclude' in field:
if isinstance(field['exclude'], dict):
raise DAError("choices exclude list is not in appropriate format" + self.idebug(data))
if not isinstance(field['exclude'], list):
exclude_list = [str(field['exclude']).strip()]
else:
exclude_list = [x.strip() for x in field['exclude']]
if len(exclude_list):
select_list.append('exclude=[' + ", ".join(exclude_list) + ']')
if 'default' in field:
if not isinstance(field['default'], (list, str)):
raise DAError("default list is not in appropriate format" + self.idebug(data))
if not isinstance(field['default'], list):
default_list = [str(field['default'])]
else:
default_list = field['default']
else:
default_list = list()
if field_info['type'] in ('object_multiselect', 'object_checkboxes'):
default_list.append('_DAOBJECTDEFAULTDA')
if len(default_list):
select_list.append('default=[' + ", ".join(default_list) + ']')
additional_parameters = ''
if 'object_labeler' in field_info:
additional_parameters += ", object_labeler=_DAOBJECTLABELER"
if 'help_generator' in field_info:
additional_parameters += ", help_generator=_DAHELPGENERATOR"
if 'image_generator' in field_info:
additional_parameters += ", image_generator=_DAIMAGEGENERATOR"
source_code = "docassemble_base_core_selections(" + ", ".join(select_list) + additional_parameters + ")"
#logmessage("source_code is " + source_code)
field_info['selections'] = {'compute': compile(source_code, '<expression>', 'eval'), 'sourcecode': source_code}
if 'saveas' in field_info:
if not isinstance(field_info['saveas'], str):
raise DAError("Invalid variable name " + repr(field_info['saveas']) + "." + self.idebug(data))
self.fields.append(Field(field_info))
if 'type' in field_info:
if field_info['type'] in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes'):
if self.scan_for_variables:
self.fields_used.add(field_info['saveas'])
self.fields_used.add(field_info['saveas'] + '.gathered')
if field_info['type'] in ('multiselect', 'checkboxes'):
for the_key in manual_keys:
self.fields_used.add(field_info['saveas'] + '[' + repr(the_key) + ']')
else:
self.other_fields_used.add(field_info['saveas'])
self.other_fields_used.add(field_info['saveas'] + '.gathered')
if field_info['type'] in ('multiselect', 'checkboxes'):
for the_key in manual_keys:
self.other_fields_used.add(field_info['saveas'] + '[' + repr(the_key) + ']')
elif field_info['type'] == 'ml':
if self.scan_for_variables:
self.fields_used.add(field_info['saveas'])
else:
self.other_fields_used.add(field_info['saveas'])
self.interview.mlfields[field_info['saveas']] = dict(saveas=field_info['saveas'])
if 'extras' in field_info and 'ml_group' in field_info['extras']:
self.interview.mlfields[field_info['saveas']]['ml_group'] = field_info['extras']['ml_group']
if re.search(r'\.text$', field_info['saveas']):
field_info['saveas'] = field_info['saveas'].strip()
if invalid_variable_name(field_info['saveas']):
raise DAError("Missing or invalid variable name " + repr(field_info['saveas']) + "." + self.idebug(data))
field_info['saveas'] = re.sub(r'\.text$', '', field_info['saveas'])
if self.scan_for_variables:
self.fields_used.add(field_info['saveas'])
else:
self.other_fields_used.add(field_info['saveas'])
else:
if self.scan_for_variables:
self.fields_used.add(field_info['saveas'] + '.text')
else:
self.other_fields_used.add(field_info['saveas'] + '.text')
else:
if self.scan_for_variables:
self.fields_used.add(field_info['saveas'])
else:
self.other_fields_used.add(field_info['saveas'])
else:
if self.scan_for_variables:
self.fields_used.add(field_info['saveas'])
else:
self.other_fields_used.add(field_info['saveas'])
elif 'note' in field or 'html' in field:
if 'note' in field:
field_info['type'] = 'note'
else:
field_info['type'] = 'html'
self.fields.append(Field(field_info))
else:
raise DAError("A field was listed without indicating a label or a variable name, and the field was not a note or raw HTML." + self.idebug(data) + " and field_info was " + repr(field_info))
else:
raise DAError("Each individual field in a list of fields must be expressed as a dictionary item, e.g., ' - Fruit: user.favorite_fruit'." + self.idebug(data))
field_number += 1
if 'current_field' in docassemble.base.functions.this_thread.misc:
del docassemble.base.functions.this_thread.misc['current_field']
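        # 'review' blocks: each item becomes a Field whose 'data' list drives a
        # revisit action (re-asking, undefining, invalidating, or recomputing the
        # listed variables).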
if 'review' in data:
self.question_type = 'review'
if self.is_mandatory and 'continue button field' not in data:
raise DAError("A review block without a continue button field cannot be mandatory." + self.idebug(data))
if isinstance(data['review'], dict):
data['review'] = [data['review']]
if not isinstance(data['review'], list):
raise DAError("The review must be written in the form of a list." + self.idebug(data))
field_number = 0
for field in data['review']:
if not isinstance(field, dict):
raise DAError("Each individual field in a list of fields must be expressed as a dictionary item, e.g., ' - Fruit: user.favorite_fruit'." + self.idebug(data))
field_info = {'number': field_number, 'data': []}
for key in field:
if key == 'action':
continue
elif key == 'help':
if not isinstance(field[key], dict) and not isinstance(field[key], list):
field_info[key] = TextObject(definitions + str(field[key]), question=self)
if 'button' in field: #or 'css' in field or 'script' in field:
raise DAError("In a review block, you cannot mix help text with a button item." + self.idebug(data)) #, css, or script
elif key == 'button':
if not isinstance(field[key], dict) and not isinstance(field[key], list):
field_info['help'] = TextObject(definitions + str(field[key]), question=self)
field_info['type'] = 'button'
elif key in ('note', 'html'):
if 'type' not in field_info:
field_info['type'] = key
if 'extras' not in field_info:
field_info['extras'] = dict()
field_info['extras'][key] = TextObject(definitions + str(field[key]), question=self)
elif key == 'show if':
if not isinstance(field[key], list):
field_list = [field[key]]
else:
field_list = field[key]
field_data = []
for the_saveas in field_list:
#if not isinstance(the_saveas, str):
# raise DAError("Invalid variable name in fields." + self.idebug(data))
the_saveas = str(the_saveas).strip()
#if invalid_variable_name(the_saveas):
# raise DAError("Missing or invalid variable name " + repr(the_saveas) + " ." + self.idebug(data))
if the_saveas not in field_data:
field_data.append(the_saveas)
self.find_fields_in(the_saveas)
if len(field_list):
if 'saveas_code' not in field_info:
field_info['saveas_code'] = []
field_info['saveas_code'].extend([(compile(y, '<expression>', 'eval'), True) for y in field_list])
elif key in ('field', 'fields'):
if 'label' not in field:
raise DAError("If you use 'field' or 'fields' to indicate variables in a 'review' section, you must also include a 'label.'" + self.idebug(data))
if not isinstance(field[key], list):
field_list = [field[key]]
else:
field_list = field[key]
field_info['data'] = []
for the_saveas in field_list:
if isinstance(the_saveas, dict) and len(the_saveas) == 1 and ('undefine' in the_saveas or 'recompute' in the_saveas or 'set' in the_saveas or 'follow up' in the_saveas):
if 'set' in the_saveas:
if not isinstance(the_saveas['set'], list):
raise DAError("The set statement must refer to a list." + self.idebug(data))
clean_list = []
for the_dict in the_saveas['set']:
if not isinstance(the_dict, dict):
raise DAError("A set command must refer to a list of dicts." + self.idebug(data))
for the_var, the_val in the_dict.items():
if not isinstance(the_var, str):
raise DAError("A set command must refer to a list of dicts with keys as variable names." + self.idebug(data))
the_var_stripped = the_var.strip()
if invalid_variable_name(the_var_stripped):
raise DAError("Missing or invalid variable name " + repr(the_var) + " ." + self.idebug(data))
self.find_fields_in(the_var_stripped)
clean_list.append([the_var_stripped, the_val])
field_info['data'].append(dict(action='_da_set', arguments=dict(variables=clean_list)))
if 'follow up' in the_saveas:
if not isinstance(the_saveas['follow up'], list):
raise DAError("The follow up statement must refer to a list." + self.idebug(data))
for var in the_saveas['follow up']:
if not isinstance(var, str):
raise DAError("Invalid variable name in follow up " + command + "." + self.idebug(data))
var_saveas = var.strip()
if invalid_variable_name(var_saveas):
raise DAError("Missing or invalid variable name " + repr(var_saveas) + " ." + self.idebug(data))
self.find_fields_in(var_saveas)
#field_info['data'].append(dict(action="_da_follow_up", arguments=dict(action=var)))
field_info['data'].append(dict(action=var, arguments=dict()))
for command in ('undefine', 'invalidate', 'recompute'):
if command not in the_saveas:
continue
if not isinstance(the_saveas[command], list):
raise DAError("The " + command + " statement must refer to a list." + self.idebug(data))
clean_list = []
for undef_var in the_saveas[command]:
if not isinstance(undef_var, str):
raise DAError("Invalid variable name " + repr(undef_var) + " in " + command + "." + self.idebug(data))
undef_saveas = undef_var.strip()
if invalid_variable_name(undef_saveas):
raise DAError("Missing or invalid variable name " + repr(undef_saveas) + " ." + self.idebug(data))
self.find_fields_in(undef_saveas)
clean_list.append(undef_saveas)
if command == 'invalidate':
field_info['data'].append(dict(action='_da_invalidate', arguments=dict(variables=clean_list)))
else:
field_info['data'].append(dict(action='_da_undefine', arguments=dict(variables=clean_list)))
if command == 'recompute':
field_info['data'].append(dict(action='_da_compute', arguments=dict(variables=clean_list)))
continue
if isinstance(the_saveas, dict) and len(the_saveas) == 2 and 'action' in the_saveas and 'arguments' in the_saveas:
if not isinstance(the_saveas['arguments'], dict):
raise DAError("An arguments directive must refer to a dictionary. " + repr(data))
field_info['data'].append(dict(action=the_saveas['action'], arguments=the_saveas['arguments']))
if not isinstance(the_saveas, str):
raise DAError("Invalid variable name " + repr(the_saveas) + " in fields." + self.idebug(data))
the_saveas = the_saveas.strip()
if invalid_variable_name(the_saveas):
raise DAError("Missing or invalid variable name " + repr(the_saveas) + " ." + self.idebug(data))
if the_saveas not in field_info['data']:
field_info['data'].append(the_saveas)
self.find_fields_in(the_saveas)
if 'action' in field:
field_info['action'] = dict(action=field['action'], arguments=dict())
elif key == 'label':
if 'field' not in field and 'fields' not in field:
raise DAError("If you use 'label' to label a field in a 'review' section, you must also include a 'field' or 'fields.'" + self.idebug(data))
field_info['label'] = TextObject(definitions + interpret_label(field[key]), question=self)
else:
field_info['label'] = TextObject(definitions + interpret_label(key), question=self)
if not isinstance(field[key], list):
field_list = [field[key]]
else:
field_list = field[key]
field_info['data'] = []
for the_saveas in field_list:
if isinstance(the_saveas, dict) and len(the_saveas) == 1 and ('undefine' in the_saveas or 'recompute' in the_saveas):
if 'set' in the_saveas:
if not isinstance(the_saveas['set'], list):
raise DAError("The set statement must refer to a list." + self.idebug(data))
clean_list = []
for the_dict in the_saveas['set']:
if not isinstance(the_dict, dict):
raise DAError("A set command must refer to a list of dicts." + self.idebug(data))
for the_var, the_val in the_dict.items():
if not isinstance(the_var, str):
raise DAError("A set command must refer to a list of dicts with keys as variable names." + self.idebug(data))
the_var_stripped = the_var.strip()
if invalid_variable_name(the_var_stripped):
raise DAError("Missing or invalid variable name " + repr(the_var) + " ." + self.idebug(data))
self.find_fields_in(the_var_stripped)
clean_list.append([the_var_stripped, the_val])
field_info['data'].append(dict(action='_da_set', arguments=dict(variables=clean_list)))
for command in ('undefine', 'recompute'):
if command not in the_saveas:
continue
if not isinstance(the_saveas[command], list):
raise DAError("The " + command + " statement must refer to a list." + self.idebug(data))
clean_list = []
for undef_var in the_saveas[command]:
if not isinstance(undef_var, str):
raise DAError("Invalid variable name " + repr(undef_var) + " in fields " + command + "." + self.idebug(data))
undef_saveas = undef_var.strip()
if invalid_variable_name(undef_saveas):
raise DAError("Missing or invalid variable name " + repr(undef_saveas) + " ." + self.idebug(data))
self.find_fields_in(undef_saveas)
clean_list.append(undef_saveas)
if command == 'invalidate':
field_info['data'].append(dict(action='_da_invalidate', arguments=dict(variables=clean_list)))
else:
field_info['data'].append(dict(action='_da_undefine', arguments=dict(variables=clean_list)))
if command == 'recompute':
field_info['data'].append(dict(action='_da_compute', arguments=dict(variables=clean_list)))
continue
if not isinstance(the_saveas, str):
raise DAError("Invalid variable name " + repr(the_saveas) + " in fields." + self.idebug(data))
the_saveas = the_saveas.strip()
if invalid_variable_name(the_saveas):
raise DAError("Missing or invalid variable name " + repr(the_saveas) + " ." + self.idebug(data))
#if the_saveas not in field_info['data']:
field_info['data'].append(the_saveas)
self.find_fields_in(the_saveas)
if 'action' in field:
field_info['action'] = dict(action=field['action'], arguments=dict())
if 'type' in field_info and field_info['type'] in ('note', 'html') and 'label' in field_info:
del field_info['type']
if len(field_info['data']):
if 'saveas_code' not in field_info:
field_info['saveas_code'] = []
field_info['saveas_code'].extend([(compile(y, '<expression>', 'eval'), False) for y in field_info['data'] if isinstance(y, str)])
if 'action' not in field_info:
if len(field_info['data']) == 1 and isinstance(field_info['data'][0], str):
field_info['action'] = dict(action=field_info['data'][0], arguments=dict())
else:
field_info['action'] = dict(action="_da_force_ask", arguments=dict(variables=field_info['data']))
if len(field_info['data']) or ('type' in field_info and field_info['type'] in ('note', 'html')):
self.fields.append(Field(field_info))
else:
raise DAError("A field in a review list was listed without indicating a label or a variable name, and the field was not a note or raw HTML." + self.idebug(field_info))
field_number += 1
if not hasattr(self, 'question_type'):
if len(self.attachments) and len(self.fields_used) and not hasattr(self, 'content'):
self.question_type = 'attachments'
elif hasattr(self, 'content'):
self.question_type = 'deadend'
if should_append:
if not hasattr(self, 'question_type'):
raise DAError("No question type could be determined for this section." + self.idebug(data))
if main_list:
self.interview.questions_list.append(self)
self.number = self.interview.next_number()
#self.number = len(self.interview.questions_list) - 1
if hasattr(self, 'id'):
self.name = "ID " + self.id
# if self.name in self.interview.questions_by_name:
# raise DAError("Question ID " + str(self.id) + " results in duplicate question name")
else:
self.name = "Question_" + str(self.number)
else:
self.number = self.interview.next_block_number()
if self.name is None:
self.name = "Block_" + str(self.number)
self.interview.all_questions.append(self)
# if hasattr(self, 'id'):
# try:
# self.interview.questions_by_id[self.id].append(self)
# except:
# self.interview.questions_by_id[self.id] = [self]
if self.name is not None:
self.interview.questions_by_name[self.name] = self
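        # Normalize index notation in the collected variable names so that
        # x["key"] and x[u'key'] are also registered under the canonical
        # single-quoted form x['key'].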
foundmatch = False
for field_name in self.fields_used:
if re.search(r'\[', field_name):
foundmatch = True
break
while foundmatch:
foundmatch = False
vars_to_add = set()
for field_name in self.fields_used:
for m in re.finditer(r'^(.*?)\[\'([^\'\"]*)\'\](.*)', field_name):
new_var = m.group(1) + "['" + m.group(2) + "']" + m.group(3)
if new_var not in self.fields_used:
foundmatch = True
#logmessage("Adding " + new_var)
vars_to_add.add(new_var)
# new_var = m.group(1) + '["' + m.group(2) + '"]' + m.group(3)
# if new_var not in self.fields_used:
# foundmatch = True
# logmessage("Adding " + new_var)
# vars_to_add.add(new_var)
for m in re.finditer(r'^(.*?)\[\"([^\"\']*)\"\](.*)', field_name):
new_var = m.group(1) + "['" + m.group(2) + "']" + m.group(3)
if new_var not in self.fields_used:
foundmatch = True
#logmessage("Adding " + new_var)
vars_to_add.add(new_var)
for m in re.finditer(r'^(.*?)\[u\'([^\'\"]*)\'\](.*)', field_name):
new_var = m.group(1) + "['" + m.group(2) + "']" + m.group(3)
if new_var not in self.fields_used:
foundmatch = True
#logmessage("Adding " + new_var)
vars_to_add.add(new_var)
# new_var = m.group(1) + '["' + m.group(2) + '"]' + m.group(3)
# if new_var not in self.fields_used:
# foundmatch = True
# logmessage("Adding " + new_var)
# vars_to_add.add(new_var)
for new_var in vars_to_add:
#logmessage("Really adding " + new_var)
self.fields_used.add(new_var)
for field_name in self.fields_used:
if field_name not in self.interview.questions:
self.interview.questions[field_name] = dict()
if self.language not in self.interview.questions[field_name]:
self.interview.questions[field_name][self.language] = list()
self.interview.questions[field_name][self.language].append(register_target)
if self.is_generic:
if self.generic_object not in self.interview.generic_questions:
self.interview.generic_questions[self.generic_object] = dict()
if field_name not in self.interview.generic_questions[self.generic_object]:
self.interview.generic_questions[self.generic_object][field_name] = dict()
if self.language not in self.interview.generic_questions[self.generic_object][field_name]:
self.interview.generic_questions[self.generic_object][field_name][self.language] = list()
self.interview.generic_questions[self.generic_object][field_name][self.language].append(register_target)
for variable in depends_list:
if variable not in self.interview.invalidation:
self.interview.invalidation[variable] = set()
self.interview.invalidation[variable].add(field_name)
if len(self.attachments):
indexno = 0
for att in self.attachments:
att['question_name'] = self.name
att['indexno'] = indexno
indexno += 1
self.data_for_debug = data
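    # Capture the current values of invalidation-relevant fields so that changed
    # values can be detected and dependent variables invalidated later.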
def get_old_values(self, user_dict):
old_values = dict()
for field_name in self.fields_for_invalidation:
try:
old_values[field_name] = eval(field_name, user_dict)
except Exception as err:
if field_name in user_dict['_internal']['dirty']:
old_values[field_name] = user_dict['_internal']['dirty'][field_name]
return old_values
def invalidate_dependencies_of_variable(self, the_user_dict, field_name, old_value):
if field_name in self.interview.invalidation_todo or field_name in self.interview.onchange_todo:
self.interview.invalidate_dependencies(field_name, the_user_dict, { field_name: old_value })
try:
del the_user_dict['_internal']['dirty'][field_name]
except:
pass
def invalidate_dependencies(self, the_user_dict, old_values):
for field_name in self.fields_used.union(self.other_fields_used):
if field_name in self.interview.invalidation_todo or field_name in self.interview.onchange_todo:
self.interview.invalidate_dependencies(field_name, the_user_dict, old_values)
try:
del the_user_dict['_internal']['dirty'][field_name]
except:
pass
def post_exec(self, the_user_dict):
if self.need_post is not None:
for need_code in self.need_post:
eval(need_code, the_user_dict)
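    # Prepare the user dictionary before this question's logic runs: bind x and
    # any iterator variables, undefine and reconsider the requested variables,
    # and evaluate the pre-'need' expressions.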
def exec_setup(self, is_generic, the_x, iterators, the_user_dict):
if is_generic:
if the_x != 'None':
exec("x = " + the_x, the_user_dict)
if len(iterators):
for indexno in range(len(iterators)):
exec(list_of_indices[indexno] + " = " + iterators[indexno], the_user_dict)
for the_field in self.undefine:
docassemble.base.functions.undefine(the_field)
if len(self.reconsider) > 0:
docassemble.base.functions.reconsider(*[substitute_vars(item, is_generic, the_x, iterators) for item in self.reconsider])
if self.need is not None:
for need_code in self.need:
eval(need_code, the_user_dict)
def recursive_data_from_code(self, target):
if isinstance(target, dict) or (hasattr(target, 'elements') and isinstance(target.elements, dict)):
new_dict = dict()
for key, val in target.items():
new_dict[key] = self.recursive_data_from_code(val)
return new_dict
if isinstance(target, list) or (hasattr(target, 'elements') and isinstance(target.elements, list)):
new_list = list()
for val in target.__iter__():
new_list.append(self.recursive_data_from_code(val))
return new_list
if isinstance(target, set) or (hasattr(target, 'elements') and isinstance(target.elements, set)):
new_set = set()
for val in target.__iter__():
new_set.add(self.recursive_data_from_code(val))
return new_set
if isinstance(target, (bool, float, int, NoneType)):
return target
self.find_fields_in(target)
return compile(target, '<expression>', 'eval')
def recursive_dataobject(self, target):
if isinstance(target, dict) or (hasattr(target, 'elements') and isinstance(target.elements, dict)):
new_dict = dict()
for key, val in target.items():
new_dict[key] = self.recursive_dataobject(val)
return new_dict
if isinstance(target, list) or (hasattr(target, 'elements') and isinstance(target.elements, list)):
new_list = list()
for val in target.__iter__():
new_list.append(self.recursive_dataobject(val))
return new_list
if isinstance(target, set) or (hasattr(target, 'elements') and isinstance(target.elements, set)):
new_set = set()
for val in target.__iter__():
                new_set.add(self.recursive_dataobject(val))
return new_set
if isinstance(target, (bool, float, int, NoneType)):
return target
return TextObject(str(target), question=self)
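    # Scan a piece of Python code with the ast module, recording assignment
    # targets as fields this block can define and other referenced names.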
def find_fields_in(self, code):
myvisitor = myvisitnode()
t = ast.parse(str(code))
myvisitor.visit(t)
predefines = set(globals().keys()) | set(locals().keys())
if self.scan_for_variables:
for item in myvisitor.targets.keys():
if item not in predefines:
self.fields_used.add(item)
else:
for item in myvisitor.targets.keys():
if item not in predefines:
self.other_fields_used.add(item)
definables = set(predefines) | set(myvisitor.targets.keys())
for item in myvisitor.names.keys():
if item not in definables:
self.names_used.add(item)
def yes(self):
return word("Yes")
def no(self):
return word("No")
def maybe(self):
return word("I don't know")
def back(self):
return word("Back")
def cornerback(self):
return word("Back")
def help(self):
return word("Help")
def process_attachment_code(self, sourcecode):
if not isinstance(sourcecode, str):
raise DAError("An attachment code specifier must be plain text")
try:
self.compute_attachment = compile(sourcecode, '<expression>', 'eval')
self.find_fields_in(sourcecode)
self.sourcecode = sourcecode
except:
logmessage("Question: compile error in code:\n" + str(sourcecode) + "\n" + str(sys.exc_info()[0]))
raise
def process_attachment_list(self, target):
if isinstance(target, list):
att_list = list(map((lambda x: self.process_attachment(x)), target))
return att_list
else:
return([self.process_attachment(target)])
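    # Convert a single attachment specifier (typically a dict from the YAML
    # block) into the internal attachment structure, compiling any embedded
    # code and resolving template files within the package.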
def process_attachment(self, orig_target):
metadata = dict()
variable_name = str()
defs = list()
options = dict()
if isinstance(orig_target, dict):
target = dict()
for key, value in orig_target.items():
target[key.lower()] = value
if 'language' in target:
options['language'] = target['language']
if 'name' not in target:
target['name'] = word("Document")
if 'filename' not in target:
#target['filename'] = docassemble.base.functions.space_to_underscore(target['name'])
target['filename'] = ''
if 'description' not in target:
target['description'] = ''
if 'redact' in target:
if isinstance(target['redact'], bool) or isinstance(target['redact'], NoneType):
options['redact'] = target['redact']
else:
options['redact'] = compile(target['redact'], '<expression>', 'eval')
self.find_fields_in(target['redact'])
if 'checkbox export value' in target and 'pdf template file' in target:
if not isinstance(target['checkbox export value'], str):
raise DAError("A checkbox export value must be a string." + self.idebug(target))
options['checkbox_export_value'] = TextObject(target['checkbox export value'])
if 'decimal places' in target and 'pdf template file' in target:
if not isinstance(target['decimal places'], (str, int)):
raise DAError("A decimal places directive must be an integer or string." + self.idebug(target))
options['decimal_places'] = TextObject(str(target['decimal places']))
if 'initial yaml' in target:
if not isinstance(target['initial yaml'], list):
target['initial yaml'] = [target['initial yaml']]
options['initial_yaml'] = list()
for yaml_file in target['initial yaml']:
if not isinstance(yaml_file, str):
raise DAError('An initial yaml file must be a string.' + self.idebug(target))
options['initial_yaml'].append(FileInPackage(yaml_file, 'template', self.package))
if 'additional yaml' in target:
if not isinstance(target['additional yaml'], list):
target['additional yaml'] = [target['additional yaml']]
options['additional_yaml'] = list()
for yaml_file in target['additional yaml']:
if not isinstance(yaml_file, str):
raise DAError('An additional yaml file must be a string.' + self.idebug(target))
options['additional_yaml'].append(FileInPackage(yaml_file, 'template', self.package))
if 'template file' in target:
if not isinstance(target['template file'], str):
raise DAError('The template file must be a string.' + self.idebug(target))
options['template_file'] = FileInPackage(target['template file'], 'template', self.package)
if 'rtf template file' in target:
if not isinstance(target['rtf template file'], str):
raise DAError('The rtf template file must be a string.' + self.idebug(target))
options['rtf_template_file'] = FileInPackage(target['rtf template file'], 'template', self.package)
if 'docx reference file' in target:
if not isinstance(target['docx reference file'], str):
raise DAError('The docx reference file must be a string.' + self.idebug(target))
options['docx_reference_file'] = FileInPackage(target['docx reference file'], 'template', self.package)
if 'usedefs' in target:
if isinstance(target['usedefs'], str):
the_list = [target['usedefs']]
elif isinstance(target['usedefs'], list):
the_list = target['usedefs']
else:
raise DAError('The usedefs included in an attachment must be specified as a list of strings or a single string.' + self.idebug(target))
for def_key in the_list:
if not isinstance(def_key, str):
raise DAError('The defs in an attachment must be strings.' + self.idebug(target))
if def_key not in self.interview.defs:
raise DAError('Referred to a non-existent def "' + def_key + '." All defs must be defined before they are used.' + self.idebug(target))
defs.extend(self.interview.defs[def_key])
if 'variable name' in target:
variable_name = target['variable name']
if variable_name is None:
raise DAError('A variable name cannot be None.' + self.idebug(target))
if self.scan_for_variables:
self.fields_used.add(target['variable name'])
else:
self.other_fields_used.add(target['variable name'])
else:
variable_name = "_internal['docvar'][" + str(self.interview.next_attachment_number()) + "]"
if 'metadata' in target:
if not isinstance(target['metadata'], dict):
raise DAError('Unknown data type ' + str(type(target['metadata'])) + ' in attachment metadata.' + self.idebug(target))
for key in target['metadata']:
data = target['metadata'][key]
                    if isinstance(data, list):
                        for sub_data in data:
                            if not isinstance(sub_data, str):
raise DAError('Unknown data type ' + str(type(sub_data)) + ' in list in attachment metadata' + self.idebug(target))
newdata = list(map((lambda x: TextObject(x, question=self)), data))
metadata[key] = newdata
elif isinstance(data, str):
metadata[key] = TextObject(data, question=self)
elif isinstance(data, bool):
metadata[key] = data
else:
raise DAError('Unknown data type ' + str(type(data)) + ' in key in attachment metadata' + self.idebug(target))
if 'raw' in target and target['raw']:
if 'content file' in target:
content_file = target['content file']
if isinstance(content_file, dict):
target['valid formats'] = ['raw']
target['raw'] = '.txt'
else:
if not isinstance(content_file, list):
content_file = [content_file]
the_ext = None
for item in content_file:
(the_base, the_ext) = os.path.splitext(item)
if the_ext:
target['raw'] = the_ext
target['valid formats'] = ['raw']
else:
target['raw'] = False
else:
target['raw'] = False
else:
target['raw'] = False
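            # A "content file" can be given as a dictionary containing code
            # (evaluated later), or as one or more filenames whose contents
            # are read and concatenated now.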
if 'content file' in target:
if isinstance(target['content file'], dict):
if len(target['content file']) == 1 and 'code' in target['content file'] and isinstance(target['content file']['code'], str):
options['content file code'] = compile(target['content file']['code'], '<content file code>', 'eval')
self.find_fields_in(target['content file']['code'])
else:
raise DAError('A content file must be specified as text, a list of text filenames, or a dictionary where the one key is code' + self.idebug(target))
else:
if not isinstance(target['content file'], list):
target['content file'] = [target['content file']]
target['content'] = ''
for content_file in target['content file']:
if not isinstance(content_file, str):
raise DAError('A content file must be specified as text, a list of text filenames, or a dictionary where the one key is code' + self.idebug(target))
file_to_read = docassemble.base.functions.package_template_filename(content_file, package=self.package)
if file_to_read is not None and os.path.isfile(file_to_read) and os.access(file_to_read, os.R_OK):
with open(file_to_read, 'r', encoding='utf-8') as the_file:
target['content'] += the_file.read()
else:
raise DAError('Unable to read content file ' + str(content_file) + ' after trying to find it at ' + str(file_to_read) + self.idebug(target))
if 'pdf template file' in target and ('code' in target or 'field variables' in target or 'field code' in target or 'raw field variables' in target) and 'fields' not in target:
target['fields'] = dict()
field_mode = 'manual'
elif 'docx template file' in target:
if 'update references' in target:
if isinstance(target['update references'], bool):
options['update_references'] = target['update references']
elif isinstance(target['update references'], str):
options['update_references'] = compile(target['update references'], '<expression>', 'eval')
self.find_fields_in(target['update references'])
else:
raise DAError('Unknown data type in attachment "update references".' + self.idebug(target))
if 'fields' in target:
field_mode = 'manual'
else:
target['fields'] = dict()
if 'code' in target or 'field variables' in target or 'field code' in target or 'raw field variables' in target:
field_mode = 'manual'
else:
field_mode = 'auto'
else:
field_mode = 'manual'
if 'fields' in target:
if 'pdf template file' not in target and 'docx template file' not in target:
raise DAError('Fields supplied to attachment but no pdf template file or docx template file supplied' + self.idebug(target))
if 'pdf template file' in target and 'docx template file' in target:
raise DAError('You cannot use a pdf template file and a docx template file at the same time' + self.idebug(target))
if 'pdf template file' in target:
template_type = 'pdf'
target['valid formats'] = ['pdf']
if 'editable' in target:
options['editable'] = compile(str(target['editable']), '<editable expression>', 'eval')
elif 'docx template file' in target:
template_type = 'docx'
if 'valid formats' in target:
if isinstance(target['valid formats'], str):
target['valid formats'] = [target['valid formats']]
elif not isinstance(target['valid formats'], list):
raise DAError('Unknown data type in attachment valid formats.' + self.idebug(target))
if 'rtf to docx' in target['valid formats']:
raise DAError('Valid formats cannot include "rtf to docx" when "docx template file" is used' + self.idebug(target))
else:
target['valid formats'] = ['docx', 'pdf']
if template_type == 'docx':
if not isinstance(target['docx template file'], (str, dict, list)):
raise DAError(template_type + ' template file supplied to attachment must be a string, dict, or list' + self.idebug(target))
if not isinstance(target['docx template file'], list):
target[template_type + ' template file'] = [target['docx template file']]
else:
if not isinstance(target[template_type + ' template file'], (str, dict)):
raise DAError(template_type + ' template file supplied to attachment must be a string or dict' + self.idebug(target))
if field_mode == 'auto':
options['fields'] = 'auto'
elif not isinstance(target['fields'], (list, dict)):
raise DAError('fields supplied to attachment must be a list or dictionary' + self.idebug(target))
target['content'] = ''
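                # For docx templates, pre-parse the template with Jinja2 so
                # that undeclared variables become known to the question
                # (via mako_names), except for variables that are supplied
                # through "fields" or "field code".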
if template_type == 'docx':
options[template_type + '_template_file'] = [FileInPackage(item, 'template', package=self.package) for item in target['docx template file']]
for item in target['docx template file']:
if not isinstance(item, (str, dict)):
raise DAError('docx template file supplied to attachment must be a string or dict' + self.idebug(target))
template_files = []
for template_file in options['docx_template_file']:
if not template_file.is_code:
the_docx_path = template_file.path()
if not os.path.isfile(the_docx_path):
raise DAError("Missing docx template file " + os.path.basename(the_docx_path))
template_files.append(the_docx_path)
if len(template_files):
if len(template_files) == 1:
the_docx_path = template_files[0]
else:
the_docx_path = docassemble.base.file_docx.concatenate_files(template_files)
try:
docx_template = docassemble.base.file_docx.DocxTemplate(the_docx_path)
the_env = custom_jinja_env()
the_xml = docx_template.get_xml()
the_xml = re.sub(r'<w:p>', '\n<w:p>', the_xml)
the_xml = re.sub(r'({[\%\{].*?[\%\}]})', fix_quotes, the_xml)
the_xml = docx_template.patch_xml(the_xml)
parsed_content = the_env.parse(the_xml)
except TemplateError as the_error:
if the_error.filename is None:
try:
                                the_error.filename = os.path.basename(the_docx_path)
except:
pass
if hasattr(the_error, 'lineno') and the_error.lineno is not None:
line_number = max(the_error.lineno - 4, 0)
the_error.docx_context = map(lambda x: re.sub(r'<[^>]+>', '', x), the_xml.splitlines()[line_number:(line_number + 7)])
raise the_error
for key in jinja2meta.find_undeclared_variables(parsed_content):
if not key.startswith('_'):
self.mako_names.add(key)
for key in ('field code', 'fields'):
if key in target:
if isinstance(target[key], list):
for item in target[key]:
for field_name in item.keys():
try:
self.names_used.remove(field_name)
except:
pass
try:
self.mako_names.remove(field_name)
except:
pass
elif isinstance(target[key], dict):
for field_name in target[key].keys():
try:
self.names_used.remove(field_name)
except:
pass
try:
self.mako_names.remove(field_name)
except:
pass
else:
options[template_type + '_template_file'] = FileInPackage(target[template_type + ' template file'], 'template', package=self.package)
if field_mode == 'manual':
options['fields'] = recursive_textobject(target['fields'], self)
if 'code' in target:
if isinstance(target['code'], str):
options['code'] = compile(target['code'], '<expression>', 'eval')
self.find_fields_in(target['code'])
if 'field variables' in target:
if not isinstance(target['field variables'], list):
raise DAError('The field variables must be expressed in the form of a list' + self.idebug(target))
if 'code dict' not in options:
options['code dict'] = dict()
for varname in target['field variables']:
if not valid_variable_match.match(str(varname)):
raise DAError('The variable ' + str(varname) + " cannot be used in a code list" + self.idebug(target))
options['code dict'][varname] = compile(varname, '<expression>', 'eval')
self.find_fields_in(varname)
if 'raw field variables' in target:
if not isinstance(target['raw field variables'], list):
raise DAError('The raw field variables must be expressed in the form of a list' + self.idebug(target))
if 'raw code dict' not in options:
options['raw code dict'] = dict()
for varname in target['raw field variables']:
if not valid_variable_match.match(str(varname)):
raise DAError('The variable ' + str(varname) + " cannot be used in a code list" + self.idebug(target))
options['raw code dict'][varname] = compile(varname, '<expression>', 'eval')
self.find_fields_in(varname)
if 'field code' in target:
if 'code dict' not in options:
options['code dict'] = dict()
if not isinstance(target['field code'], list):
target['field code'] = [target['field code']]
for item in target['field code']:
if not isinstance(item, dict):
raise DAError('The field code must be expressed in the form of a dictionary' + self.idebug(target))
for key, val in item.items():
options['code dict'][key] = compile(str(val), '<expression>', 'eval')
self.find_fields_in(val)
if 'valid formats' in target:
if isinstance(target['valid formats'], str):
target['valid formats'] = [target['valid formats']]
elif not isinstance(target['valid formats'], list):
raise DAError('Unknown data type in attachment valid formats.' + self.idebug(target))
if 'rtf to docx' in target['valid formats'] and 'docx' in target['valid formats']:
raise DAError('Valid formats cannot include both "rtf to docx" and "docx."' + self.idebug(target))
else:
target['valid formats'] = ['*']
if 'password' in target:
options['password'] = TextObject(target['password'])
if 'template password' in target:
options['template_password'] = TextObject(target['template password'])
if 'persistent' in target:
if isinstance(target['persistent'], bool):
options['persistent'] = target['persistent']
elif isinstance(target['persistent'], str):
options['persistent'] = compile(target['persistent'], '<persistent expression>', 'eval')
self.find_fields_in(target['persistent'])
else:
raise DAError('Unknown data type in attachment persistent.' + self.idebug(target))
if 'private' in target:
if isinstance(target['private'], bool):
options['private'] = target['private']
elif isinstance(target['private'], str):
                    options['private'] = compile(target['private'], '<private expression>', 'eval')
self.find_fields_in(target['private'])
else:
                    raise DAError('Unknown data type in attachment private.' + self.idebug(target))
if 'allow privileges' in target:
if isinstance(target['allow privileges'], dict) and len(target['allow privileges']) == 1 and 'code' in target['allow privileges'] and isinstance(target['allow privileges']['code'], str):
options['allow privileges'] = compile(target['allow privileges']['code'], '<allow privileges expression>', 'eval')
elif isinstance(target['allow privileges'], str):
options['allow privileges'] = [target['allow privileges']]
elif isinstance(target['allow privileges'], list):
for item in target['allow privileges']:
if not isinstance(item, str):
raise DAError('Unknown data type in attachment allow privileges.' + self.idebug(target))
options['allow privileges'] = target['allow privileges']
if 'allow users' in target:
if isinstance(target['allow users'], dict) and len(target['allow users']) == 1 and 'code' in target['allow users'] and isinstance(target['allow users']['code'], str):
options['allow users'] = compile(target['allow users']['code'], '<allow users expression>', 'eval')
elif isinstance(target['allow users'], (str, int)):
options['allow users'] = [target['allow users']]
elif isinstance(target['allow users'], list):
for item in target['allow users']:
if not isinstance(item, (str, int)):
raise DAError('Unknown data type in attachment allow users.' + self.idebug(target))
options['allow users'] = target['allow users']
if 'hyperlink style' in target:
if isinstance(target['hyperlink style'], str):
options['hyperlink_style'] = TextObject(target['hyperlink style'].strip(), question=self)
else:
raise DAError('Unknown data type in attachment hyperlink style.' + self.idebug(target))
if 'pdf/a' in target:
if isinstance(target['pdf/a'], bool):
options['pdf_a'] = target['pdf/a']
elif isinstance(target['pdf/a'], str):
options['pdf_a'] = compile(target['pdf/a'], '<pdfa expression>', 'eval')
self.find_fields_in(target['pdf/a'])
else:
raise DAError('Unknown data type in attachment pdf/a.' + self.idebug(target))
if 'skip undefined' in target:
if isinstance(target['skip undefined'], bool):
options['skip_undefined'] = target['skip undefined']
elif isinstance(target['skip undefined'], str):
options['skip_undefined'] = compile(target['skip undefined'], '<skip undefined expression>', 'eval')
self.find_fields_in(target['skip undefined'])
else:
raise DAError('Unknown data type in attachment skip undefined.' + self.idebug(target))
else:
                options['skip_undefined'] = False
if 'tagged pdf' in target:
if isinstance(target['tagged pdf'], bool):
options['tagged_pdf'] = target['tagged pdf']
elif isinstance(target['tagged pdf'], str):
options['tagged_pdf'] = compile(target['tagged pdf'], '<tagged pdf expression>', 'eval')
self.find_fields_in(target['tagged pdf'])
else:
raise DAError('Unknown data type in attachment tagged pdf.' + self.idebug(target))
if 'content' not in target:
if 'content file code' in options:
return({'name': TextObject(target['name'], question=self), 'filename': TextObject(target['filename'], question=self), 'description': TextObject(target['description'], question=self), 'content': None, 'valid_formats': target['valid formats'], 'metadata': metadata, 'variable_name': variable_name, 'orig_variable_name': variable_name, 'options': options, 'raw': target['raw']})
raise DAError("No content provided in attachment." + self.idebug(target))
#logmessage("The content is " + str(target['content']))
return({'name': TextObject(target['name'], question=self), 'filename': TextObject(target['filename'], question=self), 'description': TextObject(target['description'], question=self), 'content': TextObject("\n".join(defs) + "\n" + target['content'], question=self), 'valid_formats': target['valid formats'], 'metadata': metadata, 'variable_name': variable_name, 'orig_variable_name': variable_name, 'options': options, 'raw': target['raw']})
elif isinstance(orig_target, str):
return({'name': TextObject('Document'), 'filename': TextObject('Document'), 'description': TextObject(''), 'content': TextObject(orig_target, question=self), 'valid_formats': ['*'], 'metadata': metadata, 'variable_name': variable_name, 'orig_variable_name': variable_name, 'options': options, 'raw': False})
else:
raise DAError("Unknown data type in attachment")
def get_question_for_field_with_sub_fields(self, field, user_dict):
field_list = eval(field.extras['fields_code'], user_dict)
if not isinstance(field_list, list):
raise DAError("A code directive that defines items in fields must return a list")
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self.interview, new_interview)
return Question(dict(question='n/a', fields=field_list), new_interview, source=new_interview_source, package=self.package)
def get_fields_and_sub_fields(self, user_dict):
all_fields = list()
for field in self.fields:
if hasattr(field, 'extras') and 'fields_code' in field.extras:
the_question = self.get_question_for_field_with_sub_fields(field, user_dict)
for sub_field in the_question.fields:
all_fields.append(sub_field)
else:
all_fields.append(field)
return all_fields
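    # Assembles everything the front end needs in order to display this
    # question: screen parts, help text, action buttons, field choices,
    # defaults, hints, labels, and per-field extras, all evaluated against
    # user_dict.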
def ask(self, user_dict, old_user_dict, the_x, iterators, sought, orig_sought, process_list_collect=True, test_for_objects=True):
#logmessage("ask: orig_sought is " + str(orig_sought) + " and q is " + self.name)
docassemble.base.functions.this_thread.current_question = self
if the_x != 'None':
exec("x = " + the_x, user_dict)
if len(iterators):
for indexno in range(len(iterators)):
#logmessage("Running " + list_of_indices[indexno] + " = " + iterators[indexno])
exec(list_of_indices[indexno] + " = " + iterators[indexno], user_dict)
if self.need is not None:
for need_code in self.need:
eval(need_code, user_dict)
for the_field in self.undefine:
docassemble.base.functions.undefine(the_field)
if len(self.reconsider) > 0:
docassemble.base.functions.reconsider(*self.reconsider)
question_text = self.content.text(user_dict)
#logmessage("Asking " + str(question_text))
#sys.stderr.write("Asking " + str(question_text) + "\n")
if self.subcontent is not None:
subquestion = self.subcontent.text(user_dict)
else:
subquestion = None
the_default_titles = dict()
if self.language in self.interview.default_title:
the_default_titles.update(self.interview.default_title[self.language])
for key, val in self.interview.default_title['*'].items():
if key not in the_default_titles:
the_default_titles[key] = val
extras = dict()
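        # Action buttons may be given as literal dictionaries (whose TextObject
        # parts are evaluated here) or as code that must evaluate to a list of
        # dictionaries; both forms are normalized into extras['action_buttons'].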
if len(self.action_buttons) > 0:
extras['action_buttons'] = list()
for item in self.action_buttons:
if isinstance(item, dict):
label = item['label'].text(user_dict).strip()
given_arguments = item.get('arguments', dict())
arguments = dict()
forget_prior = item.get('forget_prior', False)
for key, val in given_arguments.items():
if isinstance(val, TextObject):
arguments[key] = val.text(user_dict).strip()
else:
arguments[key] = val
action = item['action'].text(user_dict).strip()
if not (re.search(r'^https?://', action) or action.startswith('javascript:') or action.startswith('/') or action.startswith('?')):
if forget_prior:
arguments = {'_action': action, '_arguments': arguments}
action = '_da_priority_action'
action = docassemble.base.functions.url_action(action, **arguments)
color = item['color'].text(user_dict).strip()
if item['target'] is not None:
target = item['target'].text(user_dict).strip()
else:
target = None
if item['icon'] is not None:
icon = item['icon'].text(user_dict).strip()
else:
icon = None
if item['placement'] is not None:
placement = item['placement'].text(user_dict).strip()
else:
placement = None
extras['action_buttons'].append(dict(action=action, label=label, color=color, icon=icon, placement=placement, forget_prior=forget_prior, target=target))
else:
action_buttons = eval(item, user_dict)
if hasattr(action_buttons, 'instanceName') and hasattr(action_buttons, 'elements'):
action_buttons = action_buttons.elements
if not isinstance(action_buttons, list):
raise DAError("action buttons code did not evaluate to a list")
for button in action_buttons:
if not (isinstance(button, dict) and 'label' in button and 'action' in button and isinstance(button['label'], str) and isinstance(button['action'], str)):
raise DAError("action buttons code did not evaluate to a list of dictionaries with label and action items")
if 'new window' in button and not isinstance(button['new window'], (str, bool, NoneType)):
raise DAError("action buttons code included a new window item that was not boolean, text, or None")
if 'color' in button and not isinstance(button['color'], (str, NoneType)):
raise DAError("action buttons code included a color item that was not text or None")
if 'icon' in button and not isinstance(button['icon'], (str, NoneType)):
raise DAError("action buttons code included an icon item that was not text or None")
color = button.get('color', 'primary')
if color is None:
color = 'primary'
icon = button.get('icon', None)
placement = button.get('placement', None)
target = button.get('new window', None)
if target is True:
target = '_blank'
elif target is False:
target = None
arguments = button.get('arguments', dict())
forget_prior = button.get('forget_prior', False)
if arguments is None:
arguments = dict()
if not isinstance(arguments, dict):
raise DAError("action buttons code included an arguments item that was not a dictionary")
action = button['action']
if not (re.search(r'^https?://', action) or action.startswith('javascript:') or action.startswith('/') or action.startswith('?')):
if forget_prior:
arguments = {'_action': action, '_arguments': arguments}
action = '_da_priority_action'
action = docassemble.base.functions.url_action(action, **arguments)
label = button['label']
extras['action_buttons'].append(dict(action=action, label=label, color=color, icon=icon, placement=placement, target=target))
for item in extras['action_buttons']:
                if item['color'] not in ('primary', 'secondary', 'success', 'danger', 'warning', 'info', 'light', 'dark', 'link'):
                    raise DAError("color in action buttons not valid: " + repr(item['color']))
if hasattr(self, 'question_metadata'):
extras['questionMetadata'] = recursive_eval_textobject_or_primitive(self.question_metadata, user_dict)
if hasattr(self, 'css_class') and self.css_class is not None:
extras['cssClass'] = self.css_class.text(user_dict)
elif 'css class' in user_dict['_internal'] and user_dict['_internal']['css class'] is not None:
extras['cssClass'] = user_dict['_internal']['css class']
elif self.language in self.interview.default_screen_parts and 'css class' in self.interview.default_screen_parts[self.language]:
extras['cssClass'] = self.interview.default_screen_parts[self.language]['css class'].text(user_dict)
elif 'css class' in the_default_titles:
extras['cssClass'] = the_default_titles['css class']
if hasattr(self, 'table_css_class') and self.table_css_class is not None:
extras['tableCssClass'] = self.table_css_class.text(user_dict)
elif 'table css class' in user_dict['_internal'] and user_dict['_internal']['table css class'] is not None:
extras['tableCssClass'] = user_dict['_internal']['table css class']
elif self.language in self.interview.default_screen_parts and 'table css class' in self.interview.default_screen_parts[self.language]:
extras['tableCssClass'] = self.interview.default_screen_parts[self.language]['table css class'].text(user_dict)
elif 'table css class' in the_default_titles:
extras['tableCssClass'] = the_default_titles['table css class']
if hasattr(self, 'undertext') and self.undertext is not None:
extras['underText'] = self.undertext.text(user_dict)
elif 'under' in user_dict['_internal'] and user_dict['_internal']['under'] is not None:
extras['underText'] = user_dict['_internal']['under']
elif self.language in self.interview.default_screen_parts and 'under' in self.interview.default_screen_parts[self.language]:
extras['underText'] = self.interview.default_screen_parts[self.language]['under'].text(user_dict)
elif 'under' in the_default_titles:
extras['underText'] = the_default_titles['under']
if hasattr(self, 'pretext') and self.pretext is not None:
extras['pre text'] = self.pretext.text(user_dict)
elif 'pre' in user_dict['_internal'] and user_dict['_internal']['pre'] is not None:
extras['pre text'] = user_dict['_internal']['pre']
elif self.language in self.interview.default_screen_parts and 'pre' in self.interview.default_screen_parts[self.language]:
extras['pre text'] = self.interview.default_screen_parts[self.language]['pre'].text(user_dict)
elif 'pre' in the_default_titles:
extras['pre text'] = the_default_titles['pre']
if hasattr(self, 'posttext') and self.posttext is not None:
extras['post text'] = self.posttext.text(user_dict)
elif 'post' in user_dict['_internal'] and user_dict['_internal']['post'] is not None:
extras['post text'] = user_dict['_internal']['post']
elif self.language in self.interview.default_screen_parts and 'post' in self.interview.default_screen_parts[self.language]:
extras['post text'] = self.interview.default_screen_parts[self.language]['post'].text(user_dict)
elif 'post' in the_default_titles:
extras['post text'] = the_default_titles['post']
if hasattr(self, 'righttext') and self.righttext is not None:
extras['rightText'] = self.righttext.text(user_dict)
elif 'right' in user_dict['_internal'] and user_dict['_internal']['right'] is not None:
extras['rightText'] = user_dict['_internal']['right']
elif self.language in self.interview.default_screen_parts and 'right' in self.interview.default_screen_parts[self.language]:
extras['rightText'] = self.interview.default_screen_parts[self.language]['right'].text(user_dict)
elif 'right' in the_default_titles:
extras['rightText'] = the_default_titles['right']
for screen_part in ('footer', 'submit', 'exit link', 'exit label', 'exit url', 'full', 'logo', 'title', 'subtitle', 'tab title', 'short title', 'logo', 'title url', 'title url opens in other window'):
if screen_part in user_dict['_internal'] and user_dict['_internal'][screen_part] is not None:
extras[screen_part + ' text'] = user_dict['_internal'][screen_part]
if self.language in self.interview.default_screen_parts:
for screen_part in self.interview.default_screen_parts[self.language]:
if screen_part in ('footer', 'submit', 'exit link', 'exit label', 'exit url', 'full', 'logo', 'title', 'subtitle', 'tab title', 'short title', 'logo', 'title url', 'title url opens in other window') and (screen_part + ' text') not in extras:
extras[screen_part + ' text'] = self.interview.default_screen_parts[self.language][screen_part].text(user_dict)
for key, val in the_default_titles.items():
if key in ('pre', 'post', 'footer', 'submit', 'exit link', 'exit label', 'exit url', 'full', 'logo', 'title', 'subtitle', 'tab title', 'short title', 'logo', 'title url', 'title url opens in other window') and (key + ' text') not in extras:
extras[key + ' text'] = val
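        # Glossary terms and auto-terms attached to the question are evaluated
        # here, preferring a language-specific alternate term when one exists.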
if len(self.terms):
lang = docassemble.base.functions.get_language()
extras['terms'] = dict()
for termitem, definition in self.terms.items():
if lang in definition['alt_terms']:
extras['terms'][definition['alt_terms'][lang].lower()] = dict(definition=definition['definition'].text(user_dict))
else:
extras['terms'][termitem] = dict(definition=definition['definition'].text(user_dict))
if len(self.autoterms):
lang = docassemble.base.functions.get_language()
extras['autoterms'] = dict()
for termitem, definition in self.autoterms.items():
if lang in definition['alt_terms']:
extras['autoterms'][definition['alt_terms'][lang].lower()] = dict(definition=definition['definition'].text(user_dict))
else:
extras['autoterms'][termitem] = dict(definition=definition['definition'].text(user_dict))
for term_type in ('terms', 'autoterms'):
if term_type in user_dict['_internal']:
extras['interview_' + term_type] = dict()
for lang, termdefs in getattr(self.interview, term_type).items():
if lang not in extras['interview_' + term_type]:
extras['interview_' + term_type][lang] = dict()
for term, term_info in termdefs.items():
extras['interview_' + term_type][lang][term] = term_info
for lang, termdefs in user_dict['_internal'][term_type].items():
if lang not in extras['interview_' + term_type]:
extras['interview_' + term_type][lang] = dict()
for term, term_info in termdefs.items():
extras['interview_' + term_type][lang][term] = term_info
if self.css is not None:
extras['css'] = self.css.text(user_dict)
if self.script is not None:
extras['script'] = self.script.text(user_dict)
if self.continuelabel is not None:
continuelabel = self.continuelabel.text(user_dict)
elif self.question_type == 'review':
if 'resume button label' in user_dict['_internal'] and user_dict['_internal']['resume button label'] is not None:
continuelabel = user_dict['_internal']['resume button label']
elif self.language in self.interview.default_screen_parts and 'resume button label' in self.interview.default_screen_parts[self.language]:
continuelabel = self.interview.default_screen_parts[self.language]['resume button label'].text(user_dict)
elif 'resume button label' in the_default_titles:
continuelabel = the_default_titles['resume button label']
else:
continuelabel = None
else:
if 'continue button label' in user_dict['_internal'] and user_dict['_internal']['continue button label'] is not None:
continuelabel = user_dict['_internal']['continue button label']
elif self.language in self.interview.default_screen_parts and 'continue button label' in self.interview.default_screen_parts[self.language]:
continuelabel = self.interview.default_screen_parts[self.language]['continue button label'].text(user_dict)
elif 'continue button label' in the_default_titles:
continuelabel = the_default_titles['continue button label']
else:
continuelabel = None
if self.backbuttonlabel is not None:
extras['back button label text'] = self.backbuttonlabel.text(user_dict)
elif 'back button label' in user_dict['_internal'] and user_dict['_internal']['back button label'] is not None:
extras['back button label text'] = user_dict['_internal']['back button label']
elif self.language in self.interview.default_screen_parts and 'back button label' in self.interview.default_screen_parts[self.language]:
extras['back button label text'] = self.interview.default_screen_parts[self.language]['back button label'].text(user_dict)
elif 'back button label' in the_default_titles:
extras['back button label text'] = the_default_titles['back button label']
else:
extras['back button label text'] = None
if self.cornerbackbuttonlabel is not None:
extras['corner back button label text'] = self.cornerbackbuttonlabel.text(user_dict)
elif 'corner back button label' in user_dict['_internal'] and user_dict['_internal']['corner back button label'] is not None:
extras['corner back button label text'] = user_dict['_internal']['corner back button label']
elif self.language in self.interview.default_screen_parts and 'corner back button label' in self.interview.default_screen_parts[self.language]:
extras['corner back button label text'] = self.interview.default_screen_parts[self.language]['corner back button label'].text(user_dict)
elif 'corner back button label' in the_default_titles:
extras['corner back button label text'] = the_default_titles['corner back button label']
else:
extras['corner back button label text'] = None
if self.helptext is not None:
if self.helplabel is not None:
helplabel = self.helplabel.text(user_dict)
elif 'help label' in user_dict['_internal'] and user_dict['_internal']['help label'] is not None:
helplabel = user_dict['_internal']['help label']
elif self.language in self.interview.default_screen_parts and 'help label' in self.interview.default_screen_parts[self.language]:
helplabel = self.interview.default_screen_parts[self.language]['help label'].text(user_dict)
elif 'help label' in the_default_titles:
helplabel = the_default_titles['help label']
else:
helplabel = None
if self.audiovideo is not None and 'help' in self.audiovideo:
the_audio_video = process_audio_video_list(self.audiovideo['help'], user_dict)
else:
the_audio_video = None
help_content = self.helptext.text(user_dict)
if re.search(r'[^\s]', help_content) or the_audio_video is not None:
help_text_list = [{'heading': None, 'content': help_content, 'audiovideo': the_audio_video, 'label': helplabel, 'from': 'question'}]
else:
help_text_list = list()
else:
help_text_list = list()
if self.language in self.interview.default_screen_parts and 'help label' in self.interview.default_screen_parts[self.language]:
extras['help label text'] = self.interview.default_screen_parts[self.language]['help label'].text(user_dict)
elif 'help label' in the_default_titles:
extras['help label text'] = the_default_titles['help label']
interview_help_text_list = self.interview.processed_helptext(user_dict, self.language)
if len(interview_help_text_list) > 0:
help_text_list.extend(interview_help_text_list)
if self.audiovideo is not None and 'question' in self.audiovideo:
audiovideo = process_audio_video_list(self.audiovideo['question'], user_dict)
else:
audiovideo = None
if self.decorations is not None:
decorations = list()
for decoration_item in self.decorations:
processed_item = dict()
for key, value in decoration_item.items():
processed_item[key] = value.text(user_dict).strip()
decorations.append(processed_item)
else:
decorations = None
selectcompute = dict()
defaults = dict()
defined = dict()
hints = dict()
helptexts = dict()
labels = dict()
extras['required'] = dict()
if hasattr(self, 'back_button'):
if isinstance(self.back_button, (bool, NoneType)):
extras['back_button'] = self.back_button
else:
extras['back_button'] = eval(self.back_button, user_dict)
if hasattr(self, 'allowed_to_set'):
if isinstance(self.allowed_to_set, list):
extras['allowed_to_set'] = self.allowed_to_set
else:
extras['allowed_to_set'] = eval(self.allowed_to_set, user_dict)
if not isinstance(extras['allowed_to_set'], list):
raise DAError("allowed to set code did not evaluate to a list")
for item in extras['allowed_to_set']:
if not isinstance(item, str):
raise DAError("allowed to set code did not evaluate to a list of text items")
if self.reload_after is not None:
number = str(self.reload_after.text(user_dict))
if number not in ("False", "false", "Null", "None", "none", "null"):
if number in ("True", "true"):
number = "10"
if number:
number = re.sub(r'[^0-9]', r'', number)
else:
number = "10"
if int(number) < 4:
number = "4"
extras['reload_after'] = number
if hasattr(self, 'allow_downloading'):
if isinstance(self.allow_downloading, bool):
extras['allow_downloading'] = self.allow_downloading
else:
extras['allow_downloading'] = eval(self.allow_downloading, user_dict)
if hasattr(self, 'always_include_editable_files'):
if isinstance(self.always_include_editable_files, bool):
extras['always_include_editable_files'] = self.always_include_editable_files
else:
extras['always_include_editable_files'] = eval(self.always_include_editable_files, user_dict)
if hasattr(self, 'attachment_notice'):
if isinstance(self.attachment_notice, bool):
extras['attachment_notice'] = self.attachment_notice
else:
extras['attachment_notice'] = eval(self.attachment_notice, user_dict)
if hasattr(self, 'download_tab'):
if isinstance(self.download_tab, bool):
extras['download_tab'] = self.download_tab
else:
extras['download_tab'] = eval(self.download_tab, user_dict)
if hasattr(self, 'manual_attachment_list'):
if isinstance(self.manual_attachment_list, bool):
extras['manual_attachment_list'] = self.manual_attachment_list
else:
extras['manual_attachment_list'] = eval(self.manual_attachment_list, user_dict)
if hasattr(self, 'allow_emailing'):
if isinstance(self.allow_emailing, bool):
extras['allow_emailing'] = self.allow_emailing
else:
extras['allow_emailing'] = eval(self.allow_emailing, user_dict)
if hasattr(self, 'zip_filename'):
extras['zip_filename'] = docassemble.base.functions.single_paragraph(self.zip_filename.text(user_dict))
if hasattr(self, 'ga_id'):
extras['ga_id'] = self.ga_id.text(user_dict)
if hasattr(self, 'segment') and 'id' in self.segment:
extras['segment'] = dict(arguments=dict())
extras['segment']['id'] = self.segment['id'].text(user_dict)
if 'arguments' in self.segment:
for key, val in self.segment['arguments'].items():
extras['segment']['arguments'][key] = self.segment['arguments'][key].text(user_dict)
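        # What happens next depends on the question type: raw responses and
        # file responses only need a content type, review screens evaluate each
        # review item, and ordinary questions process their fields below.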
if self.question_type == 'response':
extras['content_type'] = self.content_type.text(user_dict)
# if hasattr(self, 'binaryresponse'):
# extras['binaryresponse'] = self.binaryresponse
elif self.question_type == 'sendfile':
# if self.response_file:
# extras['response_filename'] = self.response_file.path()
# else:
# extras['response_filename'] = None
extras['content_type'] = self.content_type.text(user_dict)
elif self.question_type == 'review':
if hasattr(self, 'skip_undefined') and not self.skip_undefined:
skip_undefined = False
else:
skip_undefined = True
extras['ok'] = dict()
for field in self.fields:
docassemble.base.functions.this_thread.misc['current_field'] = field.number
extras['ok'][field.number] = False
if hasattr(field, 'saveas_code'):
failed = False
for (expression, is_showif) in field.saveas_code:
if skip_undefined:
try:
the_val = eval(expression, user_dict)
except LazyNameError:
raise
except Exception as err:
if self.interview.debug:
logmessage("Exception in review block: " + err.__class__.__name__ + ": " + str(err))
failed = True
break
if is_showif and not the_val:
failed = True
break
else:
the_val = eval(expression, user_dict)
if is_showif and not the_val:
failed = True
break
if failed:
continue
if hasattr(field, 'action'):
if 'action' not in extras:
extras['action'] = dict()
extras['action'][field.number] = json.dumps(substitute_vars_action(field.action, self.is_generic, the_x, iterators))
if hasattr(field, 'extras'):
if 'show_if_js' in field.extras:
if 'show_if_js' not in extras:
extras['show_if_js'] = dict()
extras['show_if_js'][field.number] = dict(expression=field.extras['show_if_js']['expression'].text(user_dict), vars=copy.deepcopy(field.extras['show_if_js']['vars']), sign=field.extras['show_if_js']['sign'], mode=field.extras['show_if_js']['mode'])
if 'field metadata' in field.extras:
if 'field metadata' not in extras:
extras['field metadata'] = dict()
if skip_undefined:
try:
extras['field metadata'][field.number] = recursive_eval_textobject_or_primitive(field.extras['field metadata'], user_dict)
except LazyNameError:
raise
except Exception as err:
if self.interview.debug:
logmessage("Exception in field metadata: " + err.__class__.__name__ + ": " + str(err))
continue
else:
extras['field metadata'][field.number] = recursive_eval_textobject_or_primitive(field.extras['field metadata'], user_dict)
for key in ('note', 'html', 'min', 'max', 'minlength', 'maxlength', 'step', 'scale', 'inline', 'inline width', 'currency symbol'): # 'script', 'css',
if key in field.extras:
if key not in extras:
extras[key] = dict()
if skip_undefined:
try:
extras[key][field.number] = field.extras[key].text(user_dict).strip()
except LazyNameError:
raise
except Exception as err:
if self.interview.debug:
logmessage("Exception in review block: " + err.__class__.__name__ + ": " + str(err))
continue
else:
extras[key][field.number] = field.extras[key].text(user_dict)
if isinstance(extras[key][field.number], str):
extras[key][field.number] = extras[key][field.number].strip()
if extras[key][field.number] == '':
del extras[key][field.number]
if hasattr(field, 'helptext'):
if skip_undefined:
try:
helptexts[field.number] = field.helptext.text(user_dict)
except LazyNameError:
raise
except Exception as err:
if self.interview.debug:
logmessage("Exception in review block: " + err.__class__.__name__ + ": " + str(err))
continue
else:
helptexts[field.number] = field.helptext.text(user_dict)
if hasattr(field, 'label'):
if skip_undefined:
try:
labels[field.number] = field.label.text(user_dict)
except LazyNameError:
raise
except Exception as err:
if self.interview.debug:
logmessage("Exception in review block: " + err.__class__.__name__ + ": " + str(err))
continue
else:
labels[field.number] = field.label.text(user_dict)
extras['ok'][field.number] = True
if 'current_field' in docassemble.base.functions.this_thread.misc:
del docassemble.base.functions.this_thread.misc['current_field']
else:
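            # In "list collect" mode the same fields are repeated once for each
            # element of the underlying DAList (plus extra blank rows), so ask()
            # is called recursively for each index and the results are merged
            # using field numbers prefixed with the index (e.g. "0_1", "1_1").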
if hasattr(self, 'list_collect') and process_list_collect and eval(self.list_collect, user_dict):
fields_to_scan = self.get_fields_and_sub_fields(user_dict)
indexno = 0
common_var = None
for field in fields_to_scan:
if not hasattr(field, 'saveas'):
continue
the_saveas = from_safeid(field.saveas)
if common_var is None:
common_var = the_saveas
continue
mismatch = False
for char_index in range(len(common_var)):
if the_saveas[char_index] != common_var[char_index]:
mismatch = True
break
if mismatch:
common_var = common_var[0:char_index]
common_var = re.sub(r'[^\]]*$', '', common_var)
m = re.search(r'^(.*)\[([ijklmn])\]$', common_var)
if not m:
raise DAError("Cannot use list collect on these fields. " + common_var)
the_list_varname = m.group(1)
if hasattr(self, 'list_collect_is_final'):
extras['list_collect_is_final'] = eval(self.list_collect_is_final, user_dict)
else:
extras['list_collect_is_final'] = True
if hasattr(self, 'list_collect_allow_append'):
extras['list_collect_allow_append'] = eval(self.list_collect_allow_append, user_dict)
else:
extras['list_collect_allow_append'] = True
if hasattr(self, 'list_collect_allow_delete'):
extras['list_collect_allow_delete'] = eval(self.list_collect_allow_delete, user_dict)
else:
extras['list_collect_allow_delete'] = True
if hasattr(self, 'list_collect_add_another_label'):
extras['list_collect_add_another_label'] = self.list_collect_add_another_label.text(user_dict)
else:
extras['list_collect_add_another_label'] = None
extras['list_iterator'] = m.group(2)
the_list = eval(the_list_varname, user_dict)
if not hasattr(the_list, 'elements') or not isinstance(the_list.elements, list):
raise DAError("Cannot use list collect on a variable that is not a DAList.")
extras['list_collect'] = the_list
extras['list_message'] = dict()
if hasattr(the_list, 'minimum_number') and the_list.minimum_number:
extras['list_minimum'] = the_list.minimum_number
iterator_index = list_of_indices.index(extras['list_iterator'])
length_to_use = len(the_list.elements)
if hasattr(the_list, 'minimum_number') and the_list.minimum_number is not None and the_list.minimum_number > length_to_use:
length_to_use = the_list.minimum_number
if length_to_use == 0:
length_to_use = 1
if the_list.ask_object_type or not extras['list_collect_allow_append']:
extra_amount = 0
else:
extra_amount = get_config('list collect extra count', 15)
for list_indexno in range(length_to_use + extra_amount):
new_iterators = copy.copy(iterators)
new_iterators[iterator_index] = str(list_indexno)
ask_result = self.ask(user_dict, old_user_dict, the_x, new_iterators, sought, orig_sought, process_list_collect=False, test_for_objects=(list_indexno < length_to_use))
if hasattr(self, 'list_collect_label'):
extras['list_message'][list_indexno] = self.list_collect_label.text(user_dict)
else:
extras['list_message'][list_indexno] = ''
for key in ('selectcompute', 'defaults', 'hints', 'helptexts', 'labels'):
for field_num, val in ask_result[key].items():
if key == 'selectcompute':
selectcompute[str(list_indexno) + '_' + str(field_num)] = val
if list_indexno == length_to_use - 1:
selectcompute[str(list_indexno + 1) + '_' + str(field_num)] = val
#for ii in range(1, extra_amount + 1):
# selectcompute[str(list_indexno + ii) + '_' + str(field_num)] = val
elif key == 'defaults':
defaults[str(list_indexno) + '_' + str(field_num)] = val
#if list_indexno == length_to_use - 1:
#for ii in range(1, extra_amount + 1):
# defaults[str(list_indexno + ii) + '_' + str(field_num)] = val
elif key == 'hints':
hints[str(list_indexno) + '_' + str(field_num)] = val
#if list_indexno == length_to_use - 1:
#for ii in range(1, extra_amount + 1):
# hints[str(list_indexno + ii) + '_' + str(field_num)] = val
elif key == 'helptexts':
helptexts[str(list_indexno) + '_' + str(field_num)] = val
#if list_indexno == length_to_use - 1:
#for ii in range(1, extra_amount + 1):
# helptexts[str(list_indexno + ii) + '_' + str(field_num)] = val
elif key == 'labels':
labels[str(list_indexno) + '_' + str(field_num)] = val
#if list_indexno == length_to_use - 1:
#for ii in range(1, extra_amount + 1):
# labels[str(list_indexno + ii) + '_' + str(field_num)] = val
for key, possible_dict in ask_result['extras'].items():
if isinstance(possible_dict, dict):
if key not in extras:
extras[key] = dict()
for field_num, val in possible_dict.items():
extras[key][str(list_indexno) + '_' + str(field_num)] = val
#if list_indexno == length_to_use - 1:
#for ii in range(1, extra_amount + 1):
# extras[key][str(list_indexno + ii) + '_' + str(field_num)] = val
if len(iterators):
for indexno in range(len(iterators)):
exec(list_of_indices[indexno] + " = " + iterators[indexno], user_dict)
else:
if hasattr(self, 'fields_saveas'):
only_empty_fields_exist = False
else:
only_empty_fields_exist = True
commands_to_run = list()
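                # First pass over the fields: compute the available choices for
                # each multiple-choice field and track whether the question
                # consists only of fields with nothing to ask, in which case the
                # variables are set directly via CodeExecute further below.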
for field in self.fields:
if hasattr(field, 'inputtype') and field.inputtype == 'combobox':
only_empty_fields_exist = False
docassemble.base.functions.this_thread.misc['current_field'] = field.number
if hasattr(field, 'has_code') and field.has_code:
# standalone multiple-choice questions
selectcompute[field.number] = list()
for choice in field.choices:
if 'compute' in choice and isinstance(choice['compute'], CodeType):
selectcompute[field.number].extend(process_selections(eval(choice['compute'], user_dict)))
else:
new_item = dict()
if 'image' in choice:
new_item['image'] = choice['image']
if 'help' in choice:
new_item['help'] = choice['help'].text(user_dict)
if 'default' in choice:
new_item['default'] = choice['default']
if isinstance(choice['key'], TextObject):
new_item['key'] = choice['key'].text(user_dict)
else:
new_item['key'] = choice['key']
new_item['label'] = choice['label'].text(user_dict)
selectcompute[field.number].append(new_item)
if len(selectcompute[field.number]) > 0:
only_empty_fields_exist = False
elif test_for_objects:
if hasattr(field, 'datatype') and field.datatype in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes'):
ensure_object_exists(from_safeid(field.saveas), field.datatype, user_dict, commands=commands_to_run)
commands_to_run.append(from_safeid(field.saveas) + ".gathered = True")
else:
if not (hasattr(field, 'inputtype') and field.inputtype == 'combobox'):
commands_to_run.append(from_safeid(field.saveas) + ' = None')
elif hasattr(field, 'choicetype') and field.choicetype == 'compute':
# multiple choice field in choices
if hasattr(field, 'datatype') and field.datatype in ('object', 'object_radio', 'object_multiselect', 'object_checkboxes', 'multiselect', 'checkboxes'):
exec("from docassemble.base.core import selections as docassemble_base_core_selections", user_dict)
if hasattr(field, 'object_labeler'):
labeler_func = eval(field.object_labeler['compute'], user_dict)
if not isinstance(labeler_func, types.FunctionType):
raise DAError("The object labeler was not a function")
user_dict['_DAOBJECTLABELER'] = labeler_func
else:
labeler_func = None
if hasattr(field, 'help_generator'):
help_generator_func = eval(field.help_generator['compute'], user_dict)
if not isinstance(help_generator_func, types.FunctionType):
raise DAError("The help generator was not a function")
user_dict['_DAHELPGENERATOR'] = help_generator_func
else:
help_generator_func = None
if hasattr(field, 'image_generator'):
image_generator_func = eval(field.image_generator['compute'], user_dict)
if not isinstance(image_generator_func, types.FunctionType):
raise DAError("The image generator was not a function")
user_dict['_DAIMAGEGENERATOR'] = image_generator_func
else:
image_generator_func = None
to_compute = field.selections['compute']
if field.datatype in ('object_multiselect', 'object_checkboxes'):
default_exists = False
#logmessage("Testing for " + from_safeid(field.saveas))
try:
assert test_for_objects
eval(from_safeid(field.saveas), user_dict)
default_to_use = from_safeid(field.saveas)
except:
default_to_use = 'None'
#logmessage("Running " + '_DAOBJECTDEFAULTDA = ' + default_to_use)
exec('_DAOBJECTDEFAULTDA = ' + default_to_use, user_dict)
if 'exclude' in field.selections:
exclude_list = list()
for x in field.selections['exclude']:
exclude_list.append(eval(x, user_dict))
selectcompute[field.number] = process_selections(eval(to_compute, user_dict), exclude=exclude_list)
else:
#logmessage("Doing " + field.selections.get('sourcecode', "No source code"))
selectcompute[field.number] = process_selections(eval(to_compute, user_dict))
                        if field.datatype in ('object_multiselect', 'object_checkboxes') and '_DAOBJECTDEFAULTDA' in user_dict:
del user_dict['_DAOBJECTDEFAULTDA']
if labeler_func is not None:
del user_dict['_DAOBJECTLABELER']
if help_generator_func is not None:
del user_dict['_DAHELPGENERATOR']
if image_generator_func is not None:
del user_dict['_DAIMAGEGENERATOR']
if len(selectcompute[field.number]) > 0:
only_empty_fields_exist = False
elif test_for_objects:
if hasattr(field, 'datatype') and field.datatype in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes'):
ensure_object_exists(from_safeid(field.saveas), field.datatype, user_dict, commands=commands_to_run)
commands_to_run.append(from_safeid(field.saveas) + '.gathered = True')
else:
if not (hasattr(field, 'inputtype') and field.inputtype == 'combobox'):
commands_to_run.append(from_safeid(field.saveas) + ' = None')
elif hasattr(field, 'choicetype') and field.choicetype == 'manual':
if 'exclude' in field.selections:
to_exclude = list()
for x in field.selections['exclude']:
to_exclude.append(eval(x, user_dict))
to_exclude = unpack_list(to_exclude)
selectcompute[field.number] = list()
for candidate in field.selections['values']:
if isinstance(candidate['key'], TextObject):
new_item = dict(key=candidate['key'].text(user_dict), label=candidate['label'].text(user_dict))
else:
new_item = dict(key=candidate['key'], label=candidate['label'].text(user_dict))
if 'image' in candidate:
new_item['image'] = candidate['image']
if 'help' in candidate:
new_item['help'] = candidate['help'].text(user_dict)
if 'default' in candidate:
new_item['default'] = candidate['default']
if new_item['key'] not in to_exclude:
selectcompute[field.number].append(new_item)
else:
selectcompute[field.number] = list()
for item in field.selections['values']:
if isinstance(item['key'], TextObject):
new_item = dict(key=item['key'].text(user_dict), label=item['label'].text(user_dict))
else:
new_item = dict(key=item['key'], label=item['label'].text(user_dict))
if 'image' in item:
new_item['image'] = item['image']
if 'help' in item:
new_item['help'] = item['help'].text(user_dict)
if 'default' in item:
new_item['default'] = item['default']
selectcompute[field.number].append(new_item)
if len(selectcompute[field.number]) > 0:
only_empty_fields_exist = False
else:
if not (hasattr(field, 'inputtype') and field.inputtype == 'combobox'):
commands_to_run.append(from_safeid(field.saveas) + ' = None')
elif hasattr(field, 'saveas') and self.question_type == "multiple_choice":
selectcompute[field.number] = list()
for item in field.choices:
new_item = dict()
if 'image' in item:
new_item['image'] = item['image']
if 'help' in item:
new_item['help'] = item['help'].text(user_dict)
if 'default' in item:
new_item['default'] = item['default']
if isinstance(item['key'], TextObject):
new_item['key'] = item['key'].text(user_dict)
else:
new_item['key'] = item['key']
new_item['label'] = item['label'].text(user_dict)
selectcompute[field.number].append(new_item)
if len(selectcompute[field.number]) > 0:
only_empty_fields_exist = False
else:
if not (hasattr(field, 'inputtype') and field.inputtype == 'combobox'):
commands_to_run.append(from_safeid(field.saveas) + ' = None')
elif self.question_type == "multiple_choice":
selectcompute[field.number] = list()
for item in field.choices:
new_item = dict()
if 'image' in item:
new_item['image'] = item['image']
if 'help' in item:
new_item['help'] = item['help'].text(user_dict)
if 'default' in item:
new_item['default'] = item['default']
new_item['label'] = item['label'].text(user_dict)
new_item['key'] = item['key']
selectcompute[field.number].append(new_item)
only_empty_fields_exist = False
else:
only_empty_fields_exist = False
if len(self.fields) > 0 and only_empty_fields_exist:
if test_for_objects:
assumed_objects = set()
for field in self.fields:
if hasattr(field, 'saveas'):
parse_result = parse_var_name(from_safeid(field.saveas))
if not parse_result['valid']:
raise DAError("Variable name " + from_safeid(field.saveas) + " is invalid: " + parse_result['reason'])
if len(parse_result['objects']):
assumed_objects.add(parse_result['objects'][-1])
if len(parse_result['bracket_objects']):
assumed_objects.add(parse_result['bracket_objects'][-1])
for var in assumed_objects:
if complications.search(var) or var not in user_dict:
eval(var, user_dict)
raise CodeExecute(commands_to_run, self)
if 'current_field' in docassemble.base.functions.this_thread.misc:
del docassemble.base.functions.this_thread.misc['current_field']
extras['ok'] = dict()
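                # Second pass over the fields: evaluate show-if conditions,
                # permissions, required flags, validation hooks, object
                # selections, and labels for the fields that will be shown.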
for field in self.fields:
docassemble.base.functions.this_thread.misc['current_field'] = field.number
if hasattr(field, 'showif_code'):
result = eval(field.showif_code, user_dict)
if hasattr(field, 'extras') and 'show_if_sign_code' in field.extras and field.extras['show_if_sign_code'] == 0:
if result:
extras['ok'][field.number] = False
continue
else:
if not result:
extras['ok'][field.number] = False
continue
extras['ok'][field.number] = True
if hasattr(field, 'nota'):
if 'nota' not in extras:
extras['nota'] = dict()
if isinstance(field.nota, bool):
extras['nota'][field.number] = field.nota
else:
extras['nota'][field.number] = field.nota.text(user_dict)
if hasattr(field, 'permissions'):
if 'permissions' not in extras:
extras['permissions'] = dict()
extras['permissions'][field.number] = dict()
if isinstance(field.permissions['private'], bool):
extras['permissions'][field.number]['private'] = field.permissions['private']
elif field.permissions['private'] is not None:
extras['permissions'][field.number]['private'] = True if eval(field.permissions['private']['compute'], user_dict) else False
if isinstance(field.permissions['persistent'], bool):
extras['permissions'][field.number]['persistent'] = field.permissions['persistent']
elif field.permissions['persistent'] is not None:
extras['permissions'][field.number]['persistent'] = True if eval(field.permissions['persistent']['compute'], user_dict) else False
if field.permissions['allow_users'] is not None:
if isinstance(field.permissions['allow_users'], list):
extras['permissions'][field.number]['allow_users'] = allow_users_list(field.permissions['allow_users'])
else:
extras['permissions'][field.number]['allow_users'] = allow_users_list(eval(field.permissions['allow_users']['compute'], user_dict))
if field.permissions['allow_privileges'] is not None:
if isinstance(field.permissions['allow_privileges'], list):
extras['permissions'][field.number]['allow_privileges'] = allow_privileges_list(field.permissions['allow_privileges'])
else:
extras['permissions'][field.number]['allow_privileges'] = allow_privileges_list(eval(field.permissions['allow_privileges']['compute'], user_dict))
if isinstance(field.required, bool):
extras['required'][field.number] = field.required
else:
extras['required'][field.number] = eval(field.required['compute'], user_dict)
if hasattr(field, 'max_image_size') and hasattr(field, 'datatype') and field.datatype in ('file', 'files', 'camera', 'user', 'environment'):
extras['max_image_size'] = eval(field.max_image_size['compute'], user_dict)
if hasattr(field, 'image_type') and hasattr(field, 'datatype') and field.datatype in ('file', 'files', 'camera', 'user', 'environment'):
extras['image_type'] = eval(field.image_type['compute'], user_dict)
if hasattr(field, 'accept') and hasattr(field, 'datatype') and field.datatype in ('file', 'files', 'camera', 'user', 'environment'):
if 'accept' not in extras:
extras['accept'] = dict()
extras['accept'][field.number] = eval(field.accept['compute'], user_dict)
if hasattr(field, 'rows') and ((hasattr(field, 'inputtype') and field.inputtype == 'area') or (hasattr(field, 'datatype') and field.datatype in ('multiselect', 'object_multiselect'))):
if 'rows' not in extras:
extras['rows'] = dict()
extras['rows'][field.number] = eval(field.rows['compute'], user_dict)
if hasattr(field, 'validation_messages'):
if 'validation messages' not in extras:
extras['validation messages'] = dict()
extras['validation messages'][field.number] = dict()
for validation_key, validation_message_template in field.validation_messages.items():
extras['validation messages'][field.number][validation_key] = validation_message_template.text(user_dict)
if hasattr(field, 'validate'):
the_func = eval(field.validate['compute'], user_dict)
try:
if hasattr(field, 'datatype'):
if field.datatype in ('number', 'integer', 'currency', 'range'):
the_func(0)
elif field.datatype in ('text', 'password', 'email'):
the_func('')
elif field.datatype == 'date':
the_func('01/01/1970')
elif field.datatype == 'time':
the_func('12:00 AM')
elif field.datatype == 'datetime':
the_func('01/01/1970 12:00 AM')
elif field.datatype.startswith('yesno') or field.datatype.startswith('noyes'):
the_func(True)
else:
the_func('')
except DAValidationError as err:
pass
if hasattr(field, 'datatype') and field.datatype in ('object', 'object_radio', 'object_multiselect', 'object_checkboxes'):
if process_list_collect:
saveas_to_use = from_safeid(field.saveas)
else:
saveas_to_use = substitute_vars(from_safeid(field.saveas), self.is_generic, the_x, iterators, last_only=True)
if field.number not in selectcompute:
raise DAError("datatype was set to object but no code or selections was provided")
string = "_internal['objselections'][" + repr(saveas_to_use) + "] = dict()"
# logmessage("Doing " + string)
try:
exec(string, user_dict)
for selection in selectcompute[field.number]:
key = selection['key']
#logmessage("key is " + str(key))
real_key = from_safeid(key)
string = "_internal['objselections'][" + repr(saveas_to_use) + "][" + repr(key) + "] = " + real_key
#logmessage("Doing " + string)
exec(string, user_dict)
except Exception as err:
raise DAError("Failure while processing field with datatype of object: " + err.__class__.__name__ + " " + str(err))
if hasattr(field, 'label'):
labels[field.number] = field.label.text(user_dict)
if hasattr(field, 'extras'):
if 'fields_code' in field.extras:
the_question = self.get_question_for_field_with_sub_fields(field, user_dict)
ask_result = the_question.ask(user_dict, old_user_dict, the_x, iterators, sought, orig_sought)
for key in ('selectcompute', 'defaults', 'hints', 'helptexts', 'labels'):
for field_num, val in ask_result[key].items():
if key == 'selectcompute':
selectcompute[str(field.number) + '_' + str(field_num)] = val
elif key == 'defaults':
defaults[str(field.number) + '_' + str(field_num)] = val
elif key == 'hints':
hints[str(field.number) + '_' + str(field_num)] = val
elif key == 'helptexts':
helptexts[str(field.number) + '_' + str(field_num)] = val
elif key == 'labels':
labels[str(field.number) + '_' + str(field_num)] = val
for key, possible_dict in ask_result['extras'].items():
#logmessage(repr("key is " + str(key) + " and possible dict is " + repr(possible_dict)))
if isinstance(possible_dict, dict):
#logmessage("key points to a dict")
if key not in extras:
extras[key] = dict()
for field_num, val in possible_dict.items():
#logmessage("Setting " + str(field.number) + '_' + str(field_num))
extras[key][str(field.number) + '_' + str(field_num)] = val
for sub_field in the_question.fields:
sub_field.number = str(field.number) + '_' + str(sub_field.number)
if 'sub_fields' not in extras:
extras['sub_fields'] = dict()
extras['sub_fields'][field.number] = the_question.fields
if 'show_if_js' in field.extras:
if 'show_if_js' not in extras:
extras['show_if_js'] = dict()
extras['show_if_js'][field.number] = dict(expression=field.extras['show_if_js']['expression'].text(user_dict), vars=copy.deepcopy(field.extras['show_if_js']['vars']), sign=field.extras['show_if_js']['sign'], mode=field.extras['show_if_js']['mode'])
if 'field metadata' in field.extras:
if 'field metadata' not in extras:
extras['field metadata'] = dict()
extras['field metadata'][field.number] = recursive_eval_textobject_or_primitive(field.extras['field metadata'], user_dict)
for key in ('note', 'html', 'min', 'max', 'minlength', 'maxlength', 'show_if_val', 'step', 'scale', 'inline', 'inline width', 'ml_group', 'currency symbol'): # , 'textresponse', 'content_type' #'script', 'css',
if key in field.extras:
if key not in extras:
extras[key] = dict()
extras[key][field.number] = field.extras[key].text(user_dict)
if isinstance(extras[key][field.number], str):
extras[key][field.number] = extras[key][field.number].strip()
if extras[key][field.number] == '':
del extras[key][field.number]
for key in ('ml_train',):
if key in field.extras:
if key not in extras:
extras[key] = dict()
if isinstance(field.extras[key], bool):
extras[key][field.number] = field.extras[key]
else:
extras[key][field.number] = eval(field.extras[key]['compute'], user_dict)
if hasattr(field, 'saveas'):
try:
if not test_for_objects:
raise Exception('not setting defaults now')
if old_user_dict is not None:
for varname in ('x', 'i', 'j', 'k', 'l', 'm', 'n'):
if varname in user_dict:
old_user_dict[varname] = user_dict[varname]
elif varname in old_user_dict:
del old_user_dict[varname]
try:
defaults[field.number] = eval(from_safeid(field.saveas), old_user_dict)
except:
defaults[field.number] = eval(from_safeid(field.saveas), user_dict)
else:
defaults[field.number] = eval(from_safeid(field.saveas), user_dict)
except:
try:
defaults[field.number] = user_dict['_internal']['dirty'][substitute_vars(from_safeid(field.saveas), self.is_generic, the_x, iterators)]
except:
if hasattr(field, 'default'):
if isinstance(field.default, TextObject):
defaults[field.number] = field.default.text(user_dict).strip()
else:
defaults[field.number] = field.default
elif hasattr(field, 'extras') and 'default' in field.extras:
defaults[field.number] = eval(field.extras['default']['compute'], user_dict)
if hasattr(field, 'hint'):
hints[field.number] = field.hint.text(user_dict)
if hasattr(field, 'helptext'):
helptexts[field.number] = field.helptext.text(user_dict)
if 'current_field' in docassemble.base.functions.this_thread.misc:
del docassemble.base.functions.this_thread.misc['current_field']
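        # Fields have been processed; now assemble attachments and any e-mail
        # defaults, subject and body for this question.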
if len(self.attachments) or self.compute_attachment is not None:
if hasattr(self, 'email_default'):
the_email_address = self.email_default.text(user_dict).strip()
if '@' in the_email_address and not re.search(r'\s', the_email_address):
extras['email_default'] = the_email_address
if hasattr(self, 'email_subject'):
extras['email_subject'] = re.sub(r'[\n\r]+', ' ', self.email_subject.text(user_dict).strip())
if hasattr(self, 'email_body'):
extras['email_html'] = '<html><body>' + docassemble.base.filter.markdown_to_html(self.email_body.text(user_dict), status=docassemble.base.functions.this_thread.interview_status, question=self, external=True) + '</body></html>'
extras['email_body'] = BeautifulSoup(extras['email_html'], "html.parser").get_text('\n')
if hasattr(self, 'email_template') and ('email_subject' not in extras or 'email_html' not in extras):
template = eval(self.email_template, user_dict)
if 'email_subject' not in extras:
the_subject = re.sub(r'[\n\r]+', ' ', template.subject.strip())
if the_subject:
extras['email_subject'] = the_subject
if 'email_html' not in extras:
extras['email_html'] = '<html><body>' + template.content_as_html(external=True) + '</body></html>'
extras['email_body'] = BeautifulSoup(extras['email_html'], "html.parser").get_text('\n')
attachment_text = self.processed_attachments(user_dict) # , the_x=the_x, iterators=iterators
else:
attachment_text = []
if test_for_objects:
assumed_objects = set()
for field in self.fields:
if field.number in extras['ok'] and not extras['ok'][field.number]:
continue
docassemble.base.functions.this_thread.misc['current_field'] = field.number
if hasattr(field, 'saveas'):
# m = re.match(r'(.*)\.[^\.]+', from_safeid(field.saveas))
# if m and m.group(1) != 'x':
# assumed_objects.add(m.group(1))
parse_result = parse_var_name(from_safeid(field.saveas))
if not parse_result['valid']:
raise DAError("Variable name " + from_safeid(field.saveas) + " is invalid: " + parse_result['reason'])
if len(parse_result['objects']):
assumed_objects.add(parse_result['objects'][-1])
if len(parse_result['bracket_objects']):
assumed_objects.add(parse_result['bracket_objects'][-1])
if 'current_field' in docassemble.base.functions.this_thread.misc:
del docassemble.base.functions.this_thread.misc['current_field']
for var in assumed_objects:
if complications.search(var) or var not in user_dict:
eval(var, user_dict)
if 'menu_items' in user_dict:
extras['menu_items'] = user_dict['menu_items']
if 'track_location' in user_dict:
extras['track_location'] = user_dict['track_location']
if 'speak_text' in user_dict:
extras['speak_text'] = user_dict['speak_text']
if 'role' in user_dict:
current_role = user_dict['role']
if len(self.role) > 0:
if current_role not in self.role and 'role_event' not in self.fields_used and self.question_type not in ('exit', 'logout', 'exit_logout', 'continue', 'restart', 'leave', 'refresh', 'signin', 'register', 'new_session'):
# logmessage("Calling role_event with " + ", ".join(self.fields_used))
user_dict['role_needed'] = self.role
raise NameError("name 'role_event' is not defined")
elif self.interview.default_role is not None and current_role not in self.interview.default_role and 'role_event' not in self.fields_used and self.question_type not in ('exit', 'logout', 'exit_logout', 'continue', 'restart', 'leave', 'refresh', 'signin', 'register', 'new_session'):
# logmessage("Calling role_event with " + ", ".join(self.fields_used))
user_dict['role_needed'] = self.interview.default_role
raise NameError("name 'role_event' is not defined")
if self.question_type == 'review' and sought is not None and not hasattr(self, 'review_saveas'):
if 'event_stack' not in user_dict['_internal']:
user_dict['_internal']['event_stack'] = dict()
session_uid = docassemble.base.functions.this_thread.current_info['user']['session_uid']
if session_uid not in user_dict['_internal']['event_stack']:
user_dict['_internal']['event_stack'][session_uid] = list()
already_there = False
for event_item in user_dict['_internal']['event_stack'][session_uid]:
if event_item['action'] in (sought, orig_sought):
already_there = True
break
if not already_there:
user_dict['_internal']['event_stack'][session_uid].insert(0, dict(action=orig_sought, arguments=dict(), context=dict()))
if self.need_post is not None:
for need_code in self.need_post:
eval(need_code, user_dict)
return({'type': 'question', 'question_text': question_text, 'subquestion_text': subquestion, 'continue_label': continuelabel, 'audiovideo': audiovideo, 'decorations': decorations, 'help_text': help_text_list, 'attachments': attachment_text, 'question': self, 'selectcompute': selectcompute, 'defaults': defaults, 'hints': hints, 'helptexts': helptexts, 'extras': extras, 'labels': labels, 'sought': sought, 'orig_sought': orig_sought}) #'defined': defined,
def processed_attachments(self, the_user_dict, **kwargs):
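        # Produce the list of assembled attachments for this question: run
        # prepare_attachment()/finalize_attachment() over self.attachments,
        # append any documents computed by self.compute_attachment, and cache
        # the result per interview step when document caching is enabled.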
use_cache = kwargs.get('use_cache', True)
if self.compute_attachment is not None:
use_cache = False
seeking_var = kwargs.get('seeking_var', '__novar')
steps = the_user_dict['_internal'].get('steps', -1)
#logmessage("processed_attachments: steps is " + str(steps))
if use_cache and self.interview.cache_documents and hasattr(self, 'name') and self.name + '__SEEKING__' + seeking_var in the_user_dict['_internal']['doc_cache']:
if steps in the_user_dict['_internal']['doc_cache'][self.name + '__SEEKING__' + seeking_var]:
#logmessage("processed_attachments: result was in document cache")
return the_user_dict['_internal']['doc_cache'][self.name + '__SEEKING__' + seeking_var][steps]
the_user_dict['_internal']['doc_cache'][self.name + '__SEEKING__' + seeking_var].clear()
result_list = list()
items = list()
for x in self.attachments:
items.append([x, self.prepare_attachment(x, the_user_dict, **kwargs), None])
for item in items:
result_list.append(self.finalize_attachment(item[0], item[1], the_user_dict))
if self.compute_attachment is not None:
computed_attachment_list = eval(self.compute_attachment, the_user_dict)
if not (isinstance(computed_attachment_list, list) or (hasattr(computed_attachment_list, 'elements') and isinstance(computed_attachment_list.elements, list))):
computed_attachment_list = [computed_attachment_list]
for the_att in computed_attachment_list:
if the_att.__class__.__name__ == 'DAFileCollection':
file_dict = dict()
for doc_format in ('pdf', 'rtf', 'docx', 'rtf to docx', 'tex', 'html', 'raw'):
if hasattr(the_att, doc_format):
the_dafile = getattr(the_att, doc_format)
if hasattr(the_dafile, 'number'):
file_dict[doc_format] = the_dafile.number
if 'formats' not in the_att.info:
the_att.info['formats'] = list(file_dict.keys())
if 'valid_formats' not in the_att.info:
the_att.info['valid_formats'] = list(file_dict.keys())
result_list.append({'name': the_att.info['name'], 'filename': the_att.info['filename'], 'description': the_att.info['description'], 'valid_formats': the_att.info.get('valid_formats', ['*']), 'formats_to_use': the_att.info['formats'], 'markdown': the_att.info.get('markdown', dict()), 'content': the_att.info.get('content', dict()), 'extension': the_att.info.get('extension', dict()), 'mimetype': the_att.info.get('mimetype', dict()), 'file': file_dict, 'metadata': the_att.info.get('metadata', dict()), 'variable_name': '', 'orig_variable_name': getattr(the_att, 'instanceName', ''), 'raw': the_att.info.get('raw', False)})
#convert_to_pdf_a
#file is dict of file numbers
# if the_att.__class__.__name__ == 'DAFileCollection' and 'attachment' in the_att.info and isinstance(the_att.info, dict) and 'name' in the_att.info['attachment'] and 'number' in the_att.info['attachment'] and len(self.interview.questions_by_name[the_att.info['attachment']['name']].attachments) > the_att.info['attachment']['number']:
# attachment = self.interview.questions_by_name[the_att.info['attachment']['name']].attachments[the_att.info['attachment']['number']]
# items.append([attachment, self.prepare_attachment(attachment, the_user_dict, **kwargs)])
if self.interview.cache_documents and hasattr(self, 'name'):
if self.name + '__SEEKING__' + seeking_var not in the_user_dict['_internal']['doc_cache']:
the_user_dict['_internal']['doc_cache'][self.name + '__SEEKING__' + seeking_var] = dict()
the_user_dict['_internal']['doc_cache'][self.name + '__SEEKING__' + seeking_var][steps] = result_list
return result_list
#return(list(map((lambda x: self.make_attachment(x, the_user_dict, **kwargs)), self.attachments)))
def parse_fields(self, the_list, register_target, uses_field):
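        # Normalize a manually specified list (or dict) of choices into a list
        # of dicts with TextObject labels and keys; keys may also be compiled
        # code (when uses_field is set), embedded Question objects for command
        # buttons, or literal booleans.  Returns (has_code, result_list).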
result_list = list()
has_code = False
if isinstance(the_list, dict):
new_list = list()
for key, value in the_list.items():
new_item = dict()
new_item[key] = value
new_list.append(new_item)
the_list = new_list
if not isinstance(the_list, list):
raise DAError("Multiple choices need to be provided in list form. " + self.idebug(the_list))
for the_dict in the_list:
if not isinstance(the_dict, (dict, list)):
the_dict = {str(the_dict): the_dict}
elif not isinstance(the_dict, dict):
raise DAError("Unknown data type for the_dict in parse_fields. " + self.idebug(the_list))
result_dict = dict()
for key, value in the_dict.items():
if len(the_dict) > 1:
if key == 'image':
result_dict['image'] = value
continue
if key == 'help':
result_dict['help'] = TextObject(value, question=self)
continue
if key == 'default':
result_dict['default'] = value
continue
if uses_field:
if key == 'code':
has_code = True
result_dict['compute'] = compile(value, '<expression>', 'eval')
self.find_fields_in(value)
else:
result_dict['label'] = TextObject(key, question=self)
result_dict['key'] = TextObject(value, question=self, translate=False)
elif isinstance(value, dict):
result_dict['label'] = TextObject(key, question=self)
self.embeds = True
result_dict['key'] = Question(value, self.interview, register_target=register_target, source=self.from_source, package=self.package, source_code=codecs.decode(bytearray(yaml.safe_dump(value, default_flow_style=False, default_style = '|', allow_unicode=True), encoding='utf-8'), 'utf-8'))
elif isinstance(value, str):
if value in ('exit', 'logout', 'exit_logout', 'leave') and 'url' in the_dict:
self.embeds = True
result_dict['label'] = TextObject(key, question=self)
result_dict['key'] = Question({'command': value, 'url': the_dict['url']}, self.interview, register_target=register_target, source=self.from_source, package=self.package)
elif value in ('continue', 'restart', 'refresh', 'signin', 'register', 'exit', 'logout', 'exit_logout', 'leave', 'new_session'):
self.embeds = True
result_dict['label'] = TextObject(key, question=self)
result_dict['key'] = Question({'command': value}, self.interview, register_target=register_target, source=self.from_source, package=self.package)
elif key == 'url':
pass
else:
result_dict['label'] = TextObject(key, question=self)
result_dict['key'] = TextObject(key, question=self, translate=False)
elif isinstance(value, bool):
result_dict['label'] = TextObject(key, question=self)
result_dict['key'] = value
else:
raise DAError("Unknown data type in parse_fields:" + str(type(value)) + ". " + self.idebug(the_list))
result_list.append(result_dict)
return(has_code, result_list)
def mark_as_answered(self, the_user_dict):
if self.is_mandatory or self.mandatory_code is not None:
the_user_dict['_internal']['answered'].add(self.name)
def sub_fields_used(self):
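        # Return the set of variables this question sets, including variables
        # set by any embedded Question objects used as choice targets.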
all_fields_used = set()
for var_name in self.fields_used:
all_fields_used.add(var_name)
if len(self.fields) > 0 and hasattr(self.fields[0], 'choices'):
for choice in self.fields[0].choices:
if isinstance(choice['key'], Question):
all_fields_used.update(choice['key'].sub_fields_used())
return all_fields_used
def extended_question_name(self, the_user_dict):
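        # Return self.name, extended with the instance name of 'x' and the
        # current values of any iterator variables (i-n) that this question's
        # variables use, joined with '|WITH|', whenever those variables are
        # defined in the_user_dict.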
if not self.name:
return self.name
the_name = self.name
uses = set()
for var_name in self.sub_fields_used():
if re.search(r'^x\b', var_name):
uses.add('x')
for iterator in re.findall(r'\[([ijklmn])\]', var_name):
uses.add(iterator)
if len(uses) > 0:
ok_to_use_extra = True
for var_name in uses:
if var_name not in the_user_dict:
ok_to_use_extra = False
if ok_to_use_extra and 'x' in uses and not hasattr(the_user_dict['x'], 'instanceName'):
ok_to_use_extra = False
if ok_to_use_extra:
extras = []
if 'x' in uses:
extras.append(the_user_dict['x'].instanceName)
for var_name in ['i', 'j', 'k', 'l', 'm', 'n']:
if var_name in uses:
extras.append(str(the_user_dict[var_name]))
the_name += "|WITH|" + '|'.join(extras)
return the_name
def follow_multiple_choice(self, the_user_dict, interview_status, is_generic, the_x, iterators):
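        # If this question embeds other questions as choices and the user has
        # already picked one, descend into the chosen embedded question (and
        # any question it embeds in turn) and return it; otherwise return self.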
if not self.embeds:
return(self)
if is_generic:
if the_x != 'None':
exec("x = " + the_x, the_user_dict)
if len(iterators):
for indexno in range(len(iterators)):
exec(list_of_indices[indexno] + " = " + iterators[indexno], the_user_dict)
the_name = self.extended_question_name(the_user_dict)
if the_name and the_name in the_user_dict['_internal']['answers']:
interview_status.followed_mc = True
interview_status.tentatively_answered.add(self)
qtarget = self.fields[0].choices[the_user_dict['_internal']['answers'][the_name]].get('key', False)
if isinstance(qtarget, Question):
return(qtarget.follow_multiple_choice(the_user_dict, interview_status, is_generic, the_x, iterators))
return(self)
def finalize_attachment(self, attachment, result, the_user_dict):
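        # Generate the actual files for one attachment from the data assembled
        # by prepare_attachment(): fill PDF templates, render DOCX templates,
        # run pandoc for the remaining formats, save the results as numbered
        # files, and, when a variable name was given, store DAFile and
        # DAFileCollection objects (and permissions) in the user dictionary.
        # Cached results are reused when document caching is enabled.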
if self.interview.cache_documents and attachment['variable_name']:
try:
existing_object = eval(attachment['variable_name'], the_user_dict)
for doc_format in ('pdf', 'rtf', 'docx', 'rtf to docx', 'tex', 'html', 'raw'):
if hasattr(existing_object, doc_format):
the_file = getattr(existing_object, doc_format)
for key in ('extension', 'mimetype', 'content', 'markdown', 'raw'):
if hasattr(the_file, key):
result[key][doc_format] = getattr(the_file, key)
if hasattr(the_file, 'number'):
result['file'][doc_format] = the_file.number
#logmessage("finalize_attachment: returning " + attachment['variable_name'] + " from cache")
for key in ('template', 'field_data', 'images', 'data_strings', 'convert_to_pdf_a', 'convert_to_tagged_pdf', 'password', 'template_password', 'update_references', 'permissions'):
if key in result:
del result[key]
return result
except:
pass
#logmessage("finalize_attachment: " + attachment['variable_name'] + " was not in cache")
#logmessage("In finalize where redact is " + repr(result['redact']))
docassemble.base.functions.this_thread.misc['redact'] = result['redact']
if 'language' in attachment['options']:
old_language = docassemble.base.functions.get_language()
docassemble.base.functions.set_language(attachment['options']['language'])
else:
old_language = None
try:
for doc_format in result['formats_to_use']:
if doc_format == 'raw':
the_temp = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=result['raw'], delete=False)
with open(the_temp.name, 'w', encoding='utf-8') as the_file:
the_file.write(result['markdown'][doc_format].lstrip("\n"))
result['file'][doc_format], result['extension'][doc_format], result['mimetype'][doc_format] = docassemble.base.functions.server.save_numbered_file(result['filename'] + result['raw'], the_temp.name, yaml_file_name=self.interview.source.path)
result['content'][doc_format] = result['markdown'][doc_format].lstrip("\n")
elif doc_format in ('pdf', 'rtf', 'rtf to docx', 'tex', 'docx'):
if 'fields' in attachment['options']:
if doc_format == 'pdf' and 'pdf_template_file' in attachment['options']:
if 'checkbox_export_value' in attachment['options']:
default_export_value = attachment['options']['checkbox_export_value'].text(the_user_dict).strip()
else:
default_export_value = None
docassemble.base.functions.set_context('pdf')
the_pdf_file = docassemble.base.pdftk.fill_template(attachment['options']['pdf_template_file'].path(the_user_dict=the_user_dict), data_strings=result['data_strings'], images=result['images'], editable=result['editable'], pdfa=result['convert_to_pdf_a'], password=result['password'], template_password=result['template_password'], default_export_value=default_export_value)
result['file'][doc_format], result['extension'][doc_format], result['mimetype'][doc_format] = docassemble.base.functions.server.save_numbered_file(result['filename'] + '.' + extension_of_doc_format[doc_format], the_pdf_file, yaml_file_name=self.interview.source.path)
for key in ('images', 'data_strings', 'convert_to_pdf_a', 'convert_to_tagged_pdf', 'password', 'template_password', 'update_references', 'permissions'):
if key in result:
del result[key]
docassemble.base.functions.reset_context()
elif (doc_format == 'docx' or (doc_format == 'pdf' and 'docx' not in result['formats_to_use'])) and 'docx_template_file' in attachment['options']:
#logmessage("field_data is " + repr(result['field_data']))
if result['template'].current_rendering_part is None:
result['template'].current_rendering_part = result['template'].docx._part
docassemble.base.functions.set_context('docx', template=result['template'])
docassemble.base.functions.this_thread.misc['docx_subdocs'] = []
try:
the_template = result['template']
template_loop_count = 0
while True: # Rerender if there's a subdoc using include_docx_template
old_count = docassemble.base.functions.this_thread.misc.get('docx_include_count', 0)
the_template.render(result['field_data'], jinja_env=custom_jinja_env())
if docassemble.base.functions.this_thread.misc.get('docx_include_count', 0) > old_count and template_loop_count < 10:
# There's another template included
new_template_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".docx", delete=False)
the_template.save(new_template_file.name) # Save and refresh the template
the_template = docassemble.base.file_docx.DocxTemplate(new_template_file.name)
if result['hyperlink_style'] and result['hyperlink_style'] in the_template.docx.styles:
the_template.da_hyperlink_style = result['hyperlink_style']
elif 'Hyperlink' in result['template'].docx.styles:
the_template.da_hyperlink_style = 'Hyperlink'
elif 'InternetLink' in result['template'].docx.styles:
the_template.da_hyperlink_style = 'InternetLink'
else:
the_template.da_hyperlink_style = None
docassemble.base.functions.this_thread.misc['docx_template'] = the_template
template_loop_count += 1
else:
break
# Copy over images, etc from subdoc to master template
subdocs = docassemble.base.functions.this_thread.misc.get('docx_subdocs', []) # Get the subdoc file list
the_template_docx = the_template.docx
for subdoc in subdocs:
docassemble.base.file_docx.fix_subdoc(the_template_docx, subdoc)
except TemplateError as the_error:
if (not hasattr(the_error, 'filename')) or the_error.filename is None:
docx_paths = []
for item in attachment['options']['docx_template_file']:
for subitem in item.paths(the_user_dict=the_user_dict):
docx_paths.append(os.path.basename(subitem))
the_error.filename = ', '.join(docx_paths)
#logmessage("TemplateError:\n" + traceback.format_exc())
raise the_error
docassemble.base.functions.reset_context()
docx_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".docx", delete=False)
the_template.save(docx_file.name)
if result['update_references']:
docassemble.base.pandoc.update_references(docx_file.name)
if 'docx' in result['formats_to_use']:
result['file']['docx'], result['extension']['docx'], result['mimetype']['docx'] = docassemble.base.functions.server.save_numbered_file(result['filename'] + '.docx', docx_file.name, yaml_file_name=self.interview.source.path)
if 'pdf' in result['formats_to_use']:
pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
docassemble.base.pandoc.word_to_pdf(docx_file.name, 'docx', pdf_file.name, pdfa=result['convert_to_pdf_a'], password=result['password'], update_refs=result['update_references'], tagged=result['convert_to_tagged_pdf'], filename=result['filename'])
result['file']['pdf'], result['extension']['pdf'], result['mimetype']['pdf'] = docassemble.base.functions.server.save_numbered_file(result['filename'] + '.pdf', pdf_file.name, yaml_file_name=self.interview.source.path)
for key in ['template', 'field_data', 'images', 'data_strings', 'convert_to_pdf_a', 'convert_to_tagged_pdf', 'password', 'template_password', 'update_references', 'permissions']:
if key in result:
del result[key]
else:
converter = MyPandoc(pdfa=result['convert_to_pdf_a'], password=result['password'])
converter.output_format = doc_format
converter.input_content = result['markdown'][doc_format]
if 'initial_yaml' in attachment['options']:
converter.initial_yaml = [x.path(the_user_dict=the_user_dict) for x in attachment['options']['initial_yaml']]
elif 'initial_yaml' in self.interview.attachment_options:
converter.initial_yaml = [x.path(the_user_dict=the_user_dict) for x in self.interview.attachment_options['initial_yaml']]
if 'additional_yaml' in attachment['options']:
converter.additional_yaml = [x.path(the_user_dict=the_user_dict) for x in attachment['options']['additional_yaml']]
elif 'additional_yaml' in self.interview.attachment_options:
converter.additional_yaml = [x.path(the_user_dict=the_user_dict) for x in self.interview.attachment_options['additional_yaml']]
if doc_format in ('rtf', 'rtf to docx'):
if 'rtf_template_file' in attachment['options']:
converter.template_file = attachment['options']['rtf_template_file'].path(the_user_dict=the_user_dict)
elif 'rtf_template_file' in self.interview.attachment_options:
converter.template_file = self.interview.attachment_options['rtf_template_file'].path(the_user_dict=the_user_dict)
elif doc_format == 'docx':
if 'docx_reference_file' in attachment['options']:
converter.reference_file = attachment['options']['docx_reference_file'].path(the_user_dict=the_user_dict)
elif 'docx_reference_file' in self.interview.attachment_options:
converter.reference_file = self.interview.attachment_options['docx_reference_file'].path(the_user_dict=the_user_dict)
else:
if 'template_file' in attachment['options']:
converter.template_file = attachment['options']['template_file'].path(the_user_dict=the_user_dict)
elif 'template_file' in self.interview.attachment_options:
converter.template_file = self.interview.attachment_options['template_file'].path(the_user_dict=the_user_dict)
converter.metadata = result['metadata']
converter.convert(self)
result['file'][doc_format], result['extension'][doc_format], result['mimetype'][doc_format] = docassemble.base.functions.server.save_numbered_file(result['filename'] + '.' + extension_of_doc_format[doc_format], converter.output_filename, yaml_file_name=self.interview.source.path)
result['content'][doc_format] = result['markdown'][doc_format]
elif doc_format in ['html']:
result['content'][doc_format] = docassemble.base.filter.markdown_to_html(result['markdown'][doc_format], use_pandoc=True, question=self)
if attachment['variable_name']:
string = "from docassemble.base.core import DAFile, DAFileCollection"
exec(string, the_user_dict)
variable_name = attachment['variable_name']
m = re.search(r'^(.*)\.([A-Za-z0-9\_]+)$', attachment['variable_name'])
if m:
base_var = m.group(1)
attrib = m.group(2)
the_var = eval(base_var, the_user_dict)
if hasattr(the_var, 'instanceName'):
variable_name = the_var.instanceName + '.' + attrib
string = variable_name + " = DAFileCollection(" + repr(variable_name) + ")"
# logmessage("Executing " + string + "\n")
exec(string, the_user_dict)
the_name = attachment['name'].text(the_user_dict).strip()
the_filename = attachment['filename'].text(the_user_dict).strip()
if the_filename == '':
the_filename = docassemble.base.functions.space_to_underscore(the_name)
the_user_dict['_attachment_info'] = dict(name=the_name, filename=the_filename, description=attachment['description'].text(the_user_dict), valid_formats=result['valid_formats'], formats=result['formats_to_use'], attachment=dict(name=attachment['question_name'], number=attachment['indexno']), extension=result.get('extension', dict()), mimetype=result.get('mimetype', dict()), content=result.get('content', dict()), markdown=result.get('markdown', dict()), metadata=result.get('metadata', dict()), convert_to_pdf_a=result.get('convert_to_pdf_a', False), convert_to_tagged_pdf=result.get('convert_to_tagged_pdf', False), orig_variable_name=result.get('orig_variable_name', None), raw=result['raw'], permissions=result.get('permissions', None))
exec(variable_name + '.info = _attachment_info', the_user_dict)
del the_user_dict['_attachment_info']
for doc_format in result['file']:
if doc_format == 'raw':
variable_string = variable_name + '.raw'
else:
variable_string = variable_name + '.' + extension_of_doc_format[doc_format]
# filename = result['filename'] + '.' + doc_format
# file_number, extension, mimetype = docassemble.base.functions.server.save_numbered_file(filename, result['file'][doc_format], yaml_file_name=self.interview.source.path)
if result['file'][doc_format] is None:
raise Exception("Could not save numbered file")
if 'content' in result and doc_format in result['content']:
content_string = ', content=' + repr(result['content'][doc_format])
else:
content_string = ''
if 'markdown' in result and doc_format in result['markdown']:
markdown_string = ', markdown=' + repr(result['markdown'][doc_format])
else:
markdown_string = ''
if result['raw']:
the_ext = result['raw']
else:
the_ext = '.' + extension_of_doc_format[doc_format]
string = variable_string + " = DAFile(" + repr(variable_string) + ", filename=" + repr(str(result['filename']) + the_ext) + ", number=" + str(result['file'][doc_format]) + ", mimetype='" + str(result['mimetype'][doc_format]) + "', extension='" + str(result['extension'][doc_format]) + "'" + content_string + markdown_string + ")"
#logmessage("Executing " + string + "\n")
exec(string, the_user_dict)
for doc_format in result['content']:
# logmessage("Considering " + doc_format)
if doc_format not in result['file']:
variable_string = variable_name + '.' + extension_of_doc_format[doc_format]
# logmessage("Setting " + variable_string)
string = variable_string + " = DAFile(" + repr(variable_string) + ', markdown=' + repr(result['markdown'][doc_format]) + ', content=' + repr(result['content'][doc_format]) + ")"
exec(string, the_user_dict)
if 'permissions' in result:
if result['permissions']['private'] is not None or result['permissions']['persistent'] is not None:
params = list()
if 'private' in result['permissions']:
params.append('private=' + repr(result['permissions']['private']))
if 'persistent' in result['permissions']:
params.append('persistent=' + repr(result['permissions']['persistent']))
string = variable_name + '.set_attributes(' + ','.join(params) + ')'
exec(string, the_user_dict)
if len(result['permissions']['allow users']):
string = variable_name + '.user_access(' + ', '.join([repr(y) for y in result['permissions']['allow users']]) + ')'
exec(string, the_user_dict)
if len(result['permissions']['allow privileges']):
string = variable_name + '.privilege_access(' + ', '.join([repr(y) for y in result['permissions']['allow privileges']]) + ')'
exec(string, the_user_dict)
except:
if old_language is not None:
docassemble.base.functions.set_language(old_language)
raise
if old_language is not None:
docassemble.base.functions.set_language(old_language)
return(result)
def prepare_attachment(self, attachment, the_user_dict, **kwargs):
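        # Evaluate an attachment's name, filename, options, metadata, template
        # field data and markdown content against the_user_dict, and return
        # the dict that finalize_attachment() turns into actual files.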
if 'language' in attachment['options']:
old_language = docassemble.base.functions.get_language()
docassemble.base.functions.set_language(attachment['options']['language'])
else:
old_language = None
try:
the_name = attachment['name'].text(the_user_dict).strip()
the_filename = attachment['filename'].text(the_user_dict).strip()
the_filename = docassemble.base.functions.secure_filename(the_filename)
if the_filename == '':
the_filename = docassemble.base.functions.secure_filename(docassemble.base.functions.space_to_underscore(the_name))
result = {'name': the_name, 'filename': the_filename, 'description': attachment['description'].text(the_user_dict), 'valid_formats': attachment['valid_formats']}
actual_extension = attachment['raw']
if attachment['content'] is None and 'content file code' in attachment['options']:
raw_content = ''
the_filenames = eval(attachment['options']['content file code'], the_user_dict)
if not isinstance(the_filenames, list):
if hasattr(the_filenames, 'instanceName') and hasattr(the_filenames, 'elements') and isinstance(the_filenames.elements, list):
the_filenames = the_filenames.elements
else:
the_filenames = [the_filenames]
for the_filename in the_filenames:
the_orig_filename = the_filename
if the_filename.__class__.__name__ in ('DAFile', 'DAFileList', 'DAFileCollection', 'DAStaticFile'):
the_filename = the_filename.path()
elif isinstance(the_filename, str):
if re.search(r'^https?://', str(the_filename)):
temp_template_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", delete=False)
try:
urlretrieve(url_sanitize(str(the_filename)), temp_template_file.name)
except Exception as err:
raise DAError("prepare_attachment: error downloading " + str(the_filename) + ": " + str(err))
the_filename = temp_template_file.name
else:
the_filename = docassemble.base.functions.package_template_filename(the_filename, package=self.package)
else:
the_filename = None
if the_filename is None or not os.path.isfile(the_filename):
raise DAError("prepare_attachment: error obtaining template file from code: " + repr(the_orig_filename))
(the_base, actual_extension) = os.path.splitext(the_filename)
with open(the_filename, 'r', encoding='utf-8') as the_file:
raw_content += the_file.read()
the_content = TextObject(raw_content, question=self)
else:
the_content = attachment['content']
if 'redact' in attachment['options']:
if isinstance(attachment['options']['redact'], CodeType):
result['redact'] = eval(attachment['options']['redact'], the_user_dict)
else:
result['redact'] = attachment['options']['redact']
else:
result['redact'] = True
if 'editable' in attachment['options']:
result['editable'] = eval(attachment['options']['editable'], the_user_dict)
else:
result['editable'] = True
docassemble.base.functions.this_thread.misc['redact'] = result['redact']
            result['markdown'] = dict()
            result['content'] = dict()
            result['extension'] = dict()
            result['mimetype'] = dict()
            result['file'] = dict()
if attachment['raw']:
result['raw'] = actual_extension
result['formats_to_use'] = ['raw']
else:
result['raw'] = False
if '*' in attachment['valid_formats']:
result['formats_to_use'] = ['pdf', 'rtf', 'html']
else:
result['formats_to_use'] = attachment['valid_formats']
result['metadata'] = dict()
if len(attachment['metadata']) > 0:
for key in attachment['metadata']:
data = attachment['metadata'][key]
if isinstance(data, bool):
result['metadata'][key] = data
elif isinstance(data, list):
result['metadata'][key] = textify(data, the_user_dict)
else:
result['metadata'][key] = data.text(the_user_dict)
if 'pdf_a' in attachment['options']:
if isinstance(attachment['options']['pdf_a'], bool):
result['convert_to_pdf_a'] = attachment['options']['pdf_a']
else:
result['convert_to_pdf_a'] = eval(attachment['options']['pdf_a'], the_user_dict)
else:
result['convert_to_pdf_a'] = self.interview.use_pdf_a
if 'hyperlink_style' in attachment['options']:
result['hyperlink_style'] = attachment['options']['hyperlink_style'].text(the_user_dict).strip()
else:
result['hyperlink_style'] = None
result['permissions'] = dict()
if 'persistent' in attachment['options']:
if isinstance(attachment['options']['persistent'], bool):
result['permissions']['persistent'] = attachment['options']['persistent']
else:
result['permissions']['persistent'] = eval(attachment['options']['persistent'], the_user_dict)
else:
result['permissions']['persistent'] = None
if 'private' in attachment['options']:
if isinstance(attachment['options']['private'], bool):
result['permissions']['private'] = attachment['options']['private']
else:
result['permissions']['private'] = eval(attachment['options']['private'], the_user_dict)
else:
result['permissions']['private'] = None
if 'allow users' in attachment['options']:
if isinstance(attachment['options']['allow users'], list):
result['permissions']['allow users'] = allow_users_list(attachment['options']['allow users'])
else:
result['permissions']['allow users'] = eval(attachment['options']['allow users'], the_user_dict)
result['permissions']['allow users'] = allow_users_list(result['permissions']['allow users'])
else:
result['permissions']['allow users'] = []
if 'allow privileges' in attachment['options']:
if isinstance(attachment['options']['allow privileges'], list):
result['permissions']['allow privileges'] = allow_privileges_list(attachment['options']['allow privileges'])
else:
result['permissions']['allow privileges'] = allow_privileges_list(eval(attachment['options']['allow privileges'], the_user_dict))
else:
result['permissions']['allow privileges'] = []
if 'tagged_pdf' in attachment['options']:
if isinstance(attachment['options']['tagged_pdf'], bool):
result['convert_to_tagged_pdf'] = attachment['options']['tagged_pdf']
else:
result['convert_to_tagged_pdf'] = eval(attachment['options']['tagged_pdf'], the_user_dict)
else:
result['convert_to_tagged_pdf'] = self.interview.use_tagged_pdf
if 'orig_variable_name' in attachment and attachment['orig_variable_name']:
result['orig_variable_name'] = attachment['orig_variable_name']
if 'update_references' in attachment['options']:
if isinstance(attachment['options']['update_references'], bool):
result['update_references'] = attachment['options']['update_references']
else:
result['update_references'] = eval(attachment['options']['update_references'], the_user_dict)
else:
result['update_references'] = False
if 'password' in attachment['options']:
result['password'] = attachment['options']['password'].text(the_user_dict)
else:
result['password'] = None
if 'template_password' in attachment['options']:
result['template_password'] = attachment['options']['template_password'].text(the_user_dict)
else:
result['template_password'] = None
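            # For each requested output format, assemble its inputs: DOCX
            # template field data, PDF fill-in data strings and images, raw
            # content, or markdown (plus metadata) for the pandoc-based
            # formats.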
for doc_format in result['formats_to_use']:
if doc_format in ['pdf', 'rtf', 'rtf to docx', 'tex', 'docx', 'raw']:
if 'decimal_places' in attachment['options']:
try:
float_formatter = '%.' + str(int(attachment['options']['decimal_places'].text(the_user_dict).strip())) + 'f'
except:
logmessage("prepare_attachment: error in float_formatter")
float_formatter = None
else:
float_formatter = None
if 'fields' in attachment['options'] and 'docx_template_file' in attachment['options']:
if doc_format == 'docx' or ('docx' not in result['formats_to_use'] and doc_format == 'pdf'):
docx_paths = []
for docx_reference in attachment['options']['docx_template_file']:
for docx_path in docx_reference.paths(the_user_dict=the_user_dict):
if not os.path.isfile(docx_path):
raise DAError("Missing docx template file " + os.path.basename(docx_path))
docx_paths.append(docx_path)
if len(docx_paths) == 1:
docx_path = docx_paths[0]
else:
docx_path = docassemble.base.file_docx.concatenate_files(docx_paths)
result['template'] = docassemble.base.file_docx.DocxTemplate(docx_path)
if result['hyperlink_style'] and result['hyperlink_style'] in result['template'].docx.styles:
result['template'].da_hyperlink_style = result['hyperlink_style']
elif 'Hyperlink' in result['template'].docx.styles:
result['template'].da_hyperlink_style = 'Hyperlink'
elif 'InternetLink' in result['template'].docx.styles:
result['template'].da_hyperlink_style = 'InternetLink'
else:
result['template'].da_hyperlink_style = None
if result['template'].current_rendering_part is None:
result['template'].current_rendering_part = result['template'].docx._part
docassemble.base.functions.set_context('docx', template=result['template'])
if isinstance(attachment['options']['fields'], str):
result['field_data'] = the_user_dict
else:
the_field_data = recursive_eval_textobject(attachment['options']['fields'], the_user_dict, self, result['template'], attachment['options']['skip_undefined'])
new_field_data = dict()
if isinstance(the_field_data, list):
for item in the_field_data:
if isinstance(item, dict):
new_field_data.update(item)
the_field_data = new_field_data
result['field_data'] = copy.deepcopy(pickleable_objects(the_user_dict))
self.interview.populate_non_pickleable(result['field_data'])
result['field_data'].update(the_field_data)
result['field_data']['_codecs'] = codecs
result['field_data']['_array'] = array
if 'code' in attachment['options']:
if attachment['options']['skip_undefined']:
try:
additional_dict = eval(attachment['options']['code'], the_user_dict)
except:
additional_dict = {}
else:
additional_dict = eval(attachment['options']['code'], the_user_dict)
if isinstance(additional_dict, dict):
for key, val in additional_dict.items():
if isinstance(val, float) and float_formatter is not None:
result['field_data'][key] = float_formatter % val
elif isinstance(val, RawValue):
result['field_data'][key] = val.value
else:
result['field_data'][key] = docassemble.base.file_docx.transform_for_docx(val, self, result['template'])
else:
raise DAError("code in an attachment returned something other than a dictionary")
if 'raw code dict' in attachment['options']:
for varname, var_code in attachment['options']['raw code dict'].items():
if attachment['options']['skip_undefined']:
try:
val = eval(var_code, the_user_dict)
except:
val = ''
else:
val = eval(var_code, the_user_dict)
if isinstance(val, float) and float_formatter is not None:
result['field_data'][varname] = float_formatter % val
else:
result['field_data'][varname] = val
if 'code dict' in attachment['options']:
for varname, var_code in attachment['options']['code dict'].items():
if attachment['options']['skip_undefined']:
try:
val = eval(var_code, the_user_dict)
except:
val = ''
else:
val = eval(var_code, the_user_dict)
if isinstance(val, float) and float_formatter is not None:
result['field_data'][varname] = float_formatter % val
elif isinstance(val, RawValue):
result['field_data'][varname] = val.value
else:
result['field_data'][varname] = docassemble.base.file_docx.transform_for_docx(val, self, result['template'])
docassemble.base.functions.reset_context()
elif doc_format == 'pdf' and 'fields' in attachment['options'] and 'pdf_template_file' in attachment['options']:
docassemble.base.functions.set_context('pdf')
result['data_strings'] = []
result['images'] = []
if isinstance(attachment['options']['fields'], dict):
the_fields = [attachment['options']['fields']]
else:
the_fields = attachment['options']['fields']
for item in the_fields:
for key, val in item.items():
if attachment['options']['skip_undefined']:
try:
answer = val.text(the_user_dict).rstrip()
except:
answer = ''
else:
answer = val.text(the_user_dict).rstrip()
if answer == 'True':
answer = 'Yes'
elif answer == 'False':
answer = 'No'
elif answer == 'None':
answer = ''
answer = re.sub(r'\[(NEWLINE|BR)\]', r'\n', answer)
answer = re.sub(r'\[(BORDER|NOINDENT|FLUSHLEFT|FLUSHRIGHT|BOLDCENTER|CENTER)\]', r'', answer)
#logmessage("Found a " + str(key) + " with a |" + str(answer) + '|')
m = re.search(r'\[FILE ([^\]]+)\]', answer)
if m:
file_reference = re.sub(r'[ ,].*', '', m.group(1))
file_info = docassemble.base.functions.server.file_finder(file_reference, convert={'svg': 'png'})
result['images'].append((key, file_info))
else:
m = re.search(r'\[QR ([^\]]+)\]', answer)
if m:
im = qrcode.make(re.sub(r' *,.*', '', m.group(1)))
the_image = tempfile.NamedTemporaryFile(prefix="datemp", suffix=".png", delete=False)
im.save(the_image.name)
result['images'].append((key, {'fullpath': the_image.name}))
else:
result['data_strings'].append((key, answer))
if 'code' in attachment['options']:
if attachment['options']['skip_undefined']:
try:
additional_fields = eval(attachment['options']['code'], the_user_dict)
except:
additional_fields = []
else:
additional_fields = eval(attachment['options']['code'], the_user_dict)
if not isinstance(additional_fields, list):
additional_fields = [additional_fields]
for item in additional_fields:
if not isinstance(item, dict):
raise DAError("code in an attachment returned something other than a dictionary or a list of dictionaries")
for key, val in item.items():
if val is True:
val = 'Yes'
elif val is False:
val = 'No'
elif val is None:
val = ''
elif isinstance(val, float) and float_formatter is not None:
val = float_formatter % val
else:
val = str(val)
val = re.sub(r'\s*\[(NEWLINE|BR)\]\s*', r'\n', val)
val = re.sub(r'\s*\[(BORDER|NOINDENT|FLUSHLEFT|FLUSHRIGHT|BOLDCENTER|CENTER)\]\s*', r'', val)
m = re.search(r'\[FILE ([^\]]+)\]', val)
if m:
file_reference = re.sub(r'[ ,].*', '', m.group(1))
file_info = docassemble.base.functions.server.file_finder(file_reference, convert={'svg': 'png'})
result['images'].append((key, file_info))
else:
result['data_strings'].append((key, val))
if 'code dict' in attachment['options']:
additional_fields = attachment['options']['code dict']
if not isinstance(additional_fields, list):
additional_fields = [additional_fields]
for item in additional_fields:
if not isinstance(item, dict):
raise DAError("code dict in an attachment returned something other than a dictionary or a list of dictionaries")
for key, var_code in item.items():
if attachment['options']['skip_undefined']:
try:
val = eval(var_code, the_user_dict)
except:
val = ''
else:
val = eval(var_code, the_user_dict)
if val is True:
val = 'Yes'
elif val is False:
val = 'No'
elif val is None:
val = ''
elif isinstance(val, float) and float_formatter is not None:
val = float_formatter % val
else:
val = str(val)
val = re.sub(r'\[(NEWLINE|BR)\]', r'\n', val)
val = re.sub(r'\[(BORDER|NOINDENT|FLUSHLEFT|FLUSHRIGHT|BOLDCENTER|CENTER)\]', r'', val)
m = re.search(r'\[FILE ([^\]]+)\]', val)
if m:
file_reference = re.sub(r'[ ,].*', '', m.group(1))
file_info = docassemble.base.functions.server.file_finder(file_reference, convert={'svg': 'png'})
result['images'].append((key, file_info))
else:
result['data_strings'].append((key, val))
if 'raw code dict' in attachment['options']:
additional_fields = attachment['options']['raw code dict']
if not isinstance(additional_fields, list):
additional_fields = [additional_fields]
for item in additional_fields:
if not isinstance(item, dict):
raise DAError("raw code dict in an attachment returned something other than a dictionary or a list of dictionaries")
for key, var_code in item.items():
if attachment['options']['skip_undefined']:
try:
val = eval(var_code, the_user_dict)
except:
val = ''
else:
val = eval(var_code, the_user_dict)
if val is True:
val = 'Yes'
elif val is False:
val = 'No'
elif isinstance(val, float) and float_formatter is not None:
val = float_formatter % val
elif val is None:
val = ''
val = re.sub(r'\[(NEWLINE|BR)\]', r'\n', val)
val = re.sub(r'\[(BORDER|NOINDENT|FLUSHLEFT|FLUSHRIGHT|BOLDCENTER|CENTER)\]', r'', val)
m = re.search(r'\[FILE ([^\]]+)\]', val)
if m:
file_reference = re.sub(r'[ ,].*', '', m.group(1))
file_info = docassemble.base.functions.server.file_finder(file_reference, convert={'svg': 'png'})
result['images'].append((key, file_info))
else:
result['data_strings'].append((key, val))
docassemble.base.functions.reset_context()
elif doc_format == 'raw':
docassemble.base.functions.set_context('raw')
the_markdown = the_content.text(the_user_dict)
result['markdown'][doc_format] = the_markdown
docassemble.base.functions.reset_context()
else:
the_markdown = ""
if len(result['metadata']):
modified_metadata = dict()
for key, data in result['metadata'].items():
if re.search(r'Footer|Header', key) and 'Lines' not in key:
#modified_metadata[key] = docassemble.base.filter.metadata_filter(data, doc_format) + str('[END]')
modified_metadata[key] = data + str('[END]')
else:
modified_metadata[key] = data
the_markdown += '---\n' + codecs.decode(bytearray(yaml.safe_dump(modified_metadata, default_flow_style=False, default_style = '|', allow_unicode=False), encoding='utf-8'), 'utf-8') + "...\n"
docassemble.base.functions.set_context('pandoc')
the_markdown += the_content.text(the_user_dict)
#logmessage("Markdown is:\n" + repr(the_markdown) + "END")
if emoji_match.search(the_markdown) and len(self.interview.images) > 0:
the_markdown = emoji_match.sub(emoji_matcher_insert(self), the_markdown)
result['markdown'][doc_format] = the_markdown
docassemble.base.functions.reset_context()
elif doc_format in ['html']:
result['markdown'][doc_format] = the_content.text(the_user_dict)
if emoji_match.search(result['markdown'][doc_format]) and len(self.interview.images) > 0:
result['markdown'][doc_format] = emoji_match.sub(emoji_matcher_html(self), result['markdown'][doc_format])
#logmessage("output was:\n" + repr(result['content'][doc_format]))
except:
if old_language is not None:
docassemble.base.functions.set_language(old_language)
raise
if old_language is not None:
docassemble.base.functions.set_language(old_language)
return(result)
def process_selections_manual(self, data):
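        # Convert manually specified choices (a list of dicts, pairs, or
        # scalars, or a dict mapping labels to values) into a list of dicts
        # with TextObject 'key' and 'label' entries, plus optional default,
        # help and image information.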
result = []
if isinstance(data, list):
for entry in data:
if isinstance(entry, dict):
the_item = dict()
for key in entry:
if len(entry) > 1:
if key in ['default', 'help', 'image']:
continue
if 'key' in entry and 'label' in entry and key != 'key':
continue
if 'default' in entry:
the_item['default'] = entry['default']
if 'help' in entry:
the_item['help'] = TextObject(entry['help'], question=self)
if 'image' in entry:
if entry['image'].__class__.__name__ == 'DAFile':
entry['image'].retrieve()
if entry['image'].mimetype is not None and entry['image'].mimetype.startswith('image'):
the_item['image'] = dict(type='url', value=entry['image'].url_for())
elif entry['image'].__class__.__name__ == 'DAFileList':
entry['image'][0].retrieve()
if entry['image'][0].mimetype is not None and entry['image'][0].mimetype.startswith('image'):
the_item['image'] = dict(type='url', value=entry['image'][0].url_for())
elif entry['image'].__class__.__name__ == 'DAStaticFile':
the_item['image'] = dict(type='url', value=entry['image'].url_for())
else:
the_item['image'] = dict(type='decoration', value=entry['image'])
if 'key' in entry and 'label' in entry:
the_item['key'] = TextObject(entry['key'], question=self, translate=False)
the_item['label'] = TextObject(entry['label'], question=self)
result.append(the_item)
continue
the_item['key'] = TextObject(entry[key], question=self, translate=False)
the_item['label'] = TextObject(key, question=self)
result.append(the_item)
if isinstance(entry, (list, tuple)):
result.append(dict(key=TextObject(entry[0], question=self), label=TextObject(entry[1], question=self)))
elif isinstance(entry, str):
result.append(dict(key=TextObject(entry, question=self), label=TextObject(entry, question=self)))
elif isinstance(entry, (int, float, bool, NoneType)):
result.append(dict(key=TextObject(str(entry), question=self), label=TextObject(str(entry), question=self)))
elif isinstance(data, dict):
for key, value in sorted(data.items(), key=operator.itemgetter(1)):
result.append(dict(key=TextObject(value, question=self), label=TextObject(key, question=self)))
else:
raise DAError("Unknown data type in manual choices selection: " + re.sub(r'[<>]', '', repr(data)))
return(result)
def emoji_matcher_insert(obj):
return (lambda x: docassemble.base.filter.emoji_insert(x.group(1), images=obj.interview.images))
def emoji_matcher_html(obj):
return (lambda x: docassemble.base.filter.emoji_html(x.group(1), images=obj.interview.images))
def interview_source_from_string(path, **kwargs):
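    # Resolve an interview path by trying package, standard, and absolute
    # filename lookups in turn, returning the first InterviewSourceFile that
    # loads successfully; raise DANotFoundError if none do.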
if path is None:
raise DAError("Passed None to interview_source_from_string")
#sys.stderr.write("Trying to find " + path + "\n")
for the_filename in [docassemble.base.functions.package_question_filename(path), docassemble.base.functions.standard_question_filename(path), docassemble.base.functions.server.absolute_filename(path)]:
#sys.stderr.write("Trying " + str(the_filename) + " with path " + str(path) + "\n")
if the_filename is not None:
new_source = InterviewSourceFile(filepath=the_filename, path=path)
if new_source.update():
return(new_source)
raise DANotFoundError("Interview " + str(path) + " not found")
def is_boolean(field_data):
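    # Return True if every choice key in field_data is a literal boolean, so
    # the field can be treated as a plain yes/no value.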
if 'choices' not in field_data:
return False
if 'has_code' in field_data:
return False
for entry in field_data['choices']:
if 'key' in entry and 'label' in entry:
if isinstance(entry['key'], TextObject):
if not isinstance(entry['key'].original_text, bool):
return False
else:
if not isinstance(entry['key'], bool):
return False
return True
def is_threestate(field_data):
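    # Like is_boolean(), but also accept None (or the literal string 'None')
    # as a key, for fields with yes/no/maybe semantics.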
if 'choices' not in field_data:
return False
if 'has_code' in field_data:
return False
for entry in field_data['choices']:
if 'key' in entry and 'label' in entry:
if isinstance(entry['key'], TextObject):
if not (isinstance(entry['key'].original_text, (bool, NoneType)) or (isinstance(entry['key'].original_text, str) and entry['key'].original_text == 'None')):
return False
else:
                if not (isinstance(entry['key'], (bool, NoneType)) or (isinstance(entry['key'], str) and entry['key'] == 'None')):
return False
return True
class TableInfo:
pass
def recursive_update(base, target):
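    # Merge `target` into `base`, descending into nested mappings instead of
    # overwriting them wholesale; non-mapping values in `target` replace the
    # corresponding values in `base`.  For example,
    # recursive_update({'a': {'b': 1}}, {'a': {'c': 2}}) returns
    # {'a': {'b': 1, 'c': 2}}.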
for key, val in target.items():
if isinstance(val, abc.Mapping):
base[key] = recursive_update(base.get(key, {}), val)
else:
base[key] = val
return base
def recursive_add_classes(class_list, the_class):
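    # Append the names of all base classes of the_class (recursively, up to
    # and including `object`) to class_list.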
for cl in the_class.__bases__:
class_list.append(cl.__name__)
recursive_add_classes(class_list, cl)
def unqualified_name(variable, the_user_dict):
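    # Rewrite a generic or indexed variable name into its concrete form:
    # substitute the instance name of 'x' and the current values of any
    # iterator variables (i-n) found in the_user_dict.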
    if (variable == 'x' or variable.startswith('x[') or variable.startswith('x.')) and 'x' in the_user_dict and hasattr(the_user_dict['x'], 'instanceName'):
variable = re.sub(r'^x', the_user_dict['x'].instanceName, variable)
for index_var in ['i', 'j', 'k', 'l', 'm', 'n']:
if '[' + index_var + ']' in variable and index_var in the_user_dict:
            variable = re.sub(r'\[' + index_var + r'\]', '[' + repr(the_user_dict[index_var]) + ']', variable)
return variable
def make_backup_vars(the_user_dict):
backups = dict()
for var in ['x', 'i', 'j', 'k', 'l', 'm', 'n']:
if var in the_user_dict:
backups[var] = the_user_dict[var]
return backups
def restore_backup_vars(the_user_dict, backups):
for var, val in backups.items():
the_user_dict[var] = val
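# A variable name is illegal if it contains a newline, fails to parse as Python,
# or is flagged by the AST-based illegality detector.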
def illegal_variable_name(var):
if re.search(r'[\n\r]', var):
return True
try:
t = ast.parse(var)
except:
return True
detector = docassemble.base.astparser.detectIllegal()
detector.visit(t)
return detector.illegal
class Interview:
def __init__(self, **kwargs):
self.source = None
self.questions = dict()
self.generic_questions = dict()
self.questions_by_id = dict()
self.questions_by_name = dict()
self.questions_list = list()
self.all_questions = list()
self.progress_points = set()
self.ids_in_use = set()
self.id_orderings = list()
self.invalidation = dict()
self.invalidation_todo = dict()
self.onchange = dict()
self.onchange_todo = dict()
self.orderings = list()
self.orderings_by_question = dict()
self.images = dict()
self.metadata = list()
self.helptext = dict()
self.defs = dict()
self.terms = dict()
self.mlfields = dict()
self.autoterms = dict()
self.includes = set()
self.reconsider = set()
self.reconsider_generic = dict()
self.question_index = 0
self.block_index = 0
self.translating = False
self.default_role = None
self.default_validation_messages = dict()
self.default_screen_parts = dict()
self.title = None
self.debug = get_config('debug', True)
self.use_progress_bar = False
self.question_back_button = False
self.question_help_button = False
self.navigation_back_button = True
self.force_fullscreen = False
self.use_pdf_a = get_config('pdf/a', False)
self.use_tagged_pdf = get_config('tagged pdf', False)
self.loop_limit = get_config('loop limit', 500)
self.recursion_limit = get_config('recursion limit', 500)
self.cache_documents = True
self.use_navigation = False
self.use_navigation_on_small_screens = True
self.flush_left = False
self.max_image_size = get_config('maximum image size', None)
self.image_type = get_config('image upload type', None)
self.bootstrap_theme = get_config('bootstrap theme', None)
self.sections = dict()
self.names_used = set()
self.attachment_options = dict()
self.attachment_index = 0
self.external_files = dict()
self.options = dict()
self.calls_process_action = False
self.uses_action = False
self.imports_util = False
self.table_width = 65
self.success = True
self.translation_dict = dict()
self.translations = list()
self.scan_for_emojis = False
self.consolidated_metadata = dict()
self.issue = dict()
if 'source' in kwargs:
self.read_from(kwargs['source'])
self.cross_reference_dependencies()
def cross_reference_dependencies(self):
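        # For each variable that a question uses, work out which 'on change' and
        # invalidation rules could apply to it (including generic and iterator
        # variants) and record them in invalidation_todo and onchange_todo.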
to_listen_for = set(self.invalidation.keys()).union(set(self.onchange.keys()))
todo = dict()
for question in self.questions_list:
for field_name in question.fields_used.union(question.other_fields_used):
totry = list()
variants = list()
level_dict = dict()
generic_dict = dict()
expression_as_list = [x for x in match_brackets_or_dot.split(field_name) if x != '']
expression_as_list.append('')
recurse_indices(expression_as_list, list_of_indices, [], variants, level_dict, [], generic_dict, [])
for variant in variants:
if variant in to_listen_for:
totry.append({'real': field_name, 'vari': variant, 'iterators': level_dict[variant], 'generic': generic_dict[variant], 'is_generic': 0 if generic_dict[variant] == '' else 1, 'num_dots': variant.count('.'), 'num_iterators': variant.count('[')})
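                # Stable multi-key sort: prefer non-generic variants, then variants
                # with more dots and more iterators (i.e. the most specific match).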
totry = sorted(sorted(sorted(sorted(totry, key=lambda x: len(x['iterators'])), key=lambda x: x['num_iterators'], reverse=True), key=lambda x: x['num_dots'], reverse=True), key=lambda x: x['is_generic'])
for attempt in totry:
if field_name not in todo:
todo[field_name] = []
found = False
for existing_item in todo[field_name]:
if attempt['vari'] == existing_item['vari']:
found = True
if not found:
todo[field_name].append(attempt)
if attempt['vari'] in self.invalidation:
for var in self.invalidation[attempt['vari']]:
if field_name not in self.invalidation_todo:
self.invalidation_todo[field_name] = []
if not found:
self.invalidation_todo[field_name].append({'target': var, 'context': attempt})
question.fields_for_invalidation.add(field_name)
if attempt['vari'] in self.onchange:
if field_name not in self.onchange_todo:
self.onchange_todo[field_name] = []
if not found:
self.onchange_todo[field_name].append({'target': self.onchange[attempt['vari']], 'context': attempt})
question.fields_for_onchange.add(field_name)
def ordered(self, the_list):
if len(the_list) <= 1:
return the_list
def invalidate_dependencies(self, field_name, the_user_dict, old_values):
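        # Called after field_name changes value: mark dependent variables as dirty
        # and undefine them, and run any 'on change' code registered for the field.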
try:
current_value = eval(field_name, the_user_dict)
except:
return
try:
if current_value == old_values[field_name]:
return
do_invalidation = True
except:
do_invalidation = False
if do_invalidation:
if field_name in self.invalidation_todo:
for info in self.invalidation_todo[field_name]:
unqualified_variable = info['target']
if info['context']['is_generic'] or len(info['context']['iterators']) > 0:
if info['context']['is_generic']:
unqualified_variable = re.sub('^x', info['context']['generic'], info['target'])
for index_num, index_var in enumerate(['i', 'j', 'k', 'l', 'm', 'n']):
if index_num >= len(info['context']['iterators']):
break
                            unqualified_variable = re.sub(r'\[' + index_var + r'\]', '[' + info['context']['iterators'][index_num] + ']', unqualified_variable)
unqualified_variable = unqualified_name(unqualified_variable, the_user_dict)
try:
exec("_internal['dirty'][" + repr(unqualified_variable) + "] = " + unqualified_variable, the_user_dict)
except:
continue
try:
exec("del " + unqualified_variable, the_user_dict)
#logmessage("Interview.invalidate_dependencies: deleted " + unqualified_variable)
except:
pass
if field_name in self.onchange_todo:
if 'alpha' not in the_user_dict:
self.load_util(the_user_dict)
for info in self.onchange_todo[field_name]:
if info['context']['is_generic'] or len(info['context']['iterators']) > 0:
backup_vars = make_backup_vars(the_user_dict)
if info['context']['is_generic']:
try:
the_user_dict['x'] = eval(info['context']['generic'], the_user_dict)
except:
restore_backup_vars(the_user_dict, backup_vars)
continue
failed = False
for index_num, index_var in enumerate(['i', 'j', 'k', 'l', 'm', 'n']):
if index_num >= len(info['context']['iterators']):
break
if index_var == info['context']['iterators'][index_num]:
continue
try:
the_user_dict[index_var] = eval(info['context']['iterators'][index_num], the_user_dict)
except:
failed = True
break
if failed:
restore_backup_vars(the_user_dict, backup_vars)
continue
else:
backup_vars = None
for code_to_run in info['target']:
try:
exec(code_to_run, the_user_dict)
except Exception as err:
logmessage("Exception raised by on change code: " + err.__class__.__name__ + ": " + str(err))
if backup_vars:
restore_backup_vars(the_user_dict, backup_vars)
def get_ml_store(self):
if hasattr(self, 'ml_store'):
return self.ml_store
else:
return self.standard_ml_store()
def set_ml_store(self, ml_store):
self.ml_store = ml_store
def standard_ml_store(self):
if self.source is None:
ml_store = None
else:
ml_store = self.source.get_package()
if ml_store is None:
ml_store = ''
else:
ml_store += ':data/sources/'
if self.source and self.source.path is not None:
ml_store += 'ml-' + re.sub(r'\..*', '', re.sub(r'.*[/:]', '', self.source.path)) + '.json'
else:
ml_store += 'ml-default.json'
return ml_store
def get_bootstrap_theme(self):
if self.bootstrap_theme is None:
return None
result = docassemble.base.functions.server.url_finder(self.bootstrap_theme, _package=self.source.package)
return result
def get_tags(self, the_user_dict):
if 'tags' in the_user_dict['_internal']:
return the_user_dict['_internal']['tags']
else:
tags = set()
for metadata in self.metadata:
if 'tags' in metadata and isinstance(metadata['tags'], list):
for tag in metadata['tags']:
tags.add(tag)
return tags
def get_title(self, the_user_dict, status=None, converter=None):
if converter is None:
            converter = lambda y, *args: y
mapping = (('title', 'full'), ('logo', 'logo'), ('short title', 'short'), ('tab title', 'tab'), ('subtitle', 'sub'), ('exit link', 'exit link'), ('exit label', 'exit label'), ('exit url', 'exit url'), ('submit', 'submit'), ('pre', 'pre'), ('post', 'post'), ('footer', 'footer'), ('continue button label', 'continue button label'), ('resume button label', 'resume button label'), ('back button label', 'back button label'), ('corner back button label', 'corner back button label'), ('under', 'under'), ('right', 'right'), ('logo', 'logo'), ('css class', 'css class'), ('table css class', 'table css class'), ('date format', 'date format'), ('time format', 'time format'), ('datetime format', 'datetime format'), ('title url', 'title url'), ('title url opens in other window', 'title url opens in other window'))
title = dict()
for title_name, title_abb in mapping:
if '_internal' in the_user_dict and title_name in the_user_dict['_internal'] and the_user_dict['_internal'][title_name] is not None:
title[title_abb] = str(the_user_dict['_internal'][title_name]).strip()
elif status is not None and (title_name + ' text') in status.extras and status.extras[title_name + ' text'] is not None:
                if title_name in ('exit link', 'exit url', 'title url', 'title url opens in other window'):
title[title_abb] = status.extras[title_name + ' text']
else:
title[title_abb] = converter(status.extras[title_name + ' text'], title_name)
the_user_dict['_internal'][title_name + ' default'] = title[title_abb]
elif status is None and (title_name + ' default') in the_user_dict['_internal'] and the_user_dict['_internal'][title_name + ' default'] is not None:
title[title_abb] = the_user_dict['_internal'][title_name + ' default']
base_lang = get_language()
if base_lang in self.default_title:
for key, val in self.default_title[base_lang].items():
if key not in title:
title[key] = val
if '*' in self.default_title:
for key, val in self.default_title['*'].items():
if key not in title:
title[key] = val
return title
def allowed_to_access(self, is_anonymous=False, has_roles=None):
if isinstance(has_roles, list) and len(has_roles) == 0:
has_roles = ['user']
roles = set()
for metadata in self.metadata:
if 'required privileges' in metadata:
roles = set()
privs = metadata['required privileges']
if isinstance(privs, list) or (hasattr(privs, 'instanceName') and hasattr(privs, 'elements') and isinstance(privs.elements, list)):
for priv in privs:
if isinstance(priv, str):
roles.add(priv)
elif isinstance(privs, str):
roles.add(privs)
if len(roles):
if is_anonymous:
if 'anonymous' in roles:
return True
return False
if has_roles is not None:
return len(set(roles).intersection(set(has_roles))) > 0
if is_anonymous:
require_login = False
for metadata in self.metadata:
if 'require login' in metadata:
require_login = True if metadata['require login'] else False
if require_login:
return False
return True
def allowed_to_initiate(self, is_anonymous=False, has_roles=None):
if isinstance(has_roles, list) and len(has_roles) == 0:
has_roles = ['user']
if not self.allowed_to_access(is_anonymous=is_anonymous, has_roles=has_roles):
return False
roles = set()
is_none = False
for metadata in self.metadata:
if 'required privileges for initiating' in metadata:
roles = set()
is_none = False
privs = metadata['required privileges for initiating']
if isinstance(privs, list) or (hasattr(privs, 'instanceName') and hasattr(privs, 'elements') and isinstance(privs.elements, list)):
if len(privs) == 0:
is_none = True
else:
for priv in privs:
if isinstance(priv, str):
roles.add(priv)
elif isinstance(privs, str):
roles.add(privs)
elif isinstance(privs, NoneType):
is_none = True
if is_none:
return False
if len(roles):
if is_anonymous:
if 'anonymous' in roles:
return True
return False
if has_roles is not None:
return len(set(roles).intersection(set(has_roles))) > 0
return True
def allowed_to_see_listed(self, is_anonymous=False, has_roles=None):
if isinstance(has_roles, list) and len(has_roles) == 0:
has_roles = ['user']
if not self.allowed_to_access(is_anonymous=is_anonymous, has_roles=has_roles):
return False
roles = set()
for metadata in self.metadata:
if 'required privileges for listing' in metadata:
roles = set()
privs = metadata['required privileges for listing']
if isinstance(privs, list) or (hasattr(privs, 'instanceName') and hasattr(privs, 'elements') and isinstance(privs.elements, list)):
for priv in privs:
if isinstance(priv, str):
roles.add(priv)
elif isinstance(privs, str):
roles.add(privs)
if len(roles):
if is_anonymous:
if 'anonymous' in roles:
return True
return False
if has_roles is not None:
return len(set(roles).intersection(set(has_roles))) > 0
if is_anonymous:
require_login = False
for metadata in self.metadata:
if 'require login' in metadata:
require_login = True if metadata['require login'] else False
if require_login:
return False
return True
def is_unlisted(self):
unlisted = False
for metadata in self.metadata:
if 'unlisted' in metadata:
unlisted = metadata['unlisted']
return unlisted
def next_attachment_number(self):
self.attachment_index += 1
return(self.attachment_index - 1)
def next_number(self):
self.question_index += 1
return(self.question_index - 1)
def next_block_number(self):
self.block_index += 1
return(self.block_index - 1)
def read_from(self, source):
if self.source is None:
self.source = source
#self.firstPath = source.path
#self.rootDirectory = source.directory
if hasattr(source, 'package') and source.package:
source_package = source.package
if source_package.startswith('docassemble.playground'):
self.debug = True
else:
source_package = None
if hasattr(source, 'path'):
if source.path in self.includes:
logmessage("Interview: source " + str(source.path) + " has already been included. Skipping.")
return
self.includes.add(source.path)
#for document in yaml.safe_load_all(source.content):
for source_code in document_match.split(source.content):
source_code = remove_trailing_dots.sub('', source_code)
source_code = fix_tabs.sub(' ', source_code)
if source.testing:
try:
#logmessage("Package is " + str(source_package))
document = yaml.safe_load(source_code)
if document is not None:
question = Question(document, self, source=source, package=source_package, source_code=source_code)
self.names_used.update(question.fields_used)
except Exception as errMess:
#sys.stderr.write(str(source_code) + "\n")
try:
logmessage('Interview: error reading YAML file ' + str(source.path) + '\n\nDocument source code was:\n\n---\n' + str(source_code) + '---\n\nError was:\n\n' + str(errMess))
except:
try:
logmessage('Interview: error reading YAML file ' + str(source.path) + '. Error was:\n\n' + str(errMess))
except:
if isinstance(errMess, yaml.error.MarkedYAMLError):
logmessage('Interview: error reading YAML file ' + str(source.path) + '. Error type was:\n\n' + errMess.problem)
else:
logmessage('Interview: error reading YAML file ' + str(source.path) + '. Error type was:\n\n' + errMess.__class__.__name__)
self.success = False
pass
else:
try:
document = yaml.safe_load(source_code)
except Exception as errMess:
self.success = False
#sys.stderr.write("Error: " + str(source_code) + "\n")
#str(source_code)
try:
raise DAError('Error reading YAML file ' + str(source.path) + '\n\nDocument source code was:\n\n---\n' + str(source_code) + '---\n\nError was:\n\n' + str(errMess))
except (UnicodeDecodeError, UnicodeEncodeError):
raise DAError('Error reading YAML file ' + str(source.path) + '\n\nDocument source code was:\n\n---\n' + str(source_code) + '---\n\nError was:\n\n' + str(errMess.__class__.__name__))
if document is not None:
try:
question = Question(document, self, source=source, package=source_package, source_code=source_code)
self.names_used.update(question.fields_used)
except SyntaxException as qError:
self.success = False
raise Exception("Syntax Exception: " + str(qError) + "\n\nIn file " + str(source.path) + " from package " + str(source_package) + ":\n" + str(source_code))
except CompileException as qError:
self.success = False
raise Exception("Compile Exception: " + str(qError) + "\n\nIn file " + str(source.path) + " from package " + str(source_package) + ":\n" + str(source_code))
except SyntaxError as qError:
self.success = False
raise Exception("Syntax Error: " + str(qError) + "\n\nIn file " + str(source.path) + " from package " + str(source_package) + ":\n" + str(source_code))
for ordering in self.id_orderings:
if ordering['type'] == 'supersedes' and hasattr(ordering['question'], 'number'):
new_list = [ordering['question'].number]
for question_id in ordering['supersedes']:
if question_id in self.questions_by_id:
new_list.append(self.questions_by_id[question_id].number)
else:
logmessage("warning: reference in a supersedes directive to an id " + question_id + " that does not exist in interview")
elif ordering['type'] == 'order':
new_list = list()
for question_id in ordering['order']:
if question_id in self.questions_by_id and hasattr(self.questions_by_id[question_id], 'number'):
new_list.append(self.questions_by_id[question_id].number)
else:
logmessage("warning: reference in an order directive to id " + question_id + " that does not exist in interview")
else:
new_list = list()
self.orderings.append(new_list)
for ordering in self.orderings:
for question_a in ordering:
mode = 1
for question_b in ordering:
if question_a == question_b:
mode = -1
continue
if question_b not in self.orderings_by_question:
self.orderings_by_question[question_b] = dict()
self.orderings_by_question[question_b][question_a] = mode
#logmessage(repr(self.orderings_by_question))
self.sorter = self.make_sorter()
if len(self.images) > 0 or get_config('default icons', 'font awesome') in ('material icons', 'font awesome'):
self.scan_for_emojis = True
for metadata in self.metadata:
if 'social' in metadata and isinstance(metadata['social'], dict):
if 'image' in metadata['social'] and isinstance(metadata['social']['image'], str):
metadata['social']['image'] = docassemble.base.functions.server.url_finder(metadata['social']['image'], _package=metadata['_origin_package'], _external=True)
if metadata['social']['image'] is None:
logmessage("Invalid image reference in social meta tags")
del metadata['social']['image']
for key, subkey in (('og', 'image'), ('twitter', 'image')):
if key in metadata['social'] and isinstance(metadata['social'][key], dict) and subkey in metadata['social'][key] and isinstance(metadata['social'][key][subkey], str):
metadata['social'][key][subkey] = docassemble.base.functions.server.url_finder(metadata['social'][key][subkey], _package=metadata['_origin_package'], _external=True)
if metadata['social'][key][subkey] is None:
logmessage("Invalid image reference in social meta tags")
del metadata['social'][key][subkey]
for key, val in metadata['social'].items():
if isinstance(val, dict):
for subkey, subval in val.items():
if isinstance(subval, str):
                                metadata['social'][key][subkey] = subval.replace('\n', ' ').replace('"', '&quot;').strip()
elif isinstance(val, str):
                        metadata['social'][key] = val.replace('\n', ' ').replace('"', '&quot;').strip()
for key, val in metadata.items():
if key in self.consolidated_metadata and isinstance(self.consolidated_metadata[key], dict) and isinstance(val, dict):
recursive_update(self.consolidated_metadata[key], val)
else:
self.consolidated_metadata[key] = val
mapping = (('title', 'full'), ('logo', 'logo'), ('short title', 'short'), ('tab title', 'tab'), ('subtitle', 'sub'), ('exit link', 'exit link'), ('exit label', 'exit label'), ('exit url', 'exit url'), ('submit', 'submit'), ('pre', 'pre'), ('post', 'post'), ('footer', 'footer'), ('help label', 'help label'), ('continue button label', 'continue button label'), ('resume button label', 'resume button label'), ('back button label', 'back button label'), ('corner back button label', 'corner back button label'), ('right', 'right'), ('under', 'under'), ('submit', 'submit'), ('css class', 'css class'), ('table css class', 'table css class'), ('date format', 'date format'), ('time format', 'time format'), ('datetime format', 'datetime format'), ('title url', 'title url'), ('title url opens in other window', 'title url opens in other window'))
self.default_title = {'*': dict()}
for metadata in self.metadata:
for title_name, title_abb in mapping:
if metadata.get(title_name, None) is not None:
if isinstance(metadata[title_name], dict):
for lang, val in metadata[title_name].items():
if lang not in self.default_title:
self.default_title[lang] = dict()
self.default_title[lang][title_abb] = str(val).strip()
else:
self.default_title['*'][title_abb] = str(metadata[title_name]).strip()
for lang, parts in docassemble.base.functions.server.main_page_parts.items():
if lang not in self.default_title:
self.default_title[lang] = dict()
for title_name, title_abb in mapping:
if title_abb in self.default_title[lang]:
continue
if parts.get('main page ' + title_name, '') != '':
self.default_title[lang][title_abb] = parts['main page ' + title_name].strip()
def make_sorter(self):
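        # Build a comparison key class implementing the partial ordering implied by
        # 'supersedes' and 'order' directives; unrelated questions compare as equal.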
lookup_dict = self.orderings_by_question
class K:
def __init__(self, obj, *args):
self.obj = obj.number
self.lookup = lookup_dict
def __lt__(self, other):
if self.obj == other.obj:
return False
if self.obj in self.lookup and other.obj in self.lookup[self.obj] and self.lookup[self.obj][other.obj] == -1:
return True
return False
def __gt__(self, other):
if self.obj == other.obj:
return False
if self.obj in self.lookup and other.obj in self.lookup[self.obj] and self.lookup[self.obj][other.obj] == 1:
return True
return False
def __eq__(self, other):
if self.obj == other.obj or self.obj not in self.lookup or other.obj not in self.lookup:
return True
return False
def __le__(self, other):
if self.obj == other.obj or self.obj not in self.lookup or other.obj not in self.lookup:
return True
if self.lookup[self.obj][other.obj] == -1:
return True
return False
def __ge__(self, other):
if self.obj == other.obj or self.obj not in self.lookup or other.obj not in self.lookup:
return True
if self.lookup[self.obj][other.obj] == 1:
return True
return False
def __ne__(self, other):
if self.obj == other.obj or self.obj not in self.lookup or other.obj not in self.lookup:
return False
return True
return K
def sort_with_orderings(self, the_list):
if len(the_list) <= 1:
return the_list
result = sorted(the_list, key=self.sorter)
# logmessage(repr([y for y in reversed([x.number for x in result])]))
return reversed(result)
def processed_helptext(self, the_user_dict, language):
result = list()
if language in self.helptext:
for source in self.helptext[language]:
help_item = dict()
help_item['from'] = source['from']
if source['label'] is None:
help_item['label'] = None
else:
help_item['label'] = source['label'].text(the_user_dict)
if source['heading'] is None:
help_item['heading'] = None
else:
help_item['heading'] = source['heading'].text(the_user_dict)
if source['audiovideo'] is None:
help_item['audiovideo'] = None
else:
help_item['audiovideo'] = process_audio_video_list(source['audiovideo'], the_user_dict)
help_item['content'] = source['content'].text(the_user_dict)
result.append(help_item)
return result
def populate_non_pickleable(self, user_dict_copy):
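        # Re-execute 'imports' and 'modules' blocks in a copy of the user dictionary,
        # since module objects cannot be pickled along with the interview answers.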
if not self.imports_util and not self.consolidated_metadata.get('suppress loading util', False):
exec(import_util, user_dict_copy)
for question in self.questions_list:
if question.question_type == 'imports':
for module_name in question.module_list:
if module_name.startswith('.'):
exec('import ' + str(question.package) + module_name, user_dict_copy)
else:
exec('import ' + module_name, user_dict_copy)
if question.question_type == 'modules':
for module_name in question.module_list:
if module_name.startswith('.'):
exec('from ' + str(question.package) + module_name + ' import *', user_dict_copy)
else:
exec('from ' + module_name + ' import *', user_dict_copy)
def assemble(self, user_dict, interview_status=None, old_user_dict=None, force_question=None):
#sys.stderr.write("assemble\n")
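        # Main assembly loop: run initial and mandatory blocks, and keep seeking
        # undefined variables (via askfor) until a question is ready to be shown,
        # an endpoint is reached, or a loop/recursion limit is exceeded.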
user_dict['_internal']['tracker'] += 1
if interview_status is None:
interview_status = InterviewStatus()
#if interview_status.current_info['url'] is not None:
# user_dict['_internal']['url'] = interview_status.current_info['url']
interview_status.set_tracker(user_dict['_internal']['tracker'])
#docassemble.base.functions.reset_local_variables()
interview_status.current_info.update({'default_role': self.default_role})
docassemble.base.functions.this_thread.misc['reconsidered'] = set()
docassemble.base.functions.this_thread.current_package = self.source.package
docassemble.base.functions.this_thread.current_info = interview_status.current_info
docassemble.base.functions.this_thread.interview = self
docassemble.base.functions.this_thread.interview_status = interview_status
docassemble.base.functions.this_thread.internal = user_dict['_internal']
if user_dict['nav'].sections is None:
user_dict['nav'].sections = self.sections
if hasattr(self, 'sections_progressive'):
user_dict['nav'].progressive = self.sections_progressive
if hasattr(self, 'sections_auto_open'):
user_dict['nav'].auto_open = self.sections_auto_open
for question in self.questions_list:
if question.question_type == 'imports':
for module_name in question.module_list:
if module_name.startswith('.'):
exec('import ' + str(question.package) + module_name, user_dict)
else:
exec('import ' + module_name, user_dict)
if question.question_type == 'modules':
for module_name in question.module_list:
if module_name.startswith('.'):
exec('from ' + str(question.package) + module_name + ' import *', user_dict)
else:
exec('from ' + module_name + ' import *', user_dict)
if question.question_type == 'reset':
for var in question.reset_list:
if complications.search(var):
try:
exec('del ' + str(var), user_dict)
except:
pass
elif var in user_dict:
del user_dict[var]
if 'x' in user_dict and user_dict['x'].__class__.__name__ in self.reconsider_generic:
for var in self.reconsider_generic[user_dict['x'].__class__.__name__]:
try:
exec('del ' + str(var), user_dict)
except:
pass
for var in self.reconsider:
if complications.search(var):
try:
exec('del ' + str(var), user_dict)
except:
pass
elif var in user_dict:
del user_dict[var]
session_uid = interview_status.current_info['user']['session_uid']
device_id = interview_status.current_info['user']['device_id']
user_id = str(interview_status.current_info['user']['the_user_id'])
if 'session_local' not in user_dict['_internal']: ### take out after a time
user_dict['_internal']['session_local'] = dict()
user_dict['_internal']['device_local'] = dict()
user_dict['_internal']['user_local'] = dict()
if session_uid not in user_dict['_internal']['session_local'] or device_id not in user_dict['_internal']['device_local'] or user_id not in user_dict['_internal']['user_local']:
            exec('from docassemble.base.core import DASessionLocal, DADeviceLocal, DAUserLocal', user_dict)
            if session_uid not in user_dict['_internal']['session_local']:
                user_dict['_internal']['session_local'][session_uid] = eval("DASessionLocal()", user_dict)
            if device_id not in user_dict['_internal']['device_local']:
                user_dict['_internal']['device_local'][device_id] = eval("DADeviceLocal()", user_dict)
            if user_id not in user_dict['_internal']['user_local']:
                user_dict['_internal']['user_local'][user_id] = eval("DAUserLocal()", user_dict)
user_dict['session_local'] = user_dict['_internal']['session_local'][session_uid]
user_dict['device_local'] = user_dict['_internal']['device_local'][device_id]
user_dict['user_local'] = user_dict['_internal']['user_local'][user_id]
number_loops = 0
variables_sought = set()
try:
while True:
number_loops += 1
if number_loops > self.loop_limit:
docassemble.base.functions.wrap_up(user_dict)
raise DAError("There appears to be a circularity. Variables involved: " + ", ".join(variables_sought) + ".")
docassemble.base.functions.reset_gathering_mode()
if 'action' in interview_status.current_info:
#logmessage("assemble: there is an action in the current_info: " + repr(interview_status.current_info['action']))
if interview_status.current_info['action'] in ('_da_list_remove', '_da_list_add', '_da_list_complete'):
for the_key in ('list', 'item', 'items'):
if the_key in interview_status.current_info['arguments']:
if illegal_variable_name(interview_status.current_info['arguments'][the_key]):
raise DAError("Invalid name " + interview_status.current_info['arguments'][the_key])
interview_status.current_info['action_' + the_key] = eval(interview_status.current_info['arguments'][the_key], user_dict)
if interview_status.current_info['action'] in ('_da_dict_remove', '_da_dict_add', '_da_dict_complete'):
for the_key in ('dict', 'item', 'items'):
if the_key in interview_status.current_info['arguments']:
if illegal_variable_name(interview_status.current_info['arguments'][the_key]):
raise DAError("Invalid name " + interview_status.current_info['arguments'][the_key])
interview_status.current_info['action_' + the_key] = eval(interview_status.current_info['arguments'][the_key], user_dict)
#else:
# logmessage("assemble: there is no action in the current_info")
try:
if not self.imports_util:
if self.consolidated_metadata.get('suppress loading util', False):
exec(import_process_action, user_dict)
elif 'alpha' not in user_dict:
exec(import_util, user_dict)
if force_question is not None:
if self.debug:
                            interview_status.seeking.append({'question': force_question, 'reason': 'multiple choice question', 'time': time.time()})
docassemble.base.functions.this_thread.current_question = force_question
interview_status.populate(force_question.ask(user_dict, old_user_dict, 'None', [], None, None))
raise MandatoryQuestion()
if not self.calls_process_action:
exec(run_process_action, user_dict)
for question in self.questions_list:
if question.question_type == 'code' and (question.is_initial or (question.initial_code is not None and eval(question.initial_code, user_dict))):
#logmessage("Running some initial code:\n\n" + question.sourcecode)
if self.debug:
interview_status.seeking.append({'question': question, 'reason': 'initial', 'time': time.time()})
docassemble.base.functions.this_thread.current_question = question
exec_with_trap(question, user_dict)
continue
if question.name and question.name in user_dict['_internal']['answered']:
#logmessage("Skipping " + question.name + " because answered")
continue
if question.question_type in ("objects_from_file", "objects_from_file_da"):
if self.debug:
interview_status.seeking.append({'question': question, 'reason': 'objects from file', 'time': time.time()})
if question.question_type == "objects_from_file_da":
use_objects = True
else:
use_objects = False
for keyvalue in question.objects_from_file:
for variable, the_file in keyvalue.items():
exec(import_core, user_dict)
command = variable + ' = objects_from_file("' + str(the_file) + '", name=' + repr(variable) + ', use_objects=' + repr(use_objects) + ', package=' + repr(question.package) + ')'
#logmessage("Running " + command)
exec(command, user_dict)
question.mark_as_answered(user_dict)
if question.is_mandatory or (question.mandatory_code is not None and eval(question.mandatory_code, user_dict)):
if question.question_type == "data":
if self.debug:
interview_status.seeking.append({'question': question, 'reason': 'data', 'time': time.time()})
string = from_safeid(question.fields[0].saveas) + ' = ' + repr(recursive_eval_dataobject(question.fields[0].data, user_dict))
exec(string, user_dict)
question.mark_as_answered(user_dict)
if question.question_type == "data_da":
if self.debug:
interview_status.seeking.append({'question': question, 'reason': 'data', 'time': time.time()})
exec(import_core, user_dict)
string = from_safeid(question.fields[0].saveas) + ' = objects_from_structure(' + repr(recursive_eval_dataobject(question.fields[0].data, user_dict)) + ', root=' + repr(from_safeid(question.fields[0].saveas)) + ')'
exec(string, user_dict)
question.mark_as_answered(user_dict)
if question.question_type == "data_from_code":
if self.debug:
interview_status.seeking.append({'question': question, 'reason': 'data', 'time': time.time()})
string = from_safeid(question.fields[0].saveas) + ' = ' + repr(recursive_eval_data_from_code(question.fields[0].data, user_dict))
exec(string, user_dict)
question.mark_as_answered(user_dict)
if question.question_type == "data_from_code_da":
if self.debug:
interview_status.seeking.append({'question': question, 'reason': 'data', 'time': time.time()})
exec(import_core, user_dict)
string = from_safeid(question.fields[0].saveas) + ' = objects_from_structure(' + repr(recursive_eval_data_from_code(question.fields[0].data, user_dict)) + ', root=' + repr(from_safeid(question.fields[0].saveas)) + ')'
exec(string, user_dict)
question.mark_as_answered(user_dict)
if question.question_type == "objects":
if self.debug:
interview_status.seeking.append({'question': question, 'reason': 'objects', 'time': time.time()})
#logmessage("Going into objects")
docassemble.base.functions.this_thread.current_question = question
for keyvalue in question.objects:
for variable in keyvalue:
object_type_name = keyvalue[variable]
user_dict["__object_type"] = eval(object_type_name, user_dict)
if False and re.search(r"\.", variable):
m = re.search(r"(.*)\.(.*)", variable)
variable = m.group(1)
attribute = m.group(2)
command = variable + ".initializeAttribute(" + repr(attribute) + ", __object_type)"
#command = variable + "." + attribute + " = " + object_type + "()"
#logmessage("Running " + command)
exec(command, user_dict)
else:
if user_dict["__object_type"].__class__.__name__ == 'DAObjectPlusParameters':
command = variable + ' = __object_type.object_type(' + repr(variable) + ', **__object_type.parameters)'
else:
command = variable + ' = __object_type(' + repr(variable) + ')'
# command = variable + ' = ' + object_type + '(' + repr(variable) + ')'
#logmessage("Running " + command)
exec(command, user_dict)
if "__object_type" in user_dict:
del user_dict["__object_type"]
question.mark_as_answered(user_dict)
if question.question_type == 'code':
if self.debug:
interview_status.seeking.append({'question': question, 'reason': 'mandatory code', 'time': time.time()})
#logmessage("Running some code:\n\n" + question.sourcecode)
#logmessage("Question name is " + question.name)
docassemble.base.functions.this_thread.current_question = question
exec_with_trap(question, user_dict)
#logmessage("Code completed")
if question.name:
user_dict['_internal']['answered'].add(question.name)
#logmessage("Question " + str(question.name) + " marked as answered")
elif hasattr(question, 'content') and question.name:
if self.debug:
interview_status.seeking.append({'question': question, 'reason': 'mandatory question', 'time': time.time()})
if question.name and question.name in user_dict['_internal']['answers']:
the_question = question.follow_multiple_choice(user_dict, interview_status, False, 'None', [])
if self.debug and the_question is not question:
interview_status.seeking.append({'question': the_question, 'reason': 'result of multiple choice', 'time': time.time()})
if the_question.question_type in ["code", "event_code"]:
docassemble.base.functions.this_thread.current_question = the_question
exec_with_trap(the_question, user_dict)
interview_status.mark_tentative_as_answered(user_dict)
continue
elif hasattr(the_question, 'content'):
interview_status.populate(the_question.ask(user_dict, old_user_dict, 'None', [], None, None))
interview_status.mark_tentative_as_answered(user_dict)
else:
raise DAError("An embedded question can only be a code block or a regular question block. The question type was " + getattr(the_question, 'question_type', 'unknown'))
else:
interview_status.populate(question.ask(user_dict, old_user_dict, 'None', [], None, None))
if interview_status.question.question_type == 'continue':
user_dict['_internal']['answered'].add(question.name)
else:
raise MandatoryQuestion()
except ForcedReRun as the_exception:
continue
except (NameError, DAAttributeError, DAIndexError) as the_exception:
if 'pending_error' in docassemble.base.functions.this_thread.misc:
del docassemble.base.functions.this_thread.misc['pending_error']
#logmessage("Error in " + the_exception.__class__.__name__ + " is " + str(the_exception))
if self.debug and docassemble.base.functions.this_thread.evaluation_context == 'docx':
logmessage("NameError exception during document assembly: " + str(the_exception))
docassemble.base.functions.reset_context()
seeking_question = False
if isinstance(the_exception, ForcedNameError):
#logmessage("assemble: got a ForcedNameError for " + str(the_exception.name))
follow_mc = False
seeking_question = True
#logmessage("next action is " + repr(the_exception.next_action))
if the_exception.next_action is not None and not interview_status.checkin:
if 'event_stack' not in user_dict['_internal']:
user_dict['_internal']['event_stack'] = dict()
if session_uid not in user_dict['_internal']['event_stack']:
user_dict['_internal']['event_stack'][session_uid] = list()
new_items = list()
for new_item in the_exception.next_action:
already_there = False
for event_item in user_dict['_internal']['event_stack'][session_uid]:
if (isinstance(new_item, dict) and event_item['action'] == new_item['action']) or (isinstance(new_item, str) and event_item['action'] == new_item):
already_there = True
break
if not already_there:
new_items.append(new_item)
if len(new_items):
user_dict['_internal']['event_stack'][session_uid] = new_items + user_dict['_internal']['event_stack'][session_uid]
#interview_status.next_action.extend(the_exception.next_action)
if the_exception.name.startswith('_da_'):
continue
docassemble.base.functions.this_thread.misc['forgive_missing_question'] = [the_exception.name]
if the_exception.arguments is not None:
docassemble.base.functions.this_thread.current_info.update(dict(action=the_exception.name, arguments=the_exception.arguments))
missingVariable = the_exception.name
else:
follow_mc = True
missingVariable = extract_missing_name(the_exception)
variables_sought.add(missingVariable)
question_result = self.askfor(missingVariable, user_dict, old_user_dict, interview_status, seeking=interview_status.seeking, follow_mc=follow_mc, seeking_question=seeking_question)
if question_result['type'] in ('continue', 're_run'):
continue
elif question_result['type'] == 'refresh':
pass
else:
interview_status.populate(question_result)
break
except UndefinedError as the_exception:
#logmessage("UndefinedError")
if self.debug and docassemble.base.functions.this_thread.evaluation_context == 'docx':
#logmessage(the_exception.__class__.__name__ + " exception during document assembly: " + str(the_exception) + "\n" + traceback.format_exc())
logmessage(the_exception.__class__.__name__ + " exception during document assembly: " + str(the_exception) + "\n")
docassemble.base.functions.reset_context()
missingVariable = extract_missing_name(the_exception)
#logmessage("extracted " + missingVariable)
variables_sought.add(missingVariable)
question_result = self.askfor(missingVariable, user_dict, old_user_dict, interview_status, seeking=interview_status.seeking, follow_mc=True)
if question_result['type'] in ('continue', 're_run'):
continue
elif question_result['type'] == 'refresh':
pass
else:
interview_status.populate(question_result)
break
except CommandError as qError:
#logmessage("CommandError")
docassemble.base.functions.reset_context()
question_data = dict(command=qError.return_type, url=qError.url, sleep=qError.sleep)
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self, new_interview)
new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
new_question.name = "Question_Temp"
interview_status.populate(new_question.ask(user_dict, old_user_dict, 'None', [], None, None))
break
except ResponseError as qError:
docassemble.base.functions.reset_context()
#logmessage("Trapped ResponseError")
question_data = dict(extras=dict())
if hasattr(qError, 'response') and qError.response is not None:
question_data['response'] = qError.response
elif hasattr(qError, 'binaryresponse') and qError.binaryresponse is not None:
question_data['binaryresponse'] = qError.binaryresponse
elif hasattr(qError, 'filename') and qError.filename is not None:
question_data['response filename'] = qError.filename
elif hasattr(qError, 'url') and qError.url is not None:
question_data['redirect url'] = qError.url
elif hasattr(qError, 'all_variables') and qError.all_variables:
if hasattr(qError, 'include_internal'):
question_data['include_internal'] = qError.include_internal
question_data['content type'] = 'application/json'
question_data['all_variables'] = True
elif hasattr(qError, 'nullresponse') and qError.nullresponse:
question_data['null response'] = qError.nullresponse
elif hasattr(qError, 'sleep') and qError.sleep:
question_data['sleep'] = qError.sleep
if hasattr(qError, 'content_type') and qError.content_type:
question_data['content type'] = qError.content_type
if hasattr(qError, 'response_code') and qError.response_code:
question_data['response code'] = qError.response_code
# new_interview = copy.deepcopy(self)
# if self.source is None:
# new_interview_source = InterviewSourceString(content='')
# else:
# new_interview_source = self.source
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self, new_interview)
new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
new_question.name = "Question_Temp"
#the_question = new_question.follow_multiple_choice(user_dict)
interview_status.populate(new_question.ask(user_dict, old_user_dict, 'None', [], None, None))
break
except BackgroundResponseError as qError:
docassemble.base.functions.reset_context()
#logmessage("Trapped BackgroundResponseError")
question_data = dict(extras=dict())
if hasattr(qError, 'backgroundresponse'):
question_data['backgroundresponse'] = qError.backgroundresponse
if hasattr(qError, 'sleep'):
question_data['sleep'] = qError.sleep
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self, new_interview)
new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
new_question.name = "Question_Temp"
interview_status.populate(new_question.ask(user_dict, old_user_dict, 'None', [], None, None))
break
except BackgroundResponseActionError as qError:
docassemble.base.functions.reset_context()
#logmessage("Trapped BackgroundResponseActionError")
question_data = dict(extras=dict())
if hasattr(qError, 'action'):
question_data['action'] = qError.action
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self, new_interview)
new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
new_question.name = "Question_Temp"
interview_status.populate(new_question.ask(user_dict, old_user_dict, 'None', [], None, None))
break
# except SendFileError as qError:
# #logmessage("Trapped SendFileError")
# question_data = dict(extras=dict())
# if hasattr(qError, 'filename') and qError.filename is not None:
# question_data['response filename'] = qError.filename
# if hasattr(qError, 'content_type') and qError.content_type:
# question_data['content type'] = qError.content_type
# new_interview_source = InterviewSourceString(content='')
# new_interview = new_interview_source.get_interview()
# new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
# new_question.name = "Question_Temp"
# interview_status.populate(new_question.ask(user_dict, old_user_dict, 'None', [], None))
# break
except QuestionError as qError:
#logmessage("QuestionError")
docassemble.base.functions.reset_context()
question_data = dict()
if qError.question:
question_data['question'] = qError.question
if qError.subquestion:
question_data['subquestion'] = qError.subquestion
if qError.reload:
question_data['reload'] = qError.reload
if qError.dead_end:
pass
elif qError.buttons:
question_data['buttons'] = qError.buttons
else:
buttons = list()
if qError.show_exit is not False and not (qError.show_leave is True and qError.show_exit is None):
exit_button = {word('Exit'): 'exit'}
if qError.url:
exit_button.update(dict(url=qError.url))
buttons.append(exit_button)
if qError.show_leave:
leave_button = {word('Leave'): 'leave'}
if qError.url:
leave_button.update(dict(url=qError.url))
buttons.append(leave_button)
if qError.show_restart is not False:
buttons.append({word('Restart'): 'restart'})
if len(buttons):
question_data['buttons'] = buttons
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self, new_interview)
new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
new_question.name = "Question_Temp"
new_question.embeds = True
# will this be a problem? Maybe, since the question name can vary by thread.
the_question = new_question.follow_multiple_choice(user_dict, interview_status, False, 'None', [])
interview_status.populate(the_question.ask(user_dict, old_user_dict, 'None', [], None, None))
break
except AttributeError as the_error:
#logmessage("Regular attributeerror")
docassemble.base.functions.reset_context()
#logmessage(str(the_error.args))
docassemble.base.functions.wrap_up(user_dict)
raise DAError('Got error ' + str(the_error) + " " + traceback.format_exc() + "\nHistory was " + pprint.pformat(interview_status.seeking))
except MandatoryQuestion:
#logmessage("MandatoryQuestion")
docassemble.base.functions.reset_context()
break
except CodeExecute as code_error:
#logmessage("CodeExecute")
docassemble.base.functions.reset_context()
#if self.debug:
# interview_status.seeking.append({'question': question, 'reason': 'mandatory code'})
exec(code_error.compute, user_dict)
code_error.question.mark_as_answered(user_dict)
except SyntaxException as qError:
#logmessage("SyntaxException")
docassemble.base.functions.reset_context()
the_question = None
try:
the_question = question
except:
pass
docassemble.base.functions.wrap_up(user_dict)
if the_question is not None:
                        raise DAError(str(qError) + "\n\n" + str(the_question.idebug(the_question.data_for_debug)))
raise DAError("no question available: " + str(qError))
except CompileException as qError:
#logmessage("CompileException")
docassemble.base.functions.reset_context()
the_question = None
try:
the_question = question
except:
pass
docassemble.base.functions.wrap_up(user_dict)
if the_question is not None:
                        raise DAError(str(qError) + "\n\n" + str(the_question.idebug(the_question.data_for_debug)))
raise DAError("no question available: " + str(qError))
else:
docassemble.base.functions.wrap_up(user_dict)
raise DAErrorNoEndpoint('Docassemble has finished executing all code blocks marked as initial or mandatory, and finished asking all questions marked as mandatory (if any). It is a best practice to end your interview with a question that says goodbye and offers an Exit button.')
except Exception as the_error:
#logmessage("Untrapped exception")
if self.debug:
the_error.interview = self
the_error.interview_status = interview_status
the_error.user_dict = docassemble.base.functions.serializable_dict(user_dict)
if not hasattr(the_error, '__traceback__'):
cl, exc, tb = sys.exc_info()
the_error.__traceback__ = tb
del cl
del exc
del tb
raise the_error
if docassemble.base.functions.this_thread.prevent_going_back:
interview_status.can_go_back = False
docassemble.base.functions.wrap_up(user_dict)
if self.debug:
interview_status.seeking.append({'done': True, 'time': time.time()})
#return(pickleable_objects(user_dict))
def load_util(self, the_user_dict):
if not self.imports_util:
if not self.consolidated_metadata.get('suppress loading util', False):
exec(import_util, the_user_dict)
def askfor(self, missingVariable, user_dict, old_user_dict, interview_status, **kwargs):
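        # Look for a question capable of defining missingVariable, considering
        # generic object ('x') and iterator variants of the name in order of
        # specificity, and ask it (or run it, if it is a code block).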
seeking_question = kwargs.get('seeking_question', False)
variable_stack = kwargs.get('variable_stack', set())
questions_tried = kwargs.get('questions_tried', dict())
recursion_depth = kwargs.get('recursion_depth', 0)
recursion_depth += 1
language = get_language()
current_question = None
follow_mc = kwargs.get('follow_mc', True)
seeking = kwargs.get('seeking', list())
if self.debug:
seeking.append({'variable': missingVariable, 'time': time.time()})
if recursion_depth > self.recursion_limit:
raise DAError("There appears to be an infinite loop. Variables in stack are " + ", ".join(sorted(variable_stack)) + ".")
#logmessage("askfor: I don't have " + str(missingVariable) + " for language " + str(language))
#sys.stderr.write("I don't have " + str(missingVariable) + " for language " + str(language) + "\n")
origMissingVariable = missingVariable
docassemble.base.functions.set_current_variable(origMissingVariable)
# if missingVariable in variable_stack:
# raise DAError("Infinite loop: " + missingVariable + " already looked for, where stack is " + str(variable_stack))
# variable_stack.add(missingVariable)
found_generic = False
realMissingVariable = missingVariable
totry = list()
variants = list()
level_dict = dict()
generic_dict = dict()
expression_as_list = [x for x in match_brackets_or_dot.split(missingVariable) if x != '']
expression_as_list.append('')
recurse_indices(expression_as_list, list_of_indices, [], variants, level_dict, [], generic_dict, [])
#logmessage("variants: " + repr(variants))
for variant in variants:
totry.append({'real': missingVariable, 'vari': variant, 'iterators': level_dict[variant], 'generic': generic_dict[variant], 'is_generic': 0 if generic_dict[variant] == '' else 1, 'num_dots': variant.count('.'), 'num_iterators': variant.count('[')})
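        # As in cross_reference_dependencies(): prefer non-generic, more specific
        # variants of the missing variable name.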
totry = sorted(sorted(sorted(sorted(totry, key=lambda x: len(x['iterators'])), key=lambda x: x['num_iterators'], reverse=True), key=lambda x: x['num_dots'], reverse=True), key=lambda x: x['is_generic'])
#logmessage("ask_for: totry is " + "\n".join([x['vari'] for x in totry]))
questions_to_try = list()
for mv in totry:
realMissingVariable = mv['real']
missingVariable = mv['vari']
#logmessage("Trying missingVariable " + missingVariable + " and realMissingVariable " + realMissingVariable)
if mv['is_generic']:
#logmessage("Testing out generic " + mv['generic'])
try:
root_evaluated = eval(mv['generic'], user_dict)
#logmessage("Root was evaluated")
classes_to_look_for = [type(root_evaluated).__name__]
recursive_add_classes(classes_to_look_for, type(root_evaluated))
for generic_object in classes_to_look_for:
#logmessage("Looking for generic object " + generic_object + " for " + missingVariable)
if generic_object in self.generic_questions and missingVariable in self.generic_questions[generic_object] and (language in self.generic_questions[generic_object][missingVariable] or '*' in self.generic_questions[generic_object][missingVariable]):
for lang in [language, '*']:
if lang in self.generic_questions[generic_object][missingVariable]:
for the_question_to_use in self.sort_with_orderings(self.generic_questions[generic_object][missingVariable][lang]):
questions_to_try.append((the_question_to_use, True, mv['generic'], mv['iterators'], missingVariable, generic_object))
except:
pass
continue
# logmessage("askfor: questions to try is " + str(questions_to_try))
if missingVariable in self.questions:
for lang in [language, '*']:
# logmessage("lang is " + lang)
if lang in self.questions[missingVariable]:
for the_question in self.sort_with_orderings(self.questions[missingVariable][lang]):
questions_to_try.append((the_question, False, 'None', mv['iterators'], missingVariable, None))
# logmessage("askfor: questions to try is " + str(questions_to_try))
num_cycles = 0
missing_var = "_unknown"
while True:
num_cycles += 1
if num_cycles > self.loop_limit:
raise DAError("Infinite loop detected while looking for " + missing_var)
a_question_was_skipped = False
docassemble.base.functions.reset_gathering_mode(origMissingVariable)
#logmessage("Starting the while loop")
try:
for the_question, is_generic, the_x, iterators, missing_var, generic_object in questions_to_try:
#logmessage("In for loop with question " + the_question.name)
if missing_var in questions_tried and the_question in questions_tried[missing_var]:
a_question_was_skipped = True
# logmessage("Skipping question " + the_question.name)
continue
current_question = the_question
if self.debug:
seeking.append({'question': the_question, 'reason': 'considering', 'time': time.time()})
question = current_question
if len(question.condition) > 0:
if is_generic:
if the_x != 'None':
exec("x = " + the_x, user_dict)
if len(iterators):
for indexno in range(len(iterators)):
exec(list_of_indices[indexno] + " = " + iterators[indexno], user_dict)
condition_success = True
for condition in question.condition:
if not eval(condition, user_dict):
condition_success = False
break
if not condition_success:
continue
if follow_mc:
question = the_question.follow_multiple_choice(user_dict, interview_status, is_generic, the_x, iterators)
else:
question = the_question
if question is not current_question:
if len(question.condition) > 0:
if is_generic:
if the_x != 'None':
exec("x = " + the_x, user_dict)
if len(iterators):
for indexno in range(len(iterators)):
exec(list_of_indices[indexno] + " = " + iterators[indexno], user_dict)
condition_success = True
for condition in question.condition:
if not eval(condition, user_dict):
condition_success = False
break
if not condition_success:
continue
if question.question_type == 'fields':
field_id = safeid(missing_var)
if is_generic:
if the_x != 'None':
exec("x = " + the_x, user_dict)
if len(iterators):
for indexno in range(len(iterators)):
exec(list_of_indices[indexno] + " = " + iterators[indexno], user_dict)
skip_question = None
for field in question.fields:
if hasattr(field, 'showif_code') and hasattr(field, 'saveas') and field.saveas == field_id:
docassemble.base.functions.this_thread.misc['current_field'] = field.number
result = eval(field.showif_code, user_dict)
if hasattr(field, 'extras') and 'show_if_sign_code' in field.extras and field.extras['show_if_sign_code'] == 0:
if result:
if skip_question is not False:
skip_question = True
else:
skip_question = False
else:
if not result:
if skip_question is not False:
skip_question = True
else:
skip_question = False
if skip_question:
continue
if self.debug:
if question.question_type in ('signature', 'yesno', 'noyes', 'yesnomaybe', 'noyesmaybe', 'multiple_choice', 'settrue', 'fields', 'review', 'deadend'):
seeking.append({'question': question, 'reason': 'asking', 'time': time.time()})
else:
seeking.append({'question': question, 'reason': 'running', 'time': time.time()})
if question.question_type == "data":
question.exec_setup(is_generic, the_x, iterators, user_dict)
old_values = question.get_old_values(user_dict)
string = from_safeid(question.fields[0].saveas) + ' = ' + repr(recursive_eval_dataobject(question.fields[0].data, user_dict))
exec(string, user_dict)
question.post_exec(user_dict)
docassemble.base.functions.pop_current_variable()
question.invalidate_dependencies(user_dict, old_values)
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
if question.question_type == "data_da":
question.exec_setup(is_generic, the_x, iterators, user_dict)
old_values = question.get_old_values(user_dict)
exec(import_core, user_dict)
string = from_safeid(question.fields[0].saveas) + ' = objects_from_structure(' + repr(recursive_eval_dataobject(question.fields[0].data, user_dict)) + ', root=' + repr(from_safeid(question.fields[0].saveas)) + ')'
exec(string, user_dict)
question.post_exec(user_dict)
docassemble.base.functions.pop_current_variable()
question.invalidate_dependencies(user_dict, old_values)
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
if question.question_type == "data_from_code":
question.exec_setup(is_generic, the_x, iterators, user_dict)
old_values = question.get_old_values(user_dict)
string = from_safeid(question.fields[0].saveas) + ' = ' + repr(recursive_eval_data_from_code(question.fields[0].data, user_dict))
exec(string, user_dict)
question.post_exec(user_dict)
docassemble.base.functions.pop_current_variable()
question.invalidate_dependencies(user_dict, old_values)
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
if question.question_type == "data_from_code_da":
question.exec_setup(is_generic, the_x, iterators, user_dict)
old_values = question.get_old_values(user_dict)
exec(import_core, user_dict)
string = from_safeid(question.fields[0].saveas) + ' = objects_from_structure(' + repr(recursive_eval_data_from_code(question.fields[0].data, user_dict)) + ', root=' + repr(from_safeid(question.fields[0].saveas)) + ')'
exec(string, user_dict)
question.post_exec(user_dict)
docassemble.base.functions.pop_current_variable()
question.invalidate_dependencies(user_dict, old_values)
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
if question.question_type == "objects":
question.exec_setup(is_generic, the_x, iterators, user_dict)
success = False
old_variable = None
docassemble.base.functions.this_thread.current_question = question
for keyvalue in question.objects:
# logmessage("In a for loop for keyvalue")
for variable, object_type_name in keyvalue.items():
if variable != missing_var:
continue
was_defined = False
try:
exec("__oldvariable__ = " + str(missing_var), user_dict)
old_variable = user_dict['__oldvariable__']
exec("del " + str(missing_var), user_dict)
was_defined = True
except:
pass
user_dict["__object_type"] = eval(object_type_name, user_dict)
if re.search(r"\.", variable):
m = re.search(r"(.*)\.(.*)", variable)
variable = m.group(1)
attribute = m.group(2)
# command = variable + "." + attribute + " = " + object_type + "()"
command = variable + ".initializeAttribute(" + repr(attribute) + ", __object_type)"
# logmessage("Running " + command)
exec(command, user_dict)
else:
if user_dict["__object_type"].__class__.__name__ == 'DAObjectPlusParameters':
command = variable + ' = __object_type.object_type(' + repr(variable) + ', **__object_type.parameters)'
else:
command = variable + ' = __object_type(' + repr(variable) + ')'
# logmessage("Running " + command)
exec(command, user_dict)
if "__object_type" in user_dict:
del user_dict["__object_type"]
if missing_var in variable_stack:
variable_stack.remove(missing_var)
try:
eval(missing_var, user_dict)
success = True
# logmessage("the variable was defined")
break
except:
# logmessage("the variable was not defined")
if was_defined:
try:
exec(str(missing_var) + " = __oldvariable__", user_dict)
#exec("__oldvariable__ = " + str(missing_var), user_dict)
exec("del __oldvariable__", user_dict)
except:
pass
continue
if success:
# logmessage("success, break")
break
# logmessage("testing for success")
if not success:
# logmessage("no success, continue")
continue
#question.mark_as_answered(user_dict)
# logmessage("pop current variable")
question.post_exec(user_dict)
docassemble.base.functions.pop_current_variable()
if old_variable is not None:
question.invalidate_dependencies_of_variable(user_dict, missing_var, old_variable)
# logmessage("Returning")
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
if question.question_type == "template":
question.exec_setup(is_generic, the_x, iterators, user_dict)
temp_vars = dict()
if is_generic:
if the_x != 'None':
temp_vars['x'] = user_dict['x']
if len(iterators):
for indexno in range(len(iterators)):
temp_vars[list_of_indices[indexno]] = user_dict[list_of_indices[indexno]]
if question.target is not None:
return({'type': 'template', 'question_text': question.content.text(user_dict).rstrip(), 'subquestion_text': None, 'continue_label': None, 'audiovideo': None, 'decorations': None, 'help_text': None, 'attachments': None, 'question': question, 'selectcompute': dict(), 'defaults': dict(), 'hints': dict(), 'helptexts': dict(), 'extras': dict(), 'labels': dict(), 'sought': missing_var, 'orig_sought': origMissingVariable})
if question.decorations is None:
decoration_list = []
else:
decoration_list = question.decorations
actual_saveas = substitute_vars(from_safeid(question.fields[0].saveas), is_generic, the_x, iterators)
#docassemble.base.functions.this_thread.template_vars.append(actual_saveas)
found_object = False
try:
the_object = eval(actual_saveas, user_dict)
if the_object.__class__.__name__ == 'DALazyTemplate':
found_object = True
except:
pass
if not found_object:
string = "from docassemble.base.core import DALazyTemplate"
exec(string, user_dict)
string = from_safeid(question.fields[0].saveas) + ' = DALazyTemplate(' + repr(actual_saveas) + ')'
exec(string, user_dict)
the_object = eval(actual_saveas, user_dict)
if the_object.__class__.__name__ != 'DALazyTemplate':
raise DAError("askfor: failure to define template object")
the_object.source_content = question.content
the_object.source_subject = question.subcontent
the_object.source_decorations = [dec['image'] for dec in decoration_list]
the_object.userdict = user_dict
the_object.tempvars = temp_vars
question.post_exec(user_dict)
docassemble.base.functions.pop_current_variable()
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
if question.question_type == "template_code":
question.exec_setup(is_generic, the_x, iterators, user_dict)
the_filenames = eval(question.compute, user_dict)
if not isinstance(the_filenames, list):
if hasattr(the_filenames, 'instanceName') and hasattr(the_filenames, 'elements') and isinstance(the_filenames.elements, list):
the_filenames = the_filenames.elements
else:
the_filenames = [the_filenames]
raw_content = ''
for the_filename in the_filenames:
the_orig_filename = the_filename
if the_filename.__class__.__name__ in ('DAFile', 'DAFileList', 'DAFileCollection', 'DAStaticFile'):
the_filename = the_filename.path()
elif isinstance(the_filename, str):
if re.search(r'^https?://', str(the_filename)):
temp_template_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", delete=False)
try:
urlretrieve(url_sanitize(str(the_filename)), temp_template_file.name)
except Exception as err:
raise DAError("askfor: error downloading " + str(the_filename) + ": " + str(err))
the_filename = temp_template_file.name
else:
the_filename = docassemble.base.functions.package_template_filename(the_filename, package=question.package)
else:
the_filename = None
if the_filename is None or not os.path.isfile(the_filename):
raise DAError("askfor: error obtaining template file from code: " + repr(the_orig_filename))
with open(the_filename, 'r', encoding='utf-8') as the_file:
raw_content += the_file.read()
temp_vars = dict()
if is_generic:
if the_x != 'None':
temp_vars['x'] = user_dict['x']
if len(iterators):
for indexno in range(len(iterators)):
temp_vars[list_of_indices[indexno]] = user_dict[list_of_indices[indexno]]
if question.decorations is None:
decoration_list = []
else:
decoration_list = question.decorations
actual_saveas = substitute_vars(from_safeid(question.fields[0].saveas), is_generic, the_x, iterators)
found_object = False
try:
the_object = eval(actual_saveas, user_dict)
if the_object.__class__.__name__ == 'DALazyTemplate':
found_object = True
except:
pass
if not found_object:
string = "from docassemble.base.core import DALazyTemplate"
exec(string, user_dict)
string = from_safeid(question.fields[0].saveas) + ' = DALazyTemplate(' + repr(actual_saveas) + ')'
exec(string, user_dict)
the_object = eval(actual_saveas, user_dict)
if the_object.__class__.__name__ != 'DALazyTemplate':
raise DAError("askfor: failure to define template object")
the_object.source_content = TextObject(raw_content, question=question)
the_object.source_subject = question.subcontent
the_object.source_decorations = [dec['image'] for dec in decoration_list]
the_object.userdict = user_dict
the_object.tempvars = temp_vars
question.post_exec(user_dict)
docassemble.base.functions.pop_current_variable()
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
if question.question_type == "table":
question.exec_setup(is_generic, the_x, iterators, user_dict)
temp_vars = dict()
if is_generic:
if the_x != 'None':
temp_vars['x'] = user_dict['x']
if len(iterators):
for indexno in range(len(iterators)):
temp_vars[list_of_indices[indexno]] = user_dict[list_of_indices[indexno]]
table_info = TableInfo()
table_info.header = question.fields[0].extras['header']
table_info.is_editable = question.fields[0].extras['is_editable']
table_info.require_gathered = question.fields[0].extras['require_gathered']
table_info.show_incomplete = question.fields[0].extras['show_incomplete']
table_info.not_available_label = question.fields[0].extras['not_available_label']
table_info.row = question.fields[0].extras['row']
table_info.column = question.fields[0].extras['column']
table_info.indent = " " * (4 * int(question.fields[0].extras['indent']))
table_info.table_width = self.table_width
table_info.empty_message = question.fields[0].extras['empty_message']
table_info.saveas = from_safeid(question.fields[0].saveas)
actual_saveas = substitute_vars(table_info.saveas, is_generic, the_x, iterators)
#docassemble.base.functions.this_thread.template_vars.append(actual_saveas)
string = "from docassemble.base.core import DALazyTableTemplate"
exec(string, user_dict)
found_object = False
try:
the_object = eval(actual_saveas, user_dict)
if the_object.__class__.__name__ == 'DALazyTableTemplate':
found_object = True
except:
pass
if not found_object:
string = from_safeid(question.fields[0].saveas) + ' = DALazyTableTemplate(' + repr(actual_saveas) + ')'
exec(string, user_dict)
the_object = eval(actual_saveas, user_dict)
if the_object.__class__.__name__ != 'DALazyTableTemplate':
raise DAError("askfor: failure to define template object")
the_object.table_info = table_info
the_object.userdict = user_dict
the_object.tempvars = temp_vars
#logmessage("Pop variable for table")
question.post_exec(user_dict)
docassemble.base.functions.pop_current_variable()
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
if question.question_type == 'attachments':
question.exec_setup(is_generic, the_x, iterators, user_dict)
old_values = question.get_old_values(user_dict)
#logmessage("original missing variable is " + origMissingVariable)
attachment_text = question.processed_attachments(user_dict, seeking_var=origMissingVariable, use_cache=False)
if missing_var in variable_stack:
variable_stack.remove(missing_var)
try:
eval(missing_var, user_dict)
#question.mark_as_answered(user_dict)
except Exception as err:
logmessage("Problem with attachments block: " + err.__class__.__name__ + ": " + str(err))
continue
question.post_exec(user_dict)
docassemble.base.functions.pop_current_variable()
question.invalidate_dependencies(user_dict, old_values)
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
if question.question_type in ["code", "event_code"]:
question.exec_setup(is_generic, the_x, iterators, user_dict)
was_defined = False
old_values = question.get_old_values(user_dict)
try:
exec("__oldvariable__ = " + str(missing_var), user_dict)
exec("del " + str(missing_var), user_dict)
was_defined = True
except:
pass
if question.question_type == 'event_code':
docassemble.base.functions.pop_event_stack(origMissingVariable)
docassemble.base.functions.this_thread.current_question = question
if was_defined:
exec_with_trap(question, user_dict, old_variable=missing_var)
else:
exec_with_trap(question, user_dict)
interview_status.mark_tentative_as_answered(user_dict)
if missing_var in variable_stack:
variable_stack.remove(missing_var)
if question.question_type == 'event_code':
docassemble.base.functions.pop_current_variable()
docassemble.base.functions.pop_event_stack(origMissingVariable)
question.invalidate_dependencies(user_dict, old_values)
if was_defined:
exec("del __oldvariable__", user_dict)
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
try:
eval(missing_var, user_dict)
if was_defined:
exec("del __oldvariable__", user_dict)
if seeking_question:
continue
#question.mark_as_answered(user_dict)
docassemble.base.functions.pop_current_variable()
docassemble.base.functions.pop_event_stack(origMissingVariable)
question.invalidate_dependencies(user_dict, old_values)
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
except:
if was_defined:
try:
exec(str(missing_var) + " = __oldvariable__", user_dict)
#exec("__oldvariable__ = " + str(missing_var), user_dict)
exec("del __oldvariable__", user_dict)
except:
pass
continue
else:
interview_status.mark_tentative_as_answered(user_dict)
if question.question_type == 'continue':
continue
return question.ask(user_dict, old_user_dict, the_x, iterators, missing_var, origMissingVariable)
if a_question_was_skipped:
raise DAError("Infinite loop: " + missingVariable + " already looked for, where stack is " + str(variable_stack))
if 'forgive_missing_question' in docassemble.base.functions.this_thread.misc and origMissingVariable in docassemble.base.functions.this_thread.misc['forgive_missing_question']:
docassemble.base.functions.pop_current_variable()
docassemble.base.functions.pop_event_stack(origMissingVariable)
if 'action' in docassemble.base.functions.this_thread.current_info and docassemble.base.functions.this_thread.current_info['action'] == origMissingVariable:
del docassemble.base.functions.this_thread.current_info['action']
return({'type': 'continue', 'sought': origMissingVariable, 'orig_sought': origMissingVariable})
if self.options.get('use catchall', False) and not origMissingVariable.endswith('.value'):
string = "from docassemble.base.core import DACatchAll"
exec(string, user_dict)
string = origMissingVariable + ' = DACatchAll(' + repr(origMissingVariable) + ')'
exec(string, user_dict)
docassemble.base.functions.pop_current_variable()
docassemble.base.functions.pop_event_stack(origMissingVariable)
return({'type': 'continue', 'sought': origMissingVariable, 'orig_sought': origMissingVariable})
raise DAErrorMissingVariable("Interview has an error. There was a reference to a variable '" + origMissingVariable + "' that could not be looked up in the question file (for language '" + str(language) + "') or in any of the files incorporated by reference into the question file.", variable=origMissingVariable)
except ForcedReRun as the_exception:
docassemble.base.functions.pop_current_variable()
docassemble.base.functions.pop_event_stack(origMissingVariable)
return({'type': 're_run', 'sought': origMissingVariable, 'orig_sought': origMissingVariable})
except (NameError, DAAttributeError, DAIndexError) as the_exception:
if 'pending_error' in docassemble.base.functions.this_thread.misc:
del docassemble.base.functions.this_thread.misc['pending_error']
#logmessage("Error in " + the_exception.__class__.__name__ + " is " + str(the_exception))
if self.debug and docassemble.base.functions.this_thread.evaluation_context == 'docx':
logmessage("NameError exception during document assembly: " + str(the_exception))
docassemble.base.functions.reset_context()
seeking_question = False
if isinstance(the_exception, ForcedNameError):
#logmessage("askfor: got a ForcedNameError for " + str(the_exception.name))
follow_mc = False
seeking_question = True
#logmessage("Seeking question is True")
newMissingVariable = the_exception.name
#logmessage("next action is " + repr(the_exception.next_action))
if the_exception.next_action is not None and not interview_status.checkin:
if 'event_stack' not in user_dict['_internal']:
user_dict['_internal']['event_stack'] = dict()
session_uid = interview_status.current_info['user']['session_uid']
if session_uid not in user_dict['_internal']['event_stack']:
user_dict['_internal']['event_stack'][session_uid] = list()
new_items = list()
for new_item in the_exception.next_action:
already_there = False
for event_item in user_dict['_internal']['event_stack'][session_uid]:
if event_item['action'] == new_item:
already_there = True
break
if not already_there:
new_items.append(new_item)
if len(new_items):
user_dict['_internal']['event_stack'][session_uid] = new_items + user_dict['_internal']['event_stack'][session_uid]
#interview_status.next_action.extend(the_exception.next_action)
if the_exception.arguments is not None:
docassemble.base.functions.this_thread.current_info.update(dict(action=the_exception.name, arguments=the_exception.arguments))
if the_exception.name.startswith('_da_'):
docassemble.base.functions.pop_current_variable()
docassemble.base.functions.pop_event_stack(origMissingVariable)
return({'type': 're_run', 'sought': origMissingVariable, 'orig_sought': origMissingVariable})
docassemble.base.functions.this_thread.misc['forgive_missing_question'] = [the_exception.name]
else:
#logmessage("regular nameerror")
follow_mc = True
newMissingVariable = extract_missing_name(the_exception)
if newMissingVariable == 'file':
raise
#newMissingVariable = str(the_exception).split("'")[1]
#if newMissingVariable in questions_tried and newMissingVariable in variable_stack:
# raise DAError("Infinite loop: " + missingVariable + " already looked for, where stack is " + str(variable_stack))
if newMissingVariable not in questions_tried:
questions_tried[newMissingVariable] = set()
else:
variable_stack.add(missingVariable)
if current_question.question_type != 'objects':
questions_tried[newMissingVariable].add(current_question)
try:
eval(origMissingVariable, user_dict)
was_defined = True
except:
was_defined = False
question_result = self.askfor(newMissingVariable, user_dict, old_user_dict, interview_status, variable_stack=variable_stack, questions_tried=questions_tried, seeking=seeking, follow_mc=follow_mc, recursion_depth=recursion_depth, seeking_question=seeking_question)
if question_result['type'] == 'continue' and missing_var != newMissingVariable:
if not was_defined:
try:
eval(origMissingVariable, user_dict)
now_defined = True
except:
now_defined = False
if now_defined:
docassemble.base.functions.pop_current_variable()
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
# logmessage("Continuing after asking for newMissingVariable " + str(newMissingVariable))
continue
docassemble.base.functions.pop_current_variable()
return(question_result)
except UndefinedError as the_exception:
#logmessage("UndefinedError")
if self.debug and docassemble.base.functions.this_thread.evaluation_context == 'docx':
#logmessage(the_exception.__class__.__name__ + " exception during document assembly: " + str(the_exception) + "\n" + traceback.format_exc())
logmessage(the_exception.__class__.__name__ + " exception during document assembly: " + str(the_exception) + "\n")
docassemble.base.functions.reset_context()
newMissingVariable = extract_missing_name(the_exception)
if newMissingVariable not in questions_tried:
questions_tried[newMissingVariable] = set()
else:
variable_stack.add(missingVariable)
if current_question.question_type != 'objects':
questions_tried[newMissingVariable].add(current_question)
question_result = self.askfor(newMissingVariable, user_dict, old_user_dict, interview_status, variable_stack=variable_stack, questions_tried=questions_tried, seeking=seeking, follow_mc=True, recursion_depth=recursion_depth, seeking_question=seeking_question)
if question_result['type'] == 'continue':
continue
docassemble.base.functions.pop_current_variable()
return(question_result)
except CommandError as qError:
#logmessage("CommandError: " + str(qError))
docassemble.base.functions.reset_context()
question_data = dict(command=qError.return_type, url=qError.url, sleep=qError.sleep)
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self, new_interview)
new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
new_question.name = "Question_Temp"
return(new_question.ask(user_dict, old_user_dict, 'None', [], missing_var, origMissingVariable))
except ResponseError as qError:
#logmessage("ResponseError")
docassemble.base.functions.reset_context()
#logmessage("Trapped ResponseError2")
question_data = dict(extras=dict())
if hasattr(qError, 'response') and qError.response is not None:
question_data['response'] = qError.response
elif hasattr(qError, 'binaryresponse') and qError.binaryresponse is not None:
question_data['binaryresponse'] = qError.binaryresponse
elif hasattr(qError, 'filename') and qError.filename is not None:
question_data['response filename'] = qError.filename
elif hasattr(qError, 'url') and qError.url is not None:
question_data['redirect url'] = qError.url
elif hasattr(qError, 'all_variables') and qError.all_variables:
if hasattr(qError, 'include_internal'):
question_data['include_internal'] = qError.include_internal
question_data['content type'] = 'application/json'
question_data['all_variables'] = True
elif hasattr(qError, 'nullresponse') and qError.nullresponse:
question_data['null response'] = qError.nullresponse
elif hasattr(qError, 'sleep') and qError.sleep:
question_data['sleep'] = qError.sleep
if hasattr(qError, 'content_type') and qError.content_type:
question_data['content type'] = qError.content_type
if hasattr(qError, 'response_code') and qError.response_code:
question_data['response code'] = qError.response_code
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self, new_interview)
new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
new_question.name = "Question_Temp"
#the_question = new_question.follow_multiple_choice(user_dict)
docassemble.base.functions.pop_event_stack(origMissingVariable)
return(new_question.ask(user_dict, old_user_dict, 'None', [], missing_var, origMissingVariable))
except BackgroundResponseError as qError:
# logmessage("BackgroundResponseError")
docassemble.base.functions.reset_context()
#logmessage("Trapped BackgroundResponseError2")
question_data = dict(extras=dict())
if hasattr(qError, 'backgroundresponse'):
question_data['backgroundresponse'] = qError.backgroundresponse
if hasattr(qError, 'sleep'):
question_data['sleep'] = qError.sleep
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self, new_interview)
new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
new_question.name = "Question_Temp"
docassemble.base.functions.pop_event_stack(origMissingVariable)
return(new_question.ask(user_dict, old_user_dict, 'None', [], missing_var, origMissingVariable))
except BackgroundResponseActionError as qError:
# logmessage("BackgroundResponseActionError")
docassemble.base.functions.reset_context()
#logmessage("Trapped BackgroundResponseActionError2")
question_data = dict(extras=dict())
if hasattr(qError, 'action'):
question_data['action'] = qError.action
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self, new_interview)
new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
new_question.name = "Question_Temp"
docassemble.base.functions.pop_event_stack(origMissingVariable)
return(new_question.ask(user_dict, old_user_dict, 'None', [], missing_var, origMissingVariable))
except QuestionError as qError:
#logmessage("QuestionError")
docassemble.base.functions.reset_context()
#logmessage("Trapped QuestionError")
question_data = dict()
if qError.question:
question_data['question'] = qError.question
if qError.subquestion:
question_data['subquestion'] = qError.subquestion
if qError.dead_end:
pass
elif qError.buttons:
question_data['buttons'] = qError.buttons
else:
buttons = list()
if qError.show_exit is not False and not (qError.show_leave is True and qError.show_exit is None):
exit_button = {word('Exit'): 'exit'}
if qError.url:
exit_button.update(dict(url=qError.url))
buttons.append(exit_button)
if qError.show_leave:
leave_button = {word('Leave'): 'leave'}
if qError.url:
leave_button.update(dict(url=qError.url))
buttons.append(leave_button)
if qError.show_restart is not False:
buttons.append({word('Restart'): 'restart'})
if len(buttons):
question_data['buttons'] = buttons
new_interview_source = InterviewSourceString(content='')
new_interview = new_interview_source.get_interview()
reproduce_basics(self, new_interview)
new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
new_question.name = "Question_Temp"
new_question.embeds = True
# will this be a problem? yup
the_question = new_question.follow_multiple_choice(user_dict, interview_status, False, 'None', [])
return(the_question.ask(user_dict, old_user_dict, 'None', [], missing_var, origMissingVariable))
except CodeExecute as code_error:
#logmessage("CodeExecute")
docassemble.base.functions.reset_context()
#if self.debug:
# interview_status.seeking.append({'question': question, 'reason': 'mandatory code'})
#logmessage("Going to execute " + str(code_error.compute) + " where missing_var is " + str(missing_var))
exec(code_error.compute, user_dict)
try:
eval(missing_var, user_dict)
code_error.question.mark_as_answered(user_dict)
#logmessage("Got here 1")
#logmessage("returning from running code")
docassemble.base.functions.pop_current_variable()
#logmessage("Got here 2")
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
except:
#raise DAError("Problem setting that variable")
continue
except SyntaxException as qError:
#logmessage("SyntaxException")
docassemble.base.functions.reset_context()
the_question = None
try:
the_question = question
except:
pass
if the_question is not None:
raise DAError(str(qError) + "\n\n" + str(self.idebug(self.data_for_debug)))
raise DAError("no question available in askfor: " + str(qError))
except CompileException as qError:
#logmessage("CompileException")
docassemble.base.functions.reset_context()
the_question = None
try:
the_question = question
except:
pass
if the_question is not None:
raise DAError(str(qError) + "\n\n" + str(self.idebug(self.data_for_debug)))
raise DAError("no question available in askfor: " + str(qError))
# except SendFileError as qError:
# #logmessage("Trapped SendFileError2")
# question_data = dict(extras=dict())
# if hasattr(qError, 'filename') and qError.filename is not None:
# question_data['response filename'] = qError.filename
# if hasattr(qError, 'content_type') and qError.content_type:
# question_data['content type'] = qError.content_type
# new_interview_source = InterviewSourceString(content='')
# new_interview = new_interview_source.get_interview()
# new_question = Question(question_data, new_interview, source=new_interview_source, package=self.source.package)
# new_question.name = "Question_Temp"
# return(new_question.ask(user_dict, old_user_dict, 'None', [], None, None))
if 'forgive_missing_question' in docassemble.base.functions.this_thread.misc and origMissingVariable in docassemble.base.functions.this_thread.misc['forgive_missing_question']:
docassemble.base.functions.pop_current_variable()
docassemble.base.functions.pop_event_stack(origMissingVariable)
return({'type': 'continue', 'sought': missing_var, 'orig_sought': origMissingVariable})
raise DAErrorMissingVariable("Interview has an error. There was a reference to a variable '" + origMissingVariable + "' that could not be found in the question file (for language '" + str(language) + "') or in any of the files incorporated by reference into the question file.", variable=origMissingVariable)
def substitute_vars(var, is_generic, the_x, iterators, last_only=False):
if is_generic:
if the_x != 'None':
var = re.sub(r'^x\b', the_x, var)
if len(iterators):
if last_only:
indexno = len(iterators) - 1
var = re.sub(r'\[' + list_of_indices[indexno] + r'\]', '[' + str(iterators[indexno]) + ']', var)
else:
for indexno in range(len(iterators)):
#the_iterator = iterators[indexno]
#if isinstance(the_iterator, str) and re.match(r'^-?[0-9]+$', the_iterator):
# the_iterator = int(the_iterator)
#var = re.sub(r'\[' + list_of_indices[indexno] + r'\]', '[' + repr(the_iterator) + ']', var)
var = re.sub(r'\[' + list_of_indices[indexno] + r'\]', '[' + str(iterators[indexno]) + ']', var)
return var
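# Illustrative example (assumes the module-level list_of_indices begins with 'i', as set
# up earlier in this file): a call like substitute_vars('x.name[i]', True, 'client', ['0'])
# is expected to return 'client.name[0]', since the generic 'x' prefix and the '[i]' index
# are both replaced with their concrete values.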
def substitute_vars_action(action, is_generic, the_x, iterators):
if isinstance(action, str):
return substitute_vars(action, is_generic, the_x, iterators)
elif isinstance(action, dict):
new_dict = dict()
for key, val in action.items():
if key == 'action' and not key.startswith('_da_'):
new_dict[key] = substitute_vars_action(val, is_generic, the_x, iterators)
elif key == 'arguments' and isinstance(val, dict) and 'variables' in val and len(val) == 1:
new_dict[key] = substitute_vars_action(val, is_generic, the_x, iterators)
elif key == 'variables' and isinstance(val, list):
new_dict[key] = substitute_vars_action(val, is_generic, the_x, iterators)
else:
new_dict[key] = val
return new_dict
elif isinstance(action, list):
new_list = list()
for item in action:
new_list.append(substitute_vars_action(item, is_generic, the_x, iterators))
return new_list
else:
return action
def reproduce_basics(interview, new_interview):
new_interview.metadata = interview.metadata
new_interview.external_files = interview.external_files
def unpack_list(item, target_list=None):
if target_list is None:
target_list = list()
if not isinstance(item, (list, dict)):
target_list.append(item)
else:
for subitem in item:
unpack_list(subitem, target_list)
return target_list
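# Illustrative example: unpack_list flattens arbitrarily nested lists, so
# unpack_list([1, [2, [3]]]) is expected to return [1, 2, 3].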
def process_selections(data, manual=False, exclude=None):
if exclude is None:
to_exclude = list()
else:
to_exclude = unpack_list(exclude)
result = []
if (isinstance(data, abc.Iterable) and not isinstance(data, (str, dict)) and not (hasattr(data, 'elements') and isinstance(data.elements, dict))) or (hasattr(data, 'elements') and isinstance(data.elements, (list, set))):
for entry in data:
if isinstance(entry, dict) or (hasattr(entry, 'elements') and isinstance(entry.elements, dict)):
the_item = dict()
for key in entry:
if len(entry) > 1:
if key in ['default', 'help', 'image', 'label']:
continue
if 'default' in entry:
the_item['default'] = entry['default']
if 'help' in entry:
the_item['help'] = entry['help']
if 'image' in entry:
if entry['image'].__class__.__name__ == 'DAFile':
entry['image'].retrieve()
if entry['image'].mimetype is not None and entry['image'].mimetype.startswith('image'):
the_item['image'] = dict(type='url', value=entry['image'].url_for())
elif entry['image'].__class__.__name__ == 'DAFileList':
entry['image'][0].retrieve()
if entry['image'][0].mimetype is not None and entry['image'][0].mimetype.startswith('image'):
the_item['image'] = dict(type='url', value=entry['image'][0].url_for())
elif entry['image'].__class__.__name__ == 'DAFileCollection':
the_file = entry['image']._first_file()
the_file.retrieve()
if the_file.mimetype is not None and the_file.mimetype.startswith('image'):
                                the_item['image'] = dict(type='url', value=the_file.url_for())
elif entry['image'].__class__.__name__ == 'DAStaticFile':
the_item['image'] = dict(type='url', value=entry['image'].url_for())
else:
the_item['image'] = dict(type='decoration', value=entry['image'])
if key == 'value' and 'label' in entry:
the_item['key'] = entry[key]
the_item['label'] = entry['label']
if entry[key] not in to_exclude and ((not isinstance(entry['label'], bool)) or entry['label'] is True):
result.append(the_item)
else:
the_item['key'] = key
the_item['label'] = entry[key]
is_not_boolean = False
for key, val in entry.items():
if key in ['default', 'help', 'image', 'label']:
continue
if val not in (True, False):
is_not_boolean = True
if key not in to_exclude and (is_not_boolean or entry[key] is True):
result.append(the_item)
if (isinstance(entry, (list, tuple)) or (hasattr(entry, 'elements') and isinstance(entry.elements, list))) and len(entry) > 0:
if entry[0] not in to_exclude:
if len(entry) >= 4:
result.append(dict(key=entry[0], label=entry[1], default=entry[2], help=entry[3]))
elif len(entry) == 3:
result.append(dict(key=entry[0], label=entry[1], default=entry[2]))
elif len(entry) == 1:
result.append(dict(key=entry[0], label=entry[0]))
else:
result.append(dict(key=entry[0], label=entry[1]))
elif isinstance(entry, (str, bool, int, float)):
if entry not in to_exclude:
result.append(dict(key=entry, label=entry))
elif hasattr(entry, 'instanceName'):
if entry not in to_exclude:
result.append(dict(key=str(entry), label=str(entry)))
elif isinstance(data, dict) or (hasattr(data, 'elements') and isinstance(data.elements, dict)):
if isinstance(data, OrderedDict) or (hasattr(data, 'elements') and isinstance(data.elements, OrderedDict)):
the_items = data.items()
else:
the_items = sorted(data.items(), key=operator.itemgetter(1))
for key, value in the_items:
if key not in to_exclude:
if isinstance(value, (str, bool, int, float)):
result.append(dict(key=key, label=value))
elif hasattr(value, 'instanceName'):
result.append(dict(key=key, label=str(value)))
else:
logmessage("process_selections: non-label passed as label in dictionary")
else:
raise DAError("Unknown data type in choices selection: " + re.sub(r'[<>]', '', repr(data)))
return(result)
def extract_missing_name(the_error):
#logmessage("extract_missing_name: string was " + str(string))
m = nameerror_match.search(str(the_error))
if m:
return m.group(1)
else:
raise the_error
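# Illustrative example (assumes nameerror_match, defined elsewhere in this module, captures
# the quoted name in the error message): passing NameError("name 'favorite_fruit' is not
# defined") to extract_missing_name should yield 'favorite_fruit'.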
def auto_determine_type(field_info, the_value=None):
types = dict()
if 'selections' in field_info:
for item in field_info['selections']:
the_type = type(item[0]).__name__
if the_type not in types:
types[the_type] = 0
types[the_type] += 1
if the_value is not None:
the_type = type(the_value).__name__
if the_type not in types:
types[the_type] = 0
types[the_type] += 1
if 'str' in types or 'unicode' in types:
return
if len(types) == 2:
if 'int' in types and 'float' in types:
field_info['type'] = 'float'
return
if len(types) > 1:
return
if 'bool' in types:
field_info['type'] = 'boolean'
return
if 'int' in types:
field_info['type'] = 'integer'
return
if 'float' in types:
field_info['type'] = 'float'
return
return
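# Illustrative example: with field_info = {'selections': [(1, 'One'), (2, 'Two')]},
# auto_determine_type(field_info) should set field_info['type'] to 'integer'; a mix of int
# and float values yields 'float', and any string value leaves the type untouched.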
def get_mimetype(filename):
if filename is None:
return 'text/plain; charset=utf-8'
mimetype, encoding = mimetypes.guess_type(filename)
extension = filename.lower()
    extension = re.sub(r'.*\.', '', extension)
if extension == '3gpp':
mimetype = 'audio/3gpp'
if mimetype is None:
mimetype = 'text/plain'
return mimetype
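# Illustrative example: get_mimetype('letter.pdf') should return 'application/pdf',
# get_mimetype(None) returns 'text/plain; charset=utf-8', and an unrecognized extension
# falls back to 'text/plain'.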
def interpret_label(text):
if text is None:
return 'no label'
return str(text)
def recurse_indices(expression_array, variable_list, pre_part, final_list, var_subs_dict, var_subs, generic_dict, generic):
if len(expression_array) == 0:
return
the_expr = "".join(pre_part) + "".join(expression_array)
if the_expr not in final_list and the_expr != 'x':
final_list.append(the_expr)
var_subs_dict[the_expr] = var_subs
generic_dict[the_expr] = "".join(generic)
first_part = expression_array.pop(0)
if match_brackets.match(first_part) and len(variable_list) > 0:
new_var_subs = copy.copy(var_subs)
new_var_subs.append(re.sub(r'^\[|\]$', r'', first_part))
new_list_of_indices = copy.copy(variable_list)
var_to_use = new_list_of_indices.pop(0)
new_part = copy.copy(pre_part)
new_part.append('[' + var_to_use + ']')
recurse_indices(copy.copy(expression_array), new_list_of_indices, new_part, final_list, var_subs_dict, new_var_subs, generic_dict, generic)
if len(new_var_subs) == 0 and len(generic) == 0:
recurse_indices(copy.copy(expression_array), new_list_of_indices, ['x', '[' + var_to_use + ']'], final_list, var_subs_dict, new_var_subs, generic_dict, copy.copy(pre_part))
pre_part.append(first_part)
recurse_indices(copy.copy(expression_array), variable_list, copy.copy(pre_part), final_list, var_subs_dict, var_subs, generic_dict, copy.copy(generic))
if len(var_subs) == 0 and len(generic) == 0:
recurse_indices(copy.copy(expression_array), variable_list, ['x'], final_list, var_subs_dict, var_subs, generic_dict, copy.copy(pre_part))
def ensure_object_exists(saveas, datatype, the_user_dict, commands=None):
# logmessage("ensure object exists: " + str(saveas))
if commands is None:
execute = True
commands = list()
else:
execute = False
already_there = False
try:
eval(saveas, the_user_dict)
already_there = True
except:
pass
if already_there:
#logmessage("ensure object exists: already there")
return
use_initialize = False
parse_result = parse_var_name(saveas)
if not parse_result['valid']:
raise DAError("Variable name " + saveas + " is invalid: " + parse_result['reason'])
method = None
if parse_result['final_parts'][1] != '':
if parse_result['final_parts'][1][0] == '.':
try:
core_key = eval(parse_result['final_parts'][0], the_user_dict)
if hasattr(core_key, 'instanceName'):
method = 'attribute'
except:
pass
elif parse_result['final_parts'][1][0] == '[':
try:
core_key = eval(parse_result['final_parts'][0], the_user_dict)
if hasattr(core_key, 'instanceName'):
method = 'index'
except:
pass
if "from docassemble.base.core import DADict, DAList" not in commands:
commands.append("from docassemble.base.core import DADict, DAList")
if method == 'attribute':
attribute_name = parse_result['final_parts'][1][1:]
if datatype in ('multiselect', 'checkboxes'):
commands.append(parse_result['final_parts'][0] + ".initializeAttribute(" + repr(attribute_name) + ", DADict, auto_gather=False)")
elif datatype in ('object_multiselect', 'object_checkboxes'):
commands.append(parse_result['final_parts'][0] + ".initializeAttribute(" + repr(attribute_name) + ", DAList, auto_gather=False)")
elif method == 'index':
index_name = parse_result['final_parts'][1][1:-1]
if datatype in ('multiselect', 'checkboxes'):
commands.append(parse_result['final_parts'][0] + ".initializeObject(" + repr(index_name) + ", DADict, auto_gather=False)")
elif datatype in ('object_multiselect', 'object_checkboxes'):
commands.append(parse_result['final_parts'][0] + ".initializeObject(" + repr(index_name) + ", DAList, auto_gather=False)")
else:
if datatype in ('multiselect', 'checkboxes'):
commands.append(saveas + ' = DADict(' + repr(saveas) + ', auto_gather=False)')
elif datatype in ('object_multiselect', 'object_checkboxes'):
commands.append(saveas + ' = DAList(' + repr(saveas) + ', auto_gather=False)')
if execute:
for command in commands:
#logmessage("Doing " + command)
exec(command, the_user_dict)
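# Illustrative example (assumes client is an already-defined DAObject in the_user_dict):
# ensure_object_exists('client.toppings', 'checkboxes', the_user_dict) is expected to run
# client.initializeAttribute('toppings', DADict, auto_gather=False), giving checkbox
# answers a container to land in, and to do nothing if client.toppings already exists.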
def invalid_variable_name(varname):
if not isinstance(varname, str):
return True
if re.search(r'[\n\r\(\)\{\}\*\^\#]', varname):
return True
varname = re.sub(r'[\.\[].*', '', varname)
if not valid_variable_match.match(varname):
return True
return False
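# Illustrative example: invalid_variable_name('client.name[0]') should return False, while
# values that are not strings or that contain characters such as parentheses or newlines
# are reported as invalid (True).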
def exec_with_trap(the_question, the_dict, old_variable=None):
try:
exec(the_question.compute, the_dict)
the_question.post_exec(the_dict)
except (NameError, UndefinedError, CommandError, ResponseError, BackgroundResponseError, BackgroundResponseActionError, QuestionError, AttributeError, MandatoryQuestion, CodeExecute, SyntaxException, CompileException):
if old_variable is not None:
try:
exec(str(old_variable) + " = __oldvariable__", the_dict)
exec("del __oldvariable__", the_dict)
except:
pass
raise
except Exception as e:
cl, exc, tb = sys.exc_info()
exc.user_dict = docassemble.base.functions.serializable_dict(the_dict)
if len(traceback.extract_tb(tb)) == 2:
line_with_error = traceback.extract_tb(tb)[-1][1]
if isinstance(line_with_error, int) and line_with_error > 0 and hasattr(the_question, 'sourcecode'):
exc.da_line_with_error = the_question.sourcecode.splitlines()[line_with_error - 1]
exc.__traceback__ = tb
del cl
del exc
del tb
raise
ok_outside_string = string.ascii_letters + string.digits + '.[]_'
ok_inside_string = string.ascii_letters + string.digits + string.punctuation + " "
def parse_var_name(var):
var_len = len(var)
cur_pos = 0
in_bracket = 0
in_quote = 0
the_quote = None
dots = list()
brackets = list()
while cur_pos < var_len:
char = var[cur_pos]
if char == '[':
if cur_pos == 0:
return dict(valid=False, reason='bracket at start')
if var[cur_pos - 1] == '.':
return dict(valid=False, reason='dot before bracket')
if not in_quote:
if in_bracket:
return dict(valid=False, reason='nested brackets')
in_bracket = 1
brackets.append(cur_pos)
elif char == ']':
if cur_pos == 0:
                return dict(valid=False, reason='bracket at start')
if var[cur_pos - 1] == '.':
return dict(valid=False, reason='dot before bracket')
if not in_quote:
if in_bracket:
in_bracket = 0
else:
return dict(valid=False, reason='unexpected end bracket')
elif char in ("'", '"'):
if cur_pos == 0 or not in_bracket:
return dict(valid=False, reason='unexpected quote mark')
if in_quote:
if char == the_quote and var[cur_pos - 1] != "\\":
in_quote = 0
else:
in_quote = 1
the_quote = char
else:
if not (in_quote or in_bracket):
if char not in ok_outside_string:
return dict(valid=False, reason='invalid character in variable name')
if cur_pos == 0:
if char in string.digits or char == '.':
return dict(valid=False, reason='starts with digit or dot')
else:
if var[cur_pos - 1] == '.' and char in string.digits:
return dict(valid=False, reason='attribute starts with digit')
if in_quote:
if char not in ok_inside_string:
return dict(valid=False, reason='invalid character in string')
else:
if char == '.':
if in_bracket:
return dict(valid=False, reason="dot in bracket")
if cur_pos > 0 and var[cur_pos - 1] == '.':
return dict(valid=False, reason = 'two dots')
dots.append(cur_pos)
cur_pos += 1
if in_bracket:
return dict(valid=False, reason='dangling bracket part')
if in_quote:
return dict(valid=False, reason='dangling quote part')
objects = [var[0:dot_pos] for dot_pos in dots]
bracket_objects = [var[0:bracket_pos] for bracket_pos in brackets]
final_cut = 0
if len(dots):
final_cut = dots[-1]
if len(brackets):
if brackets[-1] > final_cut:
final_cut = brackets[-1]
if final_cut > 0:
final_parts = (var[0:final_cut], var[final_cut:])
else:
final_parts = (var, '')
return dict(valid=True, objects=objects, bracket_objects=bracket_objects, final_parts=final_parts)
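# Illustrative example: parse_var_name("client.name[0]") should return a dictionary along
# the lines of {'valid': True, 'objects': ['client'], 'bracket_objects': ['client.name'],
# 'final_parts': ('client.name', '[0]')}, while a malformed name yields {'valid': False, ...}
# together with a 'reason' describing the problem.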
class DAExtension(Extension):
def filter_stream(self, stream):
in_var = False
met_pipe = False
for token in stream:
if token.type == 'variable_begin':
in_var = True
met_pipe = False
if token.type == 'variable_end':
in_var = False
if not met_pipe:
yield Token(token.lineno, 'pipe', None)
yield Token(token.lineno, 'name', 'ampersand_filter')
# if in_var and token.type == 'pipe':
# met_pipe = True
yield token
class DAEnvironment(Environment):
def from_string(self, source, **kwargs):
source = re.sub(r'({[\%\{].*?[\%\}]})', fix_quotes, source)
return super().from_string(source, **kwargs)
def getitem(self, obj, argument):
try:
return obj[argument]
except (AttributeError, TypeError, LookupError):
return self.undefined(obj=obj, name=argument, accesstype='item')
def getattr(self, obj, attribute):
try:
return getattr(obj, attribute)
except AttributeError:
pass
return self.undefined(obj=obj, name=attribute, accesstype='attribute')
def ampersand_filter(value):
if value.__class__.__name__ in ('DAFile', 'DALink', 'DAStaticFile', 'DAFileCollection', 'DAFileList'):
return value
if value.__class__.__name__ in ('InlineImage', 'RichText', 'Listing', 'Document', 'Subdoc', 'DALazyTemplate'):
return str(value)
if isinstance(value, (int, bool, float, NoneType)):
return value
if not isinstance(value, str):
value = str(value)
value = docassemble.base.file_docx.sanitize_xml(value)
if '<w:r>' in value or '</w:t>' in value:
        return re.sub(r'&(?!#?[0-9A-Za-z]+;)', '&amp;', value)
    return re.sub(r'>', '&gt;', re.sub(r'<', '&lt;', re.sub(r'&(?!#?[0-9A-Za-z]+;)', '&amp;', value)))
class DAStrictUndefined(StrictUndefined):
    __slots__ = ('_undefined_type',)
def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError, accesstype=None):
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
self._undefined_type = accesstype
@internalcode
def __getattr__(self, name):
if name[:2] == '__':
raise AttributeError(name)
return self._fail_with_undefined_error(attribute=True)
@internalcode
def __getitem__(self, index):
if index[:2] == '__':
raise IndexError(index)
return self._fail_with_undefined_error(item=True)
@internalcode
def _fail_with_undefined_error(self, *args, **kwargs):
if True or self._undefined_hint is None:
if self._undefined_obj is missing:
hint = "'%s' is undefined" % self._undefined_name
elif self._undefined_type == 'item' and hasattr(self._undefined_obj, 'instanceName'):
hint = "'%s[%r]' is undefined" % (
self._undefined_obj.instanceName,
self._undefined_name
)
elif 'attribute' in kwargs or self._undefined_type == 'attribute':
if hasattr(self._undefined_obj, 'instanceName'):
hint = "'%s.%s' is undefined" % (
self._undefined_obj.instanceName,
self._undefined_name
)
else:
hint = '%r has no attribute %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
if hasattr(self._undefined_obj, 'instanceName'):
hint = "'%s[%r]' is undefined" % (
self._undefined_obj.instanceName,
self._undefined_name
)
else:
hint = '%s has no element %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = self._undefined_hint
raise self._undefined_exception(hint)
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
__float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
__rsub__= __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
__ne__ = __bool__ = __hash__ = _fail_with_undefined_error
def mygetattr(y, attr):
for attribute in attr.split('.'):
y = getattr(y, attribute)
return y
def str_or_original(y, case_sensitive):
if case_sensitive:
if hasattr(y, 'instanceName'):
if y.__class__.__name__ in ('Value', 'PeriodicValue'):
return y.amount()
return str(y)
return y
if hasattr(y, 'instanceName'):
if y.__class__.__name__ in ('Value', 'PeriodicValue'):
return y.amount()
return str(y).lower()
try:
return y.lower()
except:
return y
def dictsort_filter(dictionary, case_sensitive=False, by='key', reverse=False):
if by == 'value':
return sorted(dictionary.items(), key=lambda y: str_or_original(y[1], case_sensitive), reverse=reverse)
else:
return sorted(dictionary.items(), key=lambda y: str_or_original(y[0], case_sensitive), reverse=reverse)
def sort_filter(array, reverse=False, case_sensitive=False, attribute=None):
if attribute is None:
if not case_sensitive:
def key_func(y):
return str_or_original(y, case_sensitive)
else:
key_func = None
else:
if isinstance(attribute, list):
attributes = [str(y).strip() for y in attribute]
else:
attributes = [y.strip() for y in str(attribute).split(',')]
def key_func(y):
return [str_or_original(mygetattr(y, attribute), case_sensitive) for attribute in attributes]
return sorted(array, key=key_func, reverse=reverse)
_GroupTuple = namedtuple('_GroupTuple', ['grouper', 'list'])
_GroupTuple.__repr__ = tuple.__repr__
_GroupTuple.__str__ = tuple.__str__
def groupby_filter(array, attr_name):
def func(y):
return mygetattr(y, attr_name)
return [_GroupTuple(key, list(values)) for key, values in groupby(sorted(array, key=func), func)]
def max_filter(array, case_sensitive=False, attribute=None):
it = iter(array)
try:
first = next(it)
except StopIteration:
raise DAError("max: list was empty")
if attribute:
def key_func(y):
return str_or_original(mygetattr(y, attribute), case_sensitive=case_sensitive)
else:
def key_func(y):
return str_or_original(y, case_sensitive=case_sensitive)
return max(chain([first], it), key=key_func)
def min_filter(array, case_sensitive=False, attribute=None):
it = iter(array)
try:
first = next(it)
except StopIteration:
raise DAError("min: list was empty")
if attribute:
def key_func(y):
return str_or_original(mygetattr(y, attribute), case_sensitive=case_sensitive)
else:
def key_func(y):
return str_or_original(y, case_sensitive=case_sensitive)
return min(chain([first], it), key=key_func)
def sum_filter(array, attribute=None, start=0):
if attribute is not None:
array = [mygetattr(y, attribute) for y in array]
return sum(array, start)
def unique_filter(array, case_sensitive=False, attribute=None):
seen = set()
if attribute is None:
for item in array:
new_item = str_or_original(item, case_sensitive)
if new_item not in seen:
seen.add(new_item)
yield item
else:
for item in array:
new_item = str_or_original(mygetattr(item, attribute), case_sensitive)
if new_item not in seen:
seen.add(new_item)
yield mygetattr(item, attribute)
def join_filter(array, d="", attribute=None):
if attribute is not None:
return d.join([str(mygetattr(y, attribute)) for y in array])
return d.join([str(y) for y in array])
def attr_filter(var, attr_name):
return mygetattr(var, attr_name)
def selectattr_filter(*pargs, **kwargs):
if len(pargs) > 2:
array = pargs[0]
attr_name = pargs[1]
func_name = pargs[2]
env = custom_jinja_env()
func = lambda item: env.call_test(func_name, item, pargs[3:], kwargs)
for item in array:
if func(mygetattr(item, attr_name)):
yield item
else:
for item in pargs[0]:
if mygetattr(item, pargs[1]):
yield item
def rejectattr_filter(*pargs, **kwargs):
if len(pargs) > 2:
array = pargs[0]
attr_name = pargs[1]
func_name = pargs[2]
env = custom_jinja_env()
func = lambda item: env.call_test(func_name, item, pargs[3:], kwargs)
for item in array:
if not func(mygetattr(item, attr_name)):
yield item
else:
for item in pargs[0]:
if not mygetattr(item, pargs[1]):
yield item
def map_filter(*pargs, **kwargs):
if len(pargs) >= 2:
array = pargs[0]
the_filter = pargs[1]
env = custom_jinja_env()
if the_filter not in env.filters:
raise DAError('filter passed to map() does not exist')
for item in array:
yield env.call_filter(the_filter, item, pargs[2:], kwargs)
else:
if 'attribute' in kwargs:
if 'default' in kwargs:
for item in pargs[0]:
yield mygetattr(item, kwargs['attribute'], kwargs['default'])
else:
for item in pargs[0]:
yield mygetattr(item, kwargs['attribute'])
elif 'index' in kwargs:
if 'default' in kwargs:
for item in pargs[0]:
yield item.get(kwargs['index'], kwargs['default'])
else:
for item in pargs[0]:
yield item[kwargs['index']]
elif 'function' in kwargs:
the_kwargs = kwargs.get('kwargs', dict())
the_pargs = kwargs.get('pargs', list())
if not isinstance(the_kwargs, dict):
raise DAError('kwargs passed to map() must be a dictionary')
if not isinstance(the_pargs, list):
raise DAError('pargs passed to map() must be a list')
for item in pargs[0]:
yield kwargs['function'](item, *the_pargs, **the_kwargs)
else:
raise DAError('map() must refer to a function, index, attribute, or filter')
def markdown_filter(text):
return docassemble.base.file_docx.markdown_to_docx(text, docassemble.base.functions.this_thread.current_question, docassemble.base.functions.this_thread.misc.get('docx_template', None))
def inline_markdown_filter(text):
return docassemble.base.file_docx.inline_markdown_to_docx(text, docassemble.base.functions.this_thread.current_question, docassemble.base.functions.this_thread.misc.get('docx_template', None))
builtin_jinja_filters = {
'ampersand_filter': ampersand_filter,
'markdown': markdown_filter,
'add_separators': docassemble.base.functions.add_separators,
'inline_markdown': inline_markdown_filter,
'paragraphs': docassemble.base.functions.single_to_double_newlines,
'manual_line_breaks': docassemble.base.functions.manual_line_breaks,
'RichText': docassemble.base.file_docx.RichText,
'groupby': groupby_filter,
'max': max_filter,
'min': min_filter,
'sum': sum_filter,
'unique': unique_filter,
'join': join_filter,
'attr': attr_filter,
'selectattr': selectattr_filter,
'rejectattr': rejectattr_filter,
'sort': sort_filter,
'dictsort': dictsort_filter,
'nice_number': docassemble.base.functions.nice_number,
'ordinal': docassemble.base.functions.ordinal,
'ordinal_number': docassemble.base.functions.ordinal_number,
'currency': docassemble.base.functions.currency,
'comma_list': docassemble.base.functions.comma_list,
'comma_and_list': docassemble.base.functions.comma_and_list,
'capitalize': docassemble.base.functions.capitalize,
'salutation': docassemble.base.functions.salutation,
'alpha': docassemble.base.functions.alpha,
'roman': docassemble.base.functions.roman,
'word': docassemble.base.functions.word,
'bold': docassemble.base.functions.bold,
'italic': docassemble.base.functions.italic,
'title_case': docassemble.base.functions.title_case,
'single_paragraph': docassemble.base.functions.single_paragraph,
'phone_number_formatted': docassemble.base.functions.phone_number_formatted,
'phone_number_in_e164': docassemble.base.functions.phone_number_in_e164,
'country_name': docassemble.base.functions.country_name,
'fix_punctuation': docassemble.base.functions.fix_punctuation,
'redact': docassemble.base.functions.redact,
'verbatim': docassemble.base.functions.verbatim,
'map': map_filter
}
registered_jinja_filters = {}
def custom_jinja_env():
env = DAEnvironment(undefined=DAStrictUndefined, extensions=[DAExtension])
env.filters.update(registered_jinja_filters)
env.filters.update(builtin_jinja_filters)
return env
def register_jinja_filter(filter_name, func):
if filter_name in builtin_jinja_filters:
raise DAError("Cannot register filter with same name as built-in filter %s" % filter_name)
registered_jinja_filters[filter_name] = func
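# Illustrative example: a package could expose an extra filter for docx templates with
# something like register_jinja_filter('shout', lambda value: str(value).upper()), after
# which {{ client.name | shout }} becomes usable in a template; reusing the name of a
# built-in filter raises a DAError instead.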
def get_docx_variables(the_path):
import docassemble.base.legal
names = set()
if not os.path.isfile(the_path):
raise DAError("Missing docx template file " + os.path.basename(the_path))
try:
docx_template = docassemble.base.file_docx.DocxTemplate(the_path)
the_env = custom_jinja_env()
the_xml = docx_template.get_xml()
the_xml = re.sub(r'<w:p>', '\n<w:p>', the_xml)
the_xml = re.sub(r'({[\%\{].*?[\%\}]})', fix_quotes, the_xml)
the_xml = docx_template.patch_xml(the_xml)
parsed_content = the_env.parse(the_xml)
except Exception as the_err:
raise DAError("There was an error parsing the docx file: " + the_err.__class__.__name__ + " " + str(the_err))
for key in jinja2meta.find_undeclared_variables(parsed_content):
if not key.startswith('_'):
names.add(key)
for name in docassemble.base.legal.__all__:
if name in names:
names.remove(name)
return sorted(list(names))
def allow_users_list(obj):
if not (isinstance(obj, list) or (hasattr(obj, 'instanceName') and hasattr(obj, 'elements') and isinstance(obj.elements, list))):
obj = [obj]
new_list = list()
for item in obj:
if isinstance(item, str) and re.search(r'^[0-9]+$', item):
item = int(item)
if isinstance(item, (int, str)):
new_list.append(item)
else:
email_address_method = getattr(item, 'email_address', None)
if callable(email_address_method):
new_list.append(item.email)
else:
new_list.append(str(item))
return new_list
def allow_privileges_list(obj):
if not (isinstance(obj, list) or (hasattr(obj, 'instanceName') and hasattr(obj, 'elements') and isinstance(obj.elements, list))):
obj = [obj]
new_list = list()
for item in obj:
if isinstance(item, str):
new_list.append(item)
return new_list
class MLStripper(HTMLParser):
def __init__(self):
super().__init__()
self.reset()
self.strict = False
        self.convert_charrefs = True
self.text = StringIO()
def handle_data(self, d):
self.text.write(d)
def get_data(self):
return self.text.getvalue()
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
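# Illustrative example: strip_tags('<p>Hello <b>world</b></p>') should return 'Hello world',
# since MLStripper keeps only the character data of the HTML it is fed.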
| 65.172717 | 2,221 | 0.538663 |
01da6c48b603a747e724394760e48018649ccd5a | 823 | py | Python | meiduo_mall/manage.py | shenhaiyu0923/meiduo_project | 0ba91533294c5bb6f8ca54f93eabdff007a3560f | ["MIT"] | 4 | 2021-04-30T05:45:32.000Z | 2021-04-30T05:56:03.000Z | meiduo_mall/manage.py | shenhaiyu0923/meiduo_project | 0ba91533294c5bb6f8ca54f93eabdff007a3560f | ["MIT"] | null | null | null | meiduo_mall/manage.py | shenhaiyu0923/meiduo_project | 0ba91533294c5bb6f8ca54f93eabdff007a3560f | ["MIT"] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "meiduo_mall.settings.dev")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.782609 | 79 | 0.648846 |
c6f66cabd411594ef2112b6b2bff31855d4c5ca0 | 1,755 | py | Python | server/src/police_lineups/controllers/lineups/queries.py | vabalcar/police-lineups | 9c4a17d58e973d6db6e442bd9d5f4313ad4d51b7 | ["MIT"] | null | null | null | server/src/police_lineups/controllers/lineups/queries.py | vabalcar/police-lineups | 9c4a17d58e973d6db6e442bd9d5f4313ad4d51b7 | ["MIT"] | 2 | 2021-09-24T11:43:58.000Z | 2021-09-24T12:00:21.000Z | server/src/police_lineups/controllers/lineups/queries.py | vabalcar/police-lineups | 9c4a17d58e973d6db6e442bd9d5f4313ad4d51b7 | ["MIT"] | null | null | null |
from typing import List
from swagger_server.models import Lineup, Person
from police_lineups.controllers.utils.responses import Responses
from police_lineups.db import DbLineup, DbLineupPerson, DbPerson, DbUser
from police_lineups.singletons import Context
from .utils import owner_auth_guard
def get_lineups():
return [
Lineup(
lineup_id=db_lineup.lineup_id,
name=db_lineup.name,
last_edit_date_time=db_lineup.last_edit_date_time,
owner_username=db_lineup.owner_id.username) for db_lineup in DbLineup.select().join(DbUser)]
def get_lineups_for_current_user():
return [
Lineup(
lineup_id=db_lineup.lineup_id,
name=db_lineup.name,
last_edit_date_time=db_lineup.last_edit_date_time) for db_lineup in DbLineup.select().where(
DbLineup.owner_id == Context().user.user_id)]
def get_lineup(lineup_id):
db_lineup: DbLineup = DbLineup.get_or_none(lineup_id)
if db_lineup is None:
return Responses.NOT_FOUND
owner_auth_guard(db_lineup)
lineup_people: List[Person] = [
Person(
person_id=db_lineup_person.person_id.person_id,
photo_blob_name=db_lineup_person.person_id.photo_blob_name,
full_name=db_lineup_person.person_id.full_name,
birth_date=db_lineup_person.person_id.birth_date,
nationality=db_lineup_person.person_id.nationality)
for db_lineup_person in DbLineupPerson.select().join(DbPerson).where(
DbLineupPerson.lineup_id == lineup_id)]
return Lineup(
lineup_id=db_lineup.lineup_id,
name=db_lineup.name,
last_edit_date_time=db_lineup.last_edit_date_time,
people=lineup_people)
| 33.75 | 104 | 0.71567 |
cd4aa44c97e6f28ebc73dee7a5d8e39402669e51 | 14,316 | py | Python | pyfo/utils/core.py | bradleygramhansen/pyfo | 559678080f27e7d9f3f194a0c28e9e8bfe71a7f3 | [
"MIT"
] | 3 | 2018-06-11T09:16:13.000Z | 2019-03-08T05:22:43.000Z | pyfo/utils/core.py | bradleygramhansen/pyfo | 559678080f27e7d9f3f194a0c28e9e8bfe71a7f3 | [
"MIT"
] | null | null | null | pyfo/utils/core.py | bradleygramhansen/pyfo | 559678080f27e7d9f3f194a0c28e9e8bfe71a7f3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: Bradley Gram-Hansen
Time created: 10:02
Date created: 10/11/2017
License: MIT
'''
import torch
import numpy as np
import torch.distributions as dists
from torch.distributions import constraints, biject_to
try:
import networkx as _nx
except ModuleNotFoundError:
_nx = None
try:
import matplotlib.pyplot as _plt
import matplotlib.patches as mpatches
except ModuleNotFoundError:
_plt = None
class DualAveraging(object):
"""
Dual Averaging is a scheme to solve convex optimization problems. It belongs
to a class of subgradient methods which uses subgradients to update parameters
(in primal space) of a model. Under some conditions, the averages of generated
parameters during the scheme are guaranteed to converge to an optimal value.
However, a counter-intuitive aspect of traditional subgradient methods is
"new subgradients enter the model with decreasing weights" (see :math:`[1]`).
Dual Averaging scheme solves that phenomenon by updating parameters using
weights equally for subgradients (which lie in a dual space), hence we have
the name "dual averaging".
This class implements a dual averaging scheme which is adapted for Markov chain
Monte Carlo (MCMC) algorithms. To be more precise, we will replace subgradients
by some statistics calculated during an MCMC trajectory. In addition,
    introducing some free parameters such as ``t0`` and ``kappa`` is helpful and
still guarantees the convergence of the scheme.
References
[1] `Primal-dual subgradient methods for convex problems`,
Yurii Nesterov
[2] `The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo`,
Matthew D. Hoffman, Andrew Gelman
:param float prox_center: A "prox-center" parameter introduced in :math:`[1]`
which pulls the primal sequence towards it.
:param float t0: A free parameter introduced in :math:`[2]`
that stabilizes the initial steps of the scheme.
:param float kappa: A free parameter introduced in :math:`[2]`
that controls the weights of steps of the scheme.
For a small ``kappa``, the scheme will quickly forget states
from early steps. This should be a number in :math:`(0.5, 1]`.
:param float gamma: A free parameter which controls the speed
of the convergence of the scheme.
"""
def __init__(self, prox_center=0, t0=10, kappa=0.75, gamma=0.05):
self.prox_center = prox_center
self.t0 = t0
self.kappa = kappa
self.gamma = gamma
self._x_avg = 0 # average of primal sequence
self._g_avg = 0 # average of dual sequence
self._t = 0
def step(self, g):
"""
Updates states of the scheme given a new statistic/subgradient ``g``.
:param float g: A statistic calculated during an MCMC trajectory or subgradient.
"""
self._t += 1
# g_avg = (g_1 + ... + g_t) / t
self._g_avg = (1 - 1/(self._t + self.t0)) * self._g_avg + g / (self._t + self.t0)
# According to formula (3.4) of [1], we have
# x_t = argmin{ g_avg . x + loc_t . |x - x0|^2 },
# where loc_t := beta_t / t, beta_t := (gamma/2) * sqrt(t)
self._x_t = self.prox_center - (self._t ** 0.5) / self.gamma * self._g_avg
# weight for the new x_t
weight_t = self._t ** (-self.kappa)
self._x_avg = (1 - weight_t) * self._x_avg + weight_t * self._x_t
def get_state(self):
r"""
Returns the latest :math:`x_t` and average of
:math:`\left\{x_i\right\}_{i=1}^t` in primal space.
"""
return self._x_t, self._x_avg
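# --- Added usage sketch (illustrative, not part of the original pyfo module) ---
# Shows how the DualAveraging scheme documented above is typically driven: one
# statistic per iteration goes into step(), and get_state() returns the latest
# primal value plus its running average. The statistics below are placeholders.
def _dual_averaging_usage_example():
    da = DualAveraging(prox_center=0.0, t0=10, kappa=0.75, gamma=0.05)
    for stat in (0.4, -0.1, 0.25, 0.05):
        da.step(stat)
    latest_x, averaged_x = da.get_state()
    return latest_x, averaged_x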
def create_network_graph(vertices):
"""
Create a `networkx` graph. Used by the method `display_graph()`.
:return: Either a `networkx.DiGraph` instance or `None`.
"""
if _nx:
G = _nx.DiGraph()
for v in vertices:
G.add_node(v.display_name)
for a in v.ancestors:
G.add_edge(a.display_name, v.display_name)
return G
else:
return None
def display_graph(vertices):
"""
Transform the graph to a `networkx.DiGraph`-structure and display it using `matplotlib` -- if the necessary
libraries are installed.
:return: `True` if the graph was drawn, `False` otherwise.
"""
    G = create_network_graph(vertices)
_is_conditioned = None
if _nx and _plt and G:
try:
from networkx.drawing.nx_agraph import graphviz_layout
pos = graphviz_layout(G, prog='dot')
except ModuleNotFoundError:
from networkx.drawing.layout import shell_layout
pos = shell_layout(G)
except ImportError:
from networkx.drawing.layout import shell_layout
pos = shell_layout(G)
_plt.subplot(111)
_plt.axis('off')
_nx.draw_networkx_nodes(G, pos,
node_color='r',
node_size=1250,
nodelist=[v.display_name for v in vertices if v.is_sampled])
_nx.draw_networkx_nodes(G, pos,
node_color='b',
node_size=1250,
nodelist=[v.display_name for v in vertices if v.is_observed])
for v in vertices:
_nx.draw_networkx_edges(G, pos, arrows=True,
edgelist=[(a.display_name, v.display_name) for a in v.ancestors])
if v.condition_ancestors is not None and len(v.condition_ancestors) > 0:
_is_conditioned = 1
_nx.draw_networkx_edges(G, pos, arrows=True,
style='dashed',
edge_color='g',
edgelist=[(a.display_name, v.display_name) for a in v.condition_ancestors])
_nx.draw_networkx_labels(G, pos, font_color='w', font_weight='bold')
# for node, _ in G.nodes():
red_patch = mpatches.Circle((0,0), radius=2, color='r', label='Sampled Variables')
blue_patch = mpatches.Circle((0,0), radius=2, color='b', label='Observed Variables')
green_patch = mpatches.Circle((0,0), radius=2, color='g', label='Conditioned Variables') if _is_conditioned else 0
if _is_conditioned:
_plt.legend(handles=[red_patch, blue_patch, green_patch])
else:
_plt.legend(handles=[red_patch, blue_patch])
_plt.show()
return True
else:
return False
def VariableCast(value, grad = False):
'''casts an input to torch.tensor object
:param value Type: scalar, torch.Tensor object, torch.Tensor, numpy ndarray
:param grad Type: bool . If true then we require the gradient of that object
output
------
torch.tensor object
'''
dtype = torch.float
if value is None:
return None
elif isinstance(value,torch.Tensor):
return torch.tensor(value,dtype=dtype,requires_grad=grad)
elif isinstance(value, np.ndarray):
tensor = torch.from_numpy(value).float()
return torch.tensor(tensor, dtype=dtype, requires_grad = grad)
elif isinstance(value,list):
return torch.tensor(value,dtype=dtype, requires_grad=grad).unsqueeze(-1)
else:
return torch.tensor([value],dtype=dtype, requires_grad = grad).unsqueeze(-1)
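# Added note (illustrative, not in the original module): VariableCast(3.0)
# yields a float tensor of shape (1, 1) and VariableCast([1.0, 2.0]) one of
# shape (2, 1); existing tensors and numpy arrays keep their own shape, and
# grad=True marks the result as requiring gradients.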
def tensor_to_list(self, values):
    ''' Converts a sequence of tensors (or castable values) to a list of leaf tensors
    values = torch.FloatTensor or torch.Tensor'''
    params = []
    for value in values:
        if isinstance(value, torch.Tensor):
            temp = torch.tensor(value.data, requires_grad=True)
            params.append(temp)
        else:
            # Cast plain values first, then build a leaf tensor from the result
            temp = VariableCast(value)
            temp = torch.tensor(temp.data, requires_grad=True)
            params.append(temp)
    return params
def TensorCast(value):
    if isinstance(value, torch.Tensor):
return value
else:
return torch.tensor([value])
def list_to_tensor(self, params):
'''
Unpacks the parameters list tensors and converts it to list
returns tensor of num_rows = len(values) and num_cols = 1
problem:
if there are col dimensions greater than 1, then this will not work
'''
print('Warning ---- UNSTABLE FUNCTION ----')
assert(isinstance(params, list))
temp = torch.tensor(torch.Tensor(len(params)).unsqueeze(-1))
for i in range(len(params)):
temp[i,:] = params[i]
return temp
def logical_trans(var):
"""
Returns logical 0 or 1 for given variable.
:param var: Is a 1-d torch.Tensor, float or np.array
:return: Bool
"""
    print("Warning: logical_trans() has not been tested on tensors of dimension greater than 1")
value = VariableCast(var)
if value.data[0]:
return True
else:
return False
def get_tensor_data(t):
    """
    Returns the underlying data of a torch.Tensor
    :param t: torch.Tensor
    :return: torch.Tensor
    """
    if isinstance(t, torch.Tensor):
        return t.data
    return t
def my_import(name):
'''
Helper function for extracting the whole module and not just the package.
See answer by clint miller for details:
    https://stackoverflow.com/questions/951124/dynamic-loading-of-python-modules
:param name
:type string
:return module
'''
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
        mod = getattr(mod, comp)
return mod
def transform_latent_support(latent_vars, dist_to_latent):
"""
Returns a new state with the required transformations for the log pdf. It checks the support of each continuous
distribution and if that support does not encompass the whole real line, the required bijector is added to a
transform list.
    TODO: Ensure that only continuous latent variables are being passed through this function for now.
:param latent_vars: dictionary of {latent_var: distribution_name}
:param dist_to_latent: dictionary that maps latent_variable names to distribution name
:return: transform: dictionary of {latent_var: bijector_for_latent}
"""
transforms = {}
for latent in latent_vars:
# print('Debug statement: latent vars: {0} and type: {1}'.format(dist_to_latent[latent], type(dist_to_latent[latent])))
temp_support = getattr(dists,dist_to_latent[latent]).support
# print('Debug statement temp_support {0}'.format(temp_support))
if temp_support is not constraints.real:
transforms[latent] = biject_to(temp_support).inv
else:
transforms[latent] = constraints.real
return transforms
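# --- Added usage sketch (illustrative, not part of the original pyfo module) ---
# Illustrates the mapping produced by transform_latent_support(): a latent with
# constrained support (here a Gamma on the positive reals) gets the inverse
# bijector onto the real line, while an unconstrained Normal latent is left
# as constraints.real. The variable names 'x1' and 'x2' are placeholders.
def _transform_latent_support_example():
    latents = ['x1', 'x2']
    dist_to_latent = {'x1': 'Gamma', 'x2': 'Normal'}
    return transform_latent_support(latents, dist_to_latent)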
def convert_dict_vars_to_numpy(self, state, latent_vars ):
"""
:param state: Information on the whole state. Likely to be torch objects
:param latent_vars: type: str descript: A list of the latent variables in the state.
:return: the torch latent variables converted to numpy arrays
    Converts variables in state to numpy arrays for plotting purposes
"""
    for latent in latent_vars:
state[latent] = state[latent].numpy()
# state[i] = state[i].data.numpy()
return state
def _grad_logp(input, parameters, latents):
"""
Returns the gradient of the log pdf, with respect for
each parameter. Note the double underscore, this is to ensure that if
this method is overwritten, then no problems occur when overidded.
:param state:
:return: torch.autograd.Variable
"""
# print(50 *'=')
# print('Debug statement in _grad_logp \n '+50*'='+'\nChecking gradient flag. \n Printing input : {0} \n Printing parameters : {1} \n Checking if gradient turned on: {2} '.format(input, parameters, parameters.requires_grad))
gradient_of_params = {}
# dict([[key, torch.autograd.grad(outputs=input.sum(), inputs=parameters[key], retain_graph=True)][0] for key in
# latents])
for key in latents:
gradient_of_params[key] = torch.autograd.grad(outputs=input.sum(), inputs=parameters[key], retain_graph=True)[0]
# For debugging only, when using simple normal model .
# -log(N(0,1)) = -log(c1) + (x^{2}/ 2)
# dlog / d
true_gradient = {}
for key in latents:
true_gradient[key] = parameters[key]
# print(50*'=')
# print('Debug statement in _grad_logp. Printing torch gradient : {0} \n and True gradient {1}'.format(gradient_of_params, true_gradient))
# print(50 * '=')
return gradient_of_params
def _to_leaf(state, latent_vars):
"""
Ensures that all latent parameters are reset to leaf nodes, before
calling
:param state:
:return:
"""
for key in latent_vars:
state[key] = torch.tensor(state[key], requires_grad=True)
return state
def _generate_log_pdf(model, state):
"""
The compiled pytorch function, log_pdf, should automatically
return the pdf.
:param keys type: list of discrete embedded discrete parameters
:return: log_pdf
    May be overridden in other methods that require dynamic pdfs.
    For example
    if you have a model called mymodel, you could write the following:
Model = compile_model(mymodel) # returns class
class MyNewModel(Model):
def gen_log_pdf(self, state):
for vertex in self.vertices:
pass
return "Whatever you fancy"
# This overrides the base method.
# Then all you have to do is pass
# My model into kernel of choice, i.e
kernel = MCMC(MyNewModel,kernel=HMC)
kernel.run_inference()
    If you require gradients, ensure that you have used the core._to_leaf() function on the 'state'
"""
# if set_leafs:
# # only sets the gradients of the latent variables.
# _state = _to_leaf(state=state, latent_vars=latents)
# else:
# _state = state
# print(50*'=')
# for key in state:
# print('Debug statement in _generate_log_p \n',50*'='+ '\n Printing set_leafs : {0} \n Printing latents : {1} \n gradient: {2} \n key: {3} '.format(set_leafs, latents, state[key].requires_grad, key))
return model.gen_log_pdf(state) | 38.796748 | 228 | 0.644873 |
d564eb8478e4a7a274690bf29d71055ec6de75ab | 565 | py | Python | Solucion_taller_selevtivos/ejercicio14.py | ItsZeus03/Algoritmos-y-Programaciaon | caeddc442f76e4a4b428d668a6730c8096b38ae0 | [
"MIT"
] | null | null | null | Solucion_taller_selevtivos/ejercicio14.py | ItsZeus03/Algoritmos-y-Programaciaon | caeddc442f76e4a4b428d668a6730c8096b38ae0 | [
"MIT"
] | null | null | null | Solucion_taller_selevtivos/ejercicio14.py | ItsZeus03/Algoritmos-y-Programaciaon | caeddc442f76e4a4b428d668a6730c8096b38ae0 | [
"MIT"
] | null | null | null | """
inputs
lectura_antigua (previous meter reading)-->float-->lan
lectura_actual (current meter reading)-->float-->lac
output
pago (amount to pay)-->float-->pa
"""
fa=input("Enter the kilowatts consumed last month and this month (lastmonth currentmonth) ")
(lan,lac)=fa.split(" ")
lan=float(lan)
lac=float(lac)
kv=lac-lan
if(kv>=0 and kv<=100):
    pa=kv*4600
    print("Total to pay "+str(pa)+" COP")
elif(kv>=101 and kv<=300):
    pa=kv*8000
    print("Total to pay "+str(pa)+" COP")
elif(kv>=301 and kv<=500):
    pa=kv*100000
    print("Total to pay "+str(pa)+" COP")
else:
    pa=kv*120000
    print("Total to pay "+str(pa)+" COP") | 23.541667 | 96 | 0.658407 |
c4f1b5398efacc33eea11303718a89d623a7edfc | 11,295 | py | Python | lib/JumpScale/sal/openvswitch/VXNet/utils.py | Jumpscale/jumpscale_core8 | f80ac9b1ab99b833ee7adb17700dcf4ef35f3734 | [
"Apache-2.0"
] | 8 | 2016-04-14T14:04:57.000Z | 2020-06-09T00:24:34.000Z | lib/JumpScale/sal/openvswitch/VXNet/utils.py | Jumpscale/jumpscale_core8 | f80ac9b1ab99b833ee7adb17700dcf4ef35f3734 | [
"Apache-2.0"
] | 418 | 2016-01-25T10:30:00.000Z | 2021-09-08T12:29:13.000Z | lib/JumpScale/sal/openvswitch/VXNet/utils.py | Jumpscale/jumpscale_core8 | f80ac9b1ab99b833ee7adb17700dcf4ef35f3734 | [
"Apache-2.0"
] | 9 | 2016-04-21T07:21:17.000Z | 2022-01-24T10:35:54.000Z | __author__ = 'delandtj'
from JumpScale import j
import os
import os.path
import re
import subprocess
import sys
import time
command_name = sys.argv[0]
vsctl = "/usr/bin/ovs-vsctl"
ofctl = "/usr/bin/ovs-ofctl"
ip = "/sbin/ip"
ethtool = "/sbin/ethtool"
PHYSMTU = 2000
# TODO : errorhandling
def send_to_syslog(msg):
pass
# print msg
# pid = os.getpid()
# print ("%s[%d] - %s" % (command_name, pid, msg))
# syslog.syslog("%s[%d] - %s" % (command_name, pid, msg))
def doexec(args):
"""Execute a subprocess, then return its return code, stdout and stderr"""
send_to_syslog(args)
proc = subprocess.Popen(args, stdin=None, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True, bufsize=-1)
rc = proc.wait()
# rc = proc.communicate()
stdout = proc.stdout
stderr = proc.stderr
return rc, stdout, stderr
def dobigexec(args):
"""Execute a subprocess, then return its return code, stdout and stderr"""
send_to_syslog(args)
proc = subprocess.Popen(args, stdin=None, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True, bufsize=-1)
rc = proc.communicate()
return rc
def get_all_namespaces():
cmd = '%s netns ls' % ip
r, s, e = doexec(cmd.split())
return [line.strip() for line in s.readlines()]
def get_all_ifaces():
"""
List of network interfaces
@rtype : dict
"""
netpath = '/sys/class/net'
ifaces = {}
for i in os.listdir(netpath):
addresspath = os.path.join(netpath, i, "address")
if os.path.exists(addresspath):
with open(addresspath) as f:
addr = f.readline().strip()
ifaces[i] = addr
return ifaces
def get_all_bridges():
cmd = '%s list-br' % vsctl
r, s, e = doexec(cmd.split())
l = [line.strip() for line in s.readlines()]
return l
def ip_link_set(device, args):
cmd = "ip l set " + device + " " + args
doexec(cmd.split())
def limit_interface_rate(limit, interface, burst):
    cmd = "%s set interface %s ingress_policing_rate=%s" % (vsctl, interface, limit)
    r, s, e = doexec(cmd.split())
    if r:
        raise j.exceptions.RuntimeError(
            "Problem with setting rate on interface: %s , problem was : %s " % (interface, e))
    cmd = "%s set interface %s ingress_policing_burst=%s" % (vsctl, interface, burst)
    r, s, e = doexec(cmd.split())
    if r:
        raise j.exceptions.RuntimeError(
            "Problem with setting burst on interface: %s , problem was : %s " % (interface, e))
def createBridge(name):
cmd = '%s --may-exist add-br %s' % (vsctl, name)
r, s, e = doexec(cmd.split())
if r:
raise j.exceptions.RuntimeError("Problem with creation of bridge %s, err was: %s" % (name, e))
if name == "public":
cmd = '%s set Bridge %s stp_enable=true' % (vsctl, name)
r, s, e = doexec(cmd.split())
if r:
raise j.exceptions.RuntimeError("Problem setting STP on bridge %s, err was: %s" % (name, e))
def destroyBridge(name):
cmd = '%s --if-exists del-br %s' % (vsctl, name)
r, s, e = doexec(cmd.split())
if r:
raise j.exceptions.RuntimeError("Problem with destruction of bridge %s, err was: %s" % (name, e))
def listBridgePorts(name):
cmd = '%s list-ports %s' % (vsctl, name)
r, s, e = doexec(cmd.split())
if r:
        raise j.exceptions.RuntimeError("Problem with listing of bridge %s's ports , err was: %s " % (name, e))
return s.read()
def VlanPatch(parentbridge, vlanbridge, vlanid):
parentpatchport = '%s-%s' % (vlanbridge, str(vlanid))
bridgepatchport = '%s-%s' % (parentbridge, str(vlanid))
cmd = '%s add-port %s %s tag=%s -- set Interface %s type=patch options:peer=%s' % (
vsctl, parentbridge, parentpatchport, vlanid, parentpatchport, bridgepatchport)
r, s, e = doexec(cmd.split())
if r:
raise j.exceptions.RuntimeError("Add extra vlan pair failed %s" % (e.readlines()))
cmd = '%s add-port %s %s -- set Interface %s type=patch options:peer=%s' % (
vsctl, vlanbridge, bridgepatchport, bridgepatchport, parentpatchport)
r, s, e = doexec(cmd.split())
if r:
raise j.exceptions.RuntimeError("Add extra vlan pair failed %s" % (e.readlines()))
def addVlanPatch(parbr, vlbr, id, mtu=None):
def bridge_exists(br):
brexist = "{0} br-exists {1}".format(vsctl, br)
r, s, e = doexec(brexist.split())
return r == 0
def port_exists(br, port):
listprts = "{0} list-ports {1}".format(vsctl, br)
r, s, e = doexec(listprts.split())
return port in s.read()
parport = "{}-{!s}".format(vlbr, id)
brport = "{}-{!s}".format(parbr, id)
if not bridge_exists(vlbr):
brcreate = "{0} add-br {1}".format(vsctl, vlbr)
r, s, e = doexec(brcreate.split())
if not port_exists(vlbr, brport):
addport = "{0} add-port {1} {3} -- set Interface {3} type=patch options:peer={2}".format(
vsctl, vlbr, parport, brport)
r, s, e = doexec(addport.split())
if not port_exists(parbr, parport):
c = "{4} add-port {0} {2} tag={3!s} -- set Interface {2} type=patch options:peer={1}".format(
parbr, brport, parport, id, vsctl)
r, s, e = doexec(c.split())
if mtu:
ip_link_set(vlbr, 'mtu {0}'.format(mtu))
def createNameSpace(name):
if name not in get_all_namespaces():
cmd = '%s netns add %s' % (ip, name)
r, s, e = doexec(cmd.split())
else:
send_to_syslog('Namespace %s already exists, not creating' % name)
def destroyNameSpace(name):
if name in get_all_namespaces():
cmd = '%s netns delete %s' % (ip, name)
r, s, e = doexec(cmd.split())
else:
send_to_syslog('Namespace %s doesn\'t exist, nothing done ' % name)
def createVethPair(left, right):
cmd = '%s link add %s type veth peer name %s' % (ip, left, right)
allifaces = get_all_ifaces()
if left in allifaces or right in allifaces:
# one of them already exists
        send_to_syslog("Problem with creation of veth pair %s, %s :one of them exists" % (left, right))
r, s, e = doexec(cmd.split())
# wait for it to come up
time.sleep(.2)
ip_link_set(left, 'up')
ip_link_set(right, 'up') # when sent into namespace, it'll be down again
disable_ipv6(left) # not right, as it can be used in a namespace
def destroyVethPair(left):
cmd = '%s link del %s ' % (ip, left)
r, s, e = doexec(cmd.split())
if r:
raise j.exceptions.RuntimeError("Problem with destruction of Veth pair %s, err was: %s" % (left, e))
def createVXlan(vxname, vxid, multicast, vxbackend):
"""
Always brought up too
Created with no protocol, and upped (no ipv4, no ipv6)
Fixed standard : 239.0.x.x, id
# 0000-fe99 for customer vxlans, ff00-ffff for environments
MTU of VXLAN = 1500
"""
cmd = 'ip link add %s type vxlan id %s group %s ttl 60 dev %s' % (vxname, vxid, multicast, vxbackend)
r, s, e = doexec(cmd.split())
disable_ipv6(vxname)
setMTU(vxname, 1500)
ip_link_set(vxname, 'up')
if r:
send_to_syslog("Problem with creation of vxlan %s, err was: %s" % (vxname, e.readlines()))
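# Added example (illustrative, not in the original file): following the naming,
# multicast and MTU conventions documented above, a customer VXLAN on backend
# interface 'eth1' could be created with something like
#     createVXlan("vx-0042", 66, "239.0.0.66", "eth1")
# where the name, id and group address are placeholder values.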
def destroyVXlan(name):
cmd = '%s link del %s ' % (ip, name)
r, s, e = doexec(cmd.split())
if r:
send_to_syslog("Problem with destruction of Veth pair %s, err was: %s" % (name, e.readlines()))
exit(1)
def addIPv4(interface, ipobj, namespace=None):
netmask = ipobj.prefixlen
ipv4addr = ipobj.ip
    # if ip exists on interface, we assume all ok
if namespace is not None:
cmd = '%s netns exec %s ip addr add %s/%s dev %s' % (ip, namespace, ipv4addr, netmask, interface)
else:
cmd = '%s addr add %s/%s dev %s' % (ip, ipv4addr, netmask, interface)
r, s, e = doexec(cmd.split())
if r:
send_to_syslog('Could not add IP %s to interface %s ' % (ipv4addr, interface))
return r, e
def addIPv6(interface, ipobj, namespace=None):
netmask = ipobj.prefixlen
ipv6addr = ipobj.ip
    # if ip exists on interface, we assume all ok
    if namespace is not None and namespace in get_all_namespaces():
cmd = '%s netns exec %s ip addr add %s/%s dev %s' % (ip, namespace, ipv6addr, netmask, interface)
else:
cmd = '%s addr add %s/%s dev %s' % (ip, ipv6addr, netmask, interface)
r, s, e = doexec(cmd.split())
if r:
send_to_syslog('Could not add IP %s to interface %s ' % (ipv6addr, interface))
return r, e
def connectIfToBridge(bridge, interfaces):
for interface in interfaces:
cmd = '%s --if-exists del-port %s %s' % (vsctl, bridge, interface)
r, s, e = doexec(cmd.split())
cmd = '%s --may-exist add-port %s %s' % (vsctl, bridge, interface)
r, s, e = doexec(cmd.split())
if r:
raise j.exceptions.RuntimeError('Error adding port %s to bridge %s' % (interface, bridge))
def removeIfFromBridge(bridge, interfaces):
for interface in interfaces:
cmd = '%s --if-exists del-port %s %s' % (vsctl, bridge, interface)
r, s, e = doexec(cmd.split())
if r:
raise j.exceptions.RuntimeError('Error adding port %s to bridge %s' % (interface, bridge))
def connectIfToNameSpace(nsname, interface):
cmd = '%s link set %s netns %s' % (ip, interface, nsname)
r, s, e = doexec(cmd.split())
if r:
raise j.exceptions.RuntimeError("Error moving %s to namespace %s" % (interface, nsname))
def disable_ipv6(interface):
if interface in get_all_ifaces():
cmd = 'sysctl -w net.ipv6.conf.%s.disable_ipv6=1' % interface
r, s, e = doexec(cmd.split())
def setMTU(interface, mtu):
cmd = 'ip link set %s mtu %s' % (interface, mtu)
r, s, e = doexec(cmd.split())
if r:
raise j.exceptions.RuntimeError('Could not set %s to MTU %s' % (interface, mtu))
def addBond(bridge, bondname, iflist, lacp="active", lacp_time="fast", mode="balance-tcp", trunks=None):
# bond_mode=balance-tcp lacp=active bond_fake_iface=false
# other_config:lacp-time=fast bond_updelay=2000 bond_downdelay=400
"""
Add a bond to a bridge
:param bridge: BridgeName (string)
:param bondname: Bondname (string)
    :param iflist: interface names in one string, separated by spaces or commas
:param lacp: "active" or "passive"
:param lacp_time: mode "fast" or "slow"
:param mode: balance-tcp, balance-slb, active-passive
:param trunks: allowed VLANS (list or tuple)
"""
    intf = re.split(r'\W+', iflist)
    if isinstance(trunks, str):
        tr = re.split(r'\W+', trunks)
    else:
        tr = trunks
    buildup = "%s add-bond %s %s " % (vsctl, bridge, bondname) + " ".join(e for e in list(set(intf))) + " lacp=%s " % lacp
    buildup = buildup + " -- set Port %s bond_mode=%s bond_fake_iface=false " % (bondname, mode)
    buildup = buildup + "other_config:lacp-time=%s bond_updelay=2000 bond_downdelay=400 " % lacp_time
    if tr is not None:
        trlist = ",".join(str(e) for e in list(set(tr)))
        buildup = buildup + "trunks=" + trlist
    # no use to autoconf ipv6, as this won't work anyway
    for i in intf:
        disable_ipv6(i)
    r, s, e = doexec(buildup.split())
    if r:
        raise j.exceptions.RuntimeError("Could not create bond %s for bridge %s" % (bondname, bridge))
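# Added example (illustrative, not in the original file): bonding two NICs into
# bridge 'public' with active LACP and VLANs 100/200 allowed on the trunk could
# look like
#     addBond("public", "bond0", "eth0 eth1", lacp="active", trunks=(100, 200))
# where the bridge, bond and interface names are placeholder values.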
| 34.753846 | 112 | 0.612749 |
bcd0263f00274b0f10e9c41b45040b999796b429 | 2,730 | py | Python | smqtk_classifier/interfaces/classify_image_supervised.py | bardkw/SMQTK-Classifier | 68022a0be089ace123d20c1c080fb84e103a50da | [
"BSD-3-Clause"
] | 1 | 2021-04-09T20:52:55.000Z | 2021-04-09T20:52:55.000Z | smqtk_classifier/interfaces/classify_image_supervised.py | bardkw/SMQTK-Classifier | 68022a0be089ace123d20c1c080fb84e103a50da | [
"BSD-3-Clause"
] | 14 | 2021-04-06T14:22:34.000Z | 2022-02-23T15:12:55.000Z | smqtk_classifier/interfaces/classify_image_supervised.py | bardkw/SMQTK-Classifier | 68022a0be089ace123d20c1c080fb84e103a50da | [
"BSD-3-Clause"
] | 3 | 2021-04-02T20:35:41.000Z | 2021-11-09T20:13:46.000Z | import abc
from typing import Mapping, Hashable
from .classify_image import ClassifyImage, IMAGE_ITER_T
from smqtk_classifier.exceptions import ExistingModelError
class ClassifyImageSupervised(ClassifyImage):
"""
Class of classifiers that are trainable via supervised training, i.e. are
given specific Image examples for class labels.
"""
@abc.abstractmethod
def has_model(self) -> bool:
"""
:return: If this instance currently has a model loaded. If no model is
present, classification of images cannot happen (needs to be
trained).
"""
def train(
self,
class_examples: Mapping[Hashable, IMAGE_ITER_T]
) -> None:
"""
Train the supervised classifier model.
If a model is already loaded, we will raise an exception in order to
prevent accidental overwrite.
If the same label is provided to both ``class_examples`` and ``kwds``,
the examples given to the reference in ``kwds`` will prevail.
:param class_examples: Dictionary mapping class labels to iterables of
Image training examples.
:raises ValueError: There were no class examples provided.
:raises ValueError: Less than 2 classes were given.
:raises RuntimeError: A model already exists in this instance.
Following through with training would overwrite this model.
Throwing an exception for information protection.
"""
if self.has_model():
raise ExistingModelError("Instance currently has a model. Halting "
"training to prevent overwrite of "
"existing trained model.")
if not class_examples:
raise ValueError("No class examples were provided.")
elif len(class_examples) < 2:
raise ValueError("Need 2 or more classes for training. Given %d."
% len(class_examples))
return self._train(class_examples)
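    # Added usage note (illustrative, not part of the SMQTK-Classifier source):
    # a concrete subclass implements has_model() and _train(), and callers then
    # drive it roughly like
    #     clf = SomeImageClassifier(...)          # hypothetical subclass
    #     clf.train({'cat': cat_images, 'dog': dog_images})
    #     assert clf.has_model()
    # where the label keys and image iterables are placeholders.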
@abc.abstractmethod
def _train(
self,
class_examples: Mapping[Hashable, IMAGE_ITER_T]
) -> None:
"""
Internal method that trains the classifier implementation.
This method is called after checking that there is not already a model
trained, thus it can be assumed that no model currently exists.
The class labels will have already been checked before entering this
        method, so it can be assumed that the ``class_examples`` will contain
at least two classes.
:param class_examples: Dictionary mapping class labels to iterables of
Image training examples.
"""
| 36.891892 | 79 | 0.645421 |
d2526c0b411ca1179907990347ed5a3b9487c292 | 19,972 | py | Python | utility/entity/character.py | DrLarck/DragonBotZ | eab773d6e55f7f5f325828fe249800193120abaf | [
"MIT"
] | 3 | 2020-05-01T07:38:38.000Z | 2020-06-02T12:03:40.000Z | utility/entity/character.py | DrLarck/DragonBotZ | eab773d6e55f7f5f325828fe249800193120abaf | [
"MIT"
] | 19 | 2020-11-01T22:15:57.000Z | 2021-09-08T15:28:30.000Z | utility/entity/character.py | DrLarck/DragonBotZ | eab773d6e55f7f5f325828fe249800193120abaf | [
"MIT"
] | 1 | 2021-03-05T04:51:21.000Z | 2021-03-05T04:51:21.000Z | """
Character object
--
Author : Drlarck
Last update : 1/11/20 by DrLarck
"""
import asyncio
# util
from utility.graphic.embed import CustomEmbed
from utility.graphic.icon import GameIcon
from utility.graphic.color import GameColor
from utility.entity.ability import Ability
class Character:
def __init__(self, client):
# Public
self.client = client
self.name = ""
self.id = 0
self.unique_id = ""
self.level = 1
self.npc = False # Tells if it's a non playable character
self.posture = 0
self.image = CharacterImage()
self.type = CharacterType()
self.rarity = CharacterRarity()
self.health = CharacterHealth()
self.ki = CharacterKi()
self.damage = CharacterDamage()
self.critical = CharacterCritical()
self.armor = CharacterDefense()
self.spirit = CharacterDefense()
# Items
self.training_item = CharacterTrainingItem(self)
# Abilities
self.ability = []
# Private
self.__embed = CustomEmbed()
# Public method
async def generate(self, name="", char_id=0, level=1,
card="", thumbnail="",
type_value=0, rarity_value=0, health=0,
ki=100, physical=0, ki_power=0,
crit_chance=0, crit_bonus=0, armor_fixed=0,
armor_floating=0, spirit_fixed=0, spirit_floating=0,
ability=[]):
"""
Generate a character instance.
:param name: (`str`)
:param char_id: (`int`)
:param level: (`int`)
:param card: (`url`)
:param thumbnail: (`url`)
:param type_value: (`int`)
:param rarity_value: (`int`)
:param health: (`int`)
:param ki: (`int`)
:param physical: (`int`)
:param ki_power: (`int`)
:param crit_chance: (`int`)
:param crit_bonus: (`int`)
:param armor_fixed: (`int`)
:param armor_floating: (`int`)
:param spirit_fixed: (`int`)
:param spirit_floating: (`int`)
:param ability: (`list`)
--
:return: `Character`
"""
# New character instance
new_char = Character(self.client)
# Init all the attributes
new_char.name = name
new_char.id = char_id
new_char.level = level
# Set bonus per lvl
        level_bonus = pow(1.02, new_char.level-1)  # Default +2 % stat per level
new_char.image.card = card
new_char.image.thumbnail = thumbnail
new_char.type.value = type_value
new_char.rarity.value = rarity_value
new_char.health.maximum = int(health * level_bonus)
new_char.ki.maximum = ki
new_char.damage.physical = int(physical * level_bonus)
new_char.damage.ki = int(ki_power * level_bonus)
new_char.critical.chance = crit_chance
new_char.critical.bonus = crit_bonus
new_char.armor.fixed = int(armor_fixed * level_bonus)
new_char.armor.floating = armor_floating
new_char.spirit.fixed = int(spirit_fixed * level_bonus)
new_char.spirit.floating = spirit_floating
# Get the character's abilities
ability_ref = Ability(self.client)
for ability_id in ability:
await asyncio.sleep(0)
# If the ability id is not an actual ability
if not isinstance(ability_id, Ability):
# Get the id as int
ability_id = int(ability_id)
# Get the ability instance
ability = await ability_ref.get_ability_data(ability_id)
# If the ability has been found, add it to the character
if ability is not None:
new_char.ability.append(ability)
# If the char has no abilities, add passed abilities as parameter
if len(new_char.ability) == 0:
new_char.ability = ability
# Get the icons
new_char.rarity.icon = await GameIcon().get_rarity_icon(new_char.rarity.value)
new_char.type.icon = await GameIcon().get_type_icon(new_char.type.value)
# Return the character
return new_char
async def get_display_card(self, client):
"""
Generate a display card of this character
:param client: (`discord.ext.commands.Bot`)
--
:return: `discord.Embed`
"""
# Init
color = await GameColor().get_rarity_color(self.rarity.value)
embed = await self.__embed.setup(client, color=color)
# Info
info = f"""
__Name__ : **{self.name}**{self.type.icon}
__Reference__ : `#{self.id}`
__Rarity__ : {self.rarity.icon}
"""
embed.add_field(name="Info :", value=info, inline=False)
embed.set_image(url=self.image.card)
return embed
async def get_combat_card(self, client, team_index):
"""
Return the combat format display card
:param client: (`discord.ext.commands.Bot`)
:param team_index: (`int`)
--
:return: `Embed`
"""
# Init
color = GameColor()
if team_index == 0:
color = color.player_a
else:
color = color.player_b
# Thumbnail
# If the thumbnail is not defined, use the card image
if self.image.thumbnail == "":
thumb = self.image.card
# Use the defined thumbnail image
else:
thumb = self.image.thumbnail
embed = await self.__embed.setup(client, color=color, thumbnail_url=thumb)
# Setting up the character display
display_info = f"""
__Name__ : {self.image.icon}**{self.name}**{self.type.icon}
__Level__ : {self.level:,}
__Health__ : **{self.health.current:,}**/{self.health.maximum:,} :hearts:
__Ki__ : **{self.ki.current}**/{self.ki.maximum} :fire:
"""
# Damage
phy_min = await self.damage.get_physical_min()
ki_min = await self.damage.get_ki_min()
display_damage = f"""
__Physical__ : **{phy_min:,}** - **{self.damage.physical:,}** :punch:
__Ki power__ : **{ki_min:,}** - **{self.damage.ki:,}** ☄️
"""
# Defense
display_defense = f"""
__Armor__ : **{self.armor.fixed:,}** | **{self.armor.floating:,} %** :shield:
__Spirit__ : **{self.spirit.fixed:,}** | **{self.spirit.floating:,} %** 🏵️
"""
# Fields
embed.add_field(name=f"**{self.name}** info",
value=display_info,
inline=False)
embed.add_field(name="Damage",
value=display_damage,
inline=False)
embed.add_field(name="Defense",
value=display_defense,
inline=False)
return embed
async def init(self):
"""
Init the character for combat purpose.
--
:return: `None`
"""
# Init health
await self.health.init()
# Init abilities
for ability in self.ability:
await asyncio.sleep(0)
await ability.init(self)
return
async def is_playable(self):
"""
Tells if the character is playable or not
--
:return: `bool`
"""
# Init
playable = True
# If the character is stunned
if self.posture == 3:
playable = False
# If the character is dead
elif self.health.current <= 0:
playable = False
        # If the character has a normal posture
else:
playable = True
return playable
class CharacterImage:
def __init__(self):
# Public
self.card = ""
self.thumbnail = ""
self.icon = ""
class CharacterType:
def __init__(self):
# Public
self.value = 0
self.icon = ""
class CharacterRarity:
def __init__(self):
# Public
self.value = 0
self.icon = ""
class CharacterHealth:
def __init__(self):
# Public
self.maximum = 0
self.current = 0
# Public method
async def init(self):
"""
Init the current health
--
:return: `None`
"""
self.current = self.maximum
return
async def limit(self):
"""
        Prevent the current health from dropping below 0 or rising above the maximum health
--
:return: `None`
"""
if self.current < 0:
self.current = 0
if self.current > self.maximum:
self.current = self.maximum
return
class CharacterKi:
def __init__(self):
# Public
self.maximum = 0
self.current = 0
# Public method
async def limit(self):
"""
        Prevent the current ki value from dropping below 0 or rising above the maximum
--
:return: `None`
"""
if self.current < 0:
self.current = 0
if self.current > self.maximum:
self.current = self.maximum
return
class CharacterDamage:
def __init__(self):
# Public
self.physical = 0
self.ki = 0
# Private
# This represents the difference in % between the max value and the min value
# For example, if the range is set to 10 and the max value is 100
# The min value would be 90 and max 100
self.__physical_range = 10
self.__ki_range = 10
# Public method
async def get_physical_min(self):
"""
Return the minimal value of the physical damage range
--
:return: `int`
"""
minimal = self.physical * (1 - (self.__physical_range / 100))
return int(minimal)
async def get_ki_min(self):
"""
Return the minimal value of the ki damage range
--
        :return: `int`
"""
minimal = self.ki * (1 - (self.__ki_range / 100))
return int(minimal)
class CharacterCritical:
def __init__(self):
# Public
self.chance = 0
self.bonus = 0
class CharacterDefense:
def __init__(self):
# Public
self.fixed = 0
self.floating = 0
class CharacterTrainingItem:
def __init__(self, character):
"""
:param character: (`Character`)
"""
# Public
self.character = character
self.equipped = []
# Private
self.__database = self.character.client.database
# Private
async def __get_equipped(self):
"""
Get the equipped training items
--
:return: `None`
"""
# Get the equipped items' unique id
unique_items = await self.__database.fetch_value("""
SELECT training_item
FROM character_unique
WHERE character_unique_id = $1;
""", [self.character.unique_id])
# Get the list of items
unique_items = unique_items.split()
# Set the equipped list
self.equipped = unique_items
return
# Public
async def apply_effect(self):
"""
Apply the equipped items effects on the character
--
:return: `None`
"""
# Apply the effect of each items
for item in self.equipped:
await asyncio.sleep(0)
await item.apply_effect(self)
return
class CharacterGetter:
# Private
__cache = []
__cache_ok = False # Indicates if the cache has already been filled
# Public
async def get_cache_size(self):
"""Return the cache size
--
@return int"""
return len(self.__cache)
async def set_cache(self, client):
"""
Set the character cache
:param client: object discord.Bot
:param context: object discord.ext.commands.Context
--
:return: `None`
"""
if self.__cache_ok is False:
data = await client.database.fetch_row("""
SELECT *
FROM character_reference
ORDER BY reference;
""")
if len(data) > 0:
# Storing each character in the cache as Character objects
for character in data:
await asyncio.sleep(0)
# Get the set of character's abilities
ability_set = character[15]
ability_set = ability_set.split()
character = await Character(client).generate(
char_id=character[0], name=character[1], type_value=character[2],
rarity_value=character[3], card=character[4], thumbnail=character[4],
health=character[5], ki=character[6], physical=character[7],
ki_power=character[8], armor_fixed=character[9], armor_floating=character[10],
spirit_fixed=character[11], spirit_floating=character[12],
ability=ability_set
)
self.__cache.append(character)
# Cache has been filled
self.__cache_ok = True
print("Character Cache : DONE")
else: # The cache has already been filled
print("Character Cache : The cache has already been filled.")
return
async def get_reference_character(self, reference, client, level=1):
"""
Get a base character
:param reference: (`int`)
@param int level
@param object discord.ext.commands.Bot client
--
:return: `Character` or `None`
"""
# Get the character from the cache
if reference > 0 and reference - 1 < len(self.__cache):
char = self.__cache[reference - 1]
copy = await Character(client).generate(
char_id=char.id, level=level, name=char.name, card=char.image.card,
thumbnail=char.image.thumbnail, type_value=char.type.value,
rarity_value=char.rarity.value, health=char.health.maximum,
ki=char.ki.maximum, physical=char.damage.physical, ki_power=char.damage.ki,
armor_fixed=char.armor.fixed, armor_floating=char.armor.floating,
spirit_fixed=char.spirit.fixed, spirit_floating=char.spirit.floating,
ability=char.ability
)
await copy.init()
return copy
else:
print(f"Character {reference} not found.")
return None
async def get_from_unique(self, client, database, unique_id):
"""
Get a Character object from a unique id
:param client: discord.ext.commands.Bot
:param database: (`Database`)
:param unique_id: (`str`)
--
:return: `Character` or `None` if not found
"""
character_row = await database.fetch_row("""
SELECT *
FROM character_unique
WHERE character_unique_id = $1;
""", [unique_id])
# If the character exists
if len(character_row) > 0:
character_row = character_row[0]
# If the character doesn't exist
else:
return
if character_row is not None:
# Get the character object according to the character's reference
character = await self.get_reference_character(character_row[1], client)
# Create a copy of the character
copy = await character.generate(
name=character.name, char_id=character.id,
level=character_row[6], card=character.image.card,
thumbnail=character.image.thumbnail,
type_value=character.type.value,
rarity_value=character.rarity.value,
health=character.health.maximum, ki=character.ki.maximum,
physical=character.damage.physical, ki_power=character.damage.ki,
armor_fixed=character.armor.fixed, armor_floating=character.armor.floating,
spirit_fixed=character.spirit.fixed, spirit_floating=character.spirit.floating,
ability=character.ability
)
await copy.init()
return copy
return
class CharacterExperience:
def __init__(self, client):
self.client = client
self.__database = self.client.database
async def add_experience(self, unique_id, amount):
"""Add experience points to the character
@param str unique_id
@param int amount
--
@return int or None as new character level"""
# Get the character's experience
get_exp = """SELECT character_experience
FROM character_unique
WHERE character_unique_id = $1;"""
character_exp = await self.__database.fetch_value(get_exp, [unique_id])
# Add the amount of exp to the character experience
character_exp += amount
# Check if the character levels up
# returns the updated amount of exp
        # and the new character level if it has changed
character_exp, new_level = await self.level_up(unique_id, character_exp)
# Update character xp
update_exp = """UPDATE character_unique
SET character_experience = $1
WHERE character_unique_id = $2;"""
await self.__database.execute(update_exp, [character_exp, unique_id])
return new_level
async def level_up(self, unique_id, experience):
"""Update the character level according to its current level
and the amount of exp that it has
@param str unique_id
@param int experience
--
@return int new amount of experience"""
# Level up formula
# level 1 character has to collect 100 exp points
# to level up to the level 2
# the amount of exp needed is increased by 10 % per level
# formula is :
# next_level : level -> 100 * (1.1) ^ level
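        # Added worked example of the formula as implemented below: at level 10
        # the next level costs int(100 * 1.1 ** 10) = 259 exp, and at level 20
        # it costs int(100 * 1.1 ** 20) = 672 exp.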
        # Get the character's information
character_level = """SELECT character_level
FROM character_unique
WHERE character_unique_id = $1;"""
level = await self.__database.fetch_value(character_level, [unique_id])
old_level = level
# Get the required amount of exp to reach the next level
next_level = int(100 * pow(1.1, level))
# Check if the character has enough exp to reach the next level
# repeat it until the character experience is inferior to the
# next level
while experience >= next_level and level < 150:
await asyncio.sleep(0)
level += 1
experience -= next_level
# Get the required amount of exp to reach the next level
next_level = int(100 * pow(1.1, level))
# Update the character level
update_level = """UPDATE character_unique
SET character_level = $1
WHERE character_unique_id = $2;"""
await self.__database.execute(update_level, [level, unique_id])
# Check if the character has leveled up
new_level = None
if level != old_level:
new_level = level
return experience, new_level
| 26.558511 | 102 | 0.553775 |
4bcc2351ed984ebbc31c6c94f0486411722de37b | 3,630 | py | Python | redis_client.py | jinserk/pytorch-redis | 0b0bfabc9241f941bc39a2e695943b5ebd6b4fcb | [
"MIT"
] | 4 | 2020-04-27T00:47:35.000Z | 2021-04-12T07:52:20.000Z | redis_client.py | jinserk/pytorch-redis | 0b0bfabc9241f941bc39a2e695943b5ebd6b4fcb | [
"MIT"
] | null | null | null | redis_client.py | jinserk/pytorch-redis | 0b0bfabc9241f941bc39a2e695943b5ebd6b4fcb | [
"MIT"
] | 1 | 2020-04-26T17:51:12.000Z | 2020-04-26T17:51:12.000Z | import sys
import io
import redis
import torch
from tqdm.auto import tqdm
ver = sys.version_info
if ver >= (3, 8):
PICKLE_VERSION = 5
else:
PICKLE_VERSION = 4
CXN = redis.ConnectionPool(host='localhost', port=6379, db=0)
class RedisListObject:
def __init__(self, name):
self.name = name
def __len__(self):
with redis.StrictRedis(connection_pool=CXN) as rdb:
return rdb.llen(self.name)
def __setitem__(self, index, value):
with redis.StrictRedis(connection_pool=CXN) as rdb:
if index >= rdb.llen(self.name):
raise IndexError
with io.BytesIO() as buf:
torch.save(value, buf, pickle_protocol=PICKLE_VERSION, _use_new_zipfile_serialization=True)
if PICKLE_VERSION >= 5:
rdb.lset(self.name, index, buf.getbuffer())
else:
rdb.lset(self.name, index, buf.getvalue())
def __getitem__(self, index):
with redis.StrictRedis(connection_pool=CXN) as rdb:
if not rdb.exists(self.name):
raise redis.DataError(f'Dataset named {self.name} does not exist')
if index >= rdb.llen(self.name):
raise IndexError
with io.BytesIO(rdb.lindex(self.name, index)) as buf:
return torch.load(buf)
def append(self, value):
with io.BytesIO() as buf:
torch.save(value, buf, pickle_protocol=PICKLE_VERSION, _use_new_zipfile_serialization=True)
#print(len(buf.getvalue()))
with redis.StrictRedis(connection_pool=CXN) as rdb:
func = rdb.rpush if rdb.exists(self.name) else rdb.lpush
if PICKLE_VERSION >= 5:
func(self.name, buf.getbuffer())
else:
func(self.name, buf.getvalue())
def delete(self):
with redis.StrictRedis(connection_pool=CXN) as rdb:
if rdb.exists(self.name):
rdb.delete(self.name)
else:
raise redis.DataError(f'Dataset named {self.name} does not exist')
class RedisClient:
def get(self, key):
with redis.StrictRedis(connection_pool=CXN) as rdb:
if rdb.exists(key):
return RedisListObject(key)
else:
raise redis.DataError(f'Dataset named {key} does not exist')
def set_data_list(self, key, values):
try:
obj = self.get(key)
obj.delete()
except:
obj = RedisListObject(key)
for item in tqdm(values, desc=f"storing {key}", dynamic_ncols=True):
obj.append(item)
def keys(self):
with redis.StrictRedis(connection_pool=CXN) as rdb:
return rdb.keys()
def stats(self):
with redis.StrictRedis(connection_pool=CXN) as rdb:
try:
return rdb.memory_stats()
except:
return rdb.execute_command('MEMORY STATS')
    def check_lens(self, nums):
        # Assumed intent: `nums` maps key -> expected length; verify that each
        # stored list matches it (returns True on success, False otherwise).
        try:
            for k, v in nums.items():
                obj = self.get(k)
                if v != len(obj):
                    return False
        except:
            return False
        return True
def flushdb(self):
with redis.StrictRedis(connection_pool=CXN) as rdb:
rdb.flushdb()
if __name__ == "__main__":
c = RedisClient()
print(c.stats())
data_list = [tuple(torch.rand(10, 10) for _ in range(10)) for _ in range(10)]
c.set_data_list("test", data_list)
print(c.get("test")[0], c.get("test")[1])
c.flushdb()
print(c.stats())
| 30.25 | 107 | 0.570799 |
40b9f37281ab453ec80fdbfb793fdeb4eb4fa22c | 385 | py | Python | squid/squid/wsgi.py | DjangoNYC/squid | e9776df722d6c4d8e43738c053c610475f73f0db | [
"MIT"
] | null | null | null | squid/squid/wsgi.py | DjangoNYC/squid | e9776df722d6c4d8e43738c053c610475f73f0db | [
"MIT"
] | null | null | null | squid/squid/wsgi.py | DjangoNYC/squid | e9776df722d6c4d8e43738c053c610475f73f0db | [
"MIT"
] | null | null | null | """
WSGI config for squid project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "squid.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 25.666667 | 78 | 0.787013 |
db52ca4f3f3c18c1766ac7c1d8ba45ca69e7c21b | 504 | py | Python | custom_components/samsungtv_custom/samsungctl_080b/upnp/UPNP_Device/instance_singleton.py | AdamOttvar/ha-samsungtv-custom | da6bf9349d1e33bf143a115b4f2a3754d6754472 | [
"Apache-2.0"
] | 117 | 2019-10-08T05:39:44.000Z | 2022-01-31T15:43:08.000Z | custom_components/samsungtv_custom/samsungctl_080b/upnp/UPNP_Device/instance_singleton.py | AdamOttvar/ha-samsungtv-custom | da6bf9349d1e33bf143a115b4f2a3754d6754472 | [
"Apache-2.0"
] | 76 | 2019-10-08T06:04:08.000Z | 2022-02-26T18:47:22.000Z | custom_components/samsungtv_custom/samsungctl_080b/upnp/UPNP_Device/instance_singleton.py | AdamOttvar/ha-samsungtv-custom | da6bf9349d1e33bf143a115b4f2a3754d6754472 | [
"Apache-2.0"
] | 59 | 2019-10-08T06:32:37.000Z | 2022-03-14T23:14:07.000Z | # -*- coding: utf-8 -*-
class InstanceSingleton(type):
_objects = {}
def __call__(cls, id, *args, **kwargs):
if id not in InstanceSingleton._objects:
InstanceSingleton._objects[id] = (
super(InstanceSingleton, cls).__call__(id, *args, **kwargs)
)
else:
try:
InstanceSingleton._objects[id](id, *args, **kwargs)
except TypeError:
pass
return InstanceSingleton._objects[id]
| 25.2 | 75 | 0.543651 |
9a507658b9e6ee9a2fe51fc1c8bc6f45cd44fa28 | 813 | py | Python | reddit/processing/features.py | yusueliu/reddit | e598a7ba783fa0b67063355e61c2017a5e58a6f5 | [
"MIT"
] | null | null | null | reddit/processing/features.py | yusueliu/reddit | e598a7ba783fa0b67063355e61c2017a5e58a6f5 | [
"MIT"
] | null | null | null | reddit/processing/features.py | yusueliu/reddit | e598a7ba783fa0b67063355e61c2017a5e58a6f5 | [
"MIT"
] | null | null | null | import spacy
from sklearn.base import BaseEstimator, TransformerMixin
nlp = spacy.load('en_core_web_sm')
class TextTokenizer(BaseEstimator, TransformerMixin):
def __init__(self, variable=None, stopword_exceptions=None):
self.variable = variable
if stopword_exceptions:
nlp.Defaults.stop_words -= set(list(stopword_exceptions))
def _lemmatize_and_remove_stop_words(self, text):
return [t.lemma_ for t in nlp(text) if not t.is_stop and len(t.lemma_) > 1]
def _normalize(self, text):
words = self._lemmatize_and_remove_stop_words(text)
return ' '.join(words)
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.copy()
X = X[self.variable].apply(self._normalize)
return X
| 32.52 | 83 | 0.661747 |
acad58b07a967d64f187f382f9f5da23a6ebd327 | 7,663 | py | Python | webexteamssdk/docs/conf.py | Steeve135/WebexBot | 6188ca2cfccd8885c5c2e492f17a6e935dee416e | [
"MIT"
] | null | null | null | webexteamssdk/docs/conf.py | Steeve135/WebexBot | 6188ca2cfccd8885c5c2e492f17a6e935dee416e | [
"MIT"
] | 4 | 2020-03-24T16:20:45.000Z | 2021-06-01T22:56:24.000Z | webexteamssdk/docs/conf.py | Steeve135/WebexBot | 6188ca2cfccd8885c5c2e492f17a6e935dee416e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from webexteamssdk._version import get_versions
project = u'webexteamssdk'
copyright = u'Copyright (c) 2016-2018 Cisco and/or its affiliates.'
author = u'Chris Lunsford'
version = get_versions()['version']
release = get_versions()['version']
language = None
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
master_doc = 'index'
source_suffix = '.rst'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
templates_path = ['_templates']
pygments_style = 'sphinx'
add_module_names = False
autodoc_member_order = 'bysource'
# autodoc_default_flags = ['members', 'undoc-members']
autodoc_default_options = {
'members': None,
'undoc-members': None,
}
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'webexteamssdk vv0.3'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'webexteamssdkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'webexteamssdk.tex', u'webexteamssdk Documentation',
u'Chris Lunsford', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'webexteamssdk', u'webexteamssdk Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'webexteamssdk', u'webexteamssdk Documentation',
author, 'webexteamssdk', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 26.424138 | 80 | 0.693984 |
47dce01e1c6fa7a4a3e9f5d7456def5096257256 | 20,915 | py | Python | tests/python/test_ast_refactor.py | Aaronoooooo/taichi | 4ed04254f1cbf6f99628e2ee32464f821837c5ff | [
"MIT"
] | 1 | 2021-11-25T11:05:27.000Z | 2021-11-25T11:05:27.000Z | tests/python/test_ast_refactor.py | kuangzihan/taichi | 5d1122ff126893dc1f2fd3950eddb1600711c137 | [
"MIT"
] | null | null | null | tests/python/test_ast_refactor.py | kuangzihan/taichi | 5d1122ff126893dc1f2fd3950eddb1600711c137 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
import taichi as ti
from taichi import approx
@ti.test()
def test_binop():
@ti.kernel
def foo(x: ti.i32, y: ti.i32, a: ti.template()):
a[0] = x + y
a[1] = x - y
a[2] = x * y
a[3] = ti.ti_float(x) / y
a[4] = x // y
a[5] = x % y
a[6] = x**y
a[7] = x << y
a[8] = x >> y
a[9] = x | y
a[10] = x ^ y
a[11] = x & y
x = 37
y = 3
a = ti.field(ti.f32, shape=(12, ))
b = ti.field(ti.f32, shape=(12, ))
a[0] = x + y
a[1] = x - y
a[2] = x * y
a[3] = x / y
a[4] = x // y
a[5] = x % y
a[6] = x**y
a[7] = x << y
a[8] = x >> y
a[9] = x | y
a[10] = x ^ y
a[11] = x & y
foo(x, y, b)
for i in range(12):
assert a[i] == approx(b[i])
@ti.test()
def test_augassign():
@ti.kernel
def foo(x: ti.i32, y: ti.i32, a: ti.template(), b: ti.template()):
for i in a:
a[i] = x
a[0] += y
a[1] -= y
a[2] *= y
a[3] //= y
a[4] %= y
a[5] **= y
a[6] <<= y
a[7] >>= y
a[8] |= y
a[9] ^= y
a[10] &= y
b[0] = x
b[0] /= y
x = 37
y = 3
a = ti.field(ti.i32, shape=(11, ))
b = ti.field(ti.i32, shape=(11, ))
c = ti.field(ti.f32, shape=(1, ))
d = ti.field(ti.f32, shape=(1, ))
a[0] = x + y
a[1] = x - y
a[2] = x * y
a[3] = x // y
a[4] = x % y
a[5] = x**y
a[6] = x << y
a[7] = x >> y
a[8] = x | y
a[9] = x ^ y
a[10] = x & y
c[0] = x / y
foo(x, y, b, d)
for i in range(11):
assert a[i] == b[i]
assert c[0] == approx(d[0])
@ti.test()
def test_unaryop():
@ti.kernel
def foo(x: ti.i32, a: ti.template()):
a[0] = +x
a[1] = -x
a[2] = not x
a[3] = ~x
x = 1234
a = ti.field(ti.i32, shape=(4, ))
b = ti.field(ti.i32, shape=(4, ))
a[0] = +x
a[1] = -x
a[2] = not x
a[3] = ~x
foo(x, b)
for i in range(4):
assert a[i] == b[i]
@ti.test()
def test_boolop():
@ti.kernel
def foo(a: ti.template()):
a[0] = 0 and 0
a[1] = 0 and 1
a[2] = 1 and 0
a[3] = 1 and 1
a[4] = 0 or 0
a[5] = 0 or 1
a[6] = 1 or 0
a[7] = 1 or 1
a[8] = 1 and 1 and 1 and 1
a[9] = 1 and 1 and 1 and 0
a[10] = 0 or 0 or 0 or 0
a[11] = 0 or 0 or 1 or 0
a = ti.field(ti.i32, shape=(12, ))
b = ti.field(ti.i32, shape=(12, ))
a[0] = 0 and 0
a[1] = 0 and 1
a[2] = 1 and 0
a[3] = 1 and 1
a[4] = 0 or 0
a[5] = 0 or 1
a[6] = 1 or 0
a[7] = 1 or 1
a[8] = 1 and 1 and 1 and 1
a[9] = 1 and 1 and 1 and 0
a[10] = 0 or 0 or 0 or 0
a[11] = 0 or 0 or 1 or 0
foo(b)
for i in range(12):
assert a[i] == b[i]
@ti.test()
def test_compare_fail():
with pytest.raises(ti.TaichiSyntaxError) as e:
@ti.kernel
def foo():
1 in [1]
foo()
assert e.value.args[0] == '"In" is not supported in Taichi kernels.'
@ti.test()
def test_single_compare():
@ti.kernel
def foo(a: ti.template(), b: ti.template(), c: ti.template()):
for i in ti.static(range(3)):
c[i * 6] = a[i] == b[i]
c[i * 6 + 1] = a[i] != b[i]
c[i * 6 + 2] = a[i] < b[i]
c[i * 6 + 3] = a[i] <= b[i]
c[i * 6 + 4] = a[i] > b[i]
c[i * 6 + 5] = a[i] >= b[i]
a = ti.Vector([1, 1, 2])
b = ti.Vector([2, 1, 1])
c = ti.field(ti.i32, shape=(18, ))
d = ti.field(ti.i32, shape=(18, ))
for i in range(3):
c[i * 6] = a[i] == b[i]
c[i * 6 + 1] = a[i] != b[i]
c[i * 6 + 2] = a[i] < b[i]
c[i * 6 + 3] = a[i] <= b[i]
c[i * 6 + 4] = a[i] > b[i]
c[i * 6 + 5] = a[i] >= b[i]
foo(a, b, d)
for i in range(18):
assert c[i] == d[i]
@ti.test()
def test_chain_compare():
@ti.kernel
def foo(a: ti.i32, b: ti.i32, c: ti.template()):
c[0] = a == b == a
c[1] = a == b != a
c[2] = a != b == a
c[3] = a < b > a
c[4] = a > b < a
c[5] = a < b < a
c[6] = a > b > a
c[7] = a == a == a == a
c[8] = a == a == a != a
c[9] = a < b > a < b
c[10] = a > b > a < b
a = 1
b = 2
c = ti.field(ti.i32, shape=(11, ))
d = ti.field(ti.i32, shape=(11, ))
c[0] = a == b == a
c[1] = a == b != a
c[2] = a != b == a
c[3] = a < b > a
c[4] = a > b < a
c[5] = a < b < a
c[6] = a > b > a
c[7] = a == a == a == a
c[8] = a == a == a != a
c[9] = a < b > a < b
c[10] = a > b > a < b
foo(a, b, d)
for i in range(11):
assert c[i] == d[i]
@ti.test()
def test_return():
@ti.kernel
def foo(x: ti.i32) -> ti.i32:
return x + 1
assert foo(1) == 2
@ti.test()
def test_format_print():
a = ti.field(ti.i32, shape=(10, ))
@ti.kernel
def foo():
a[0] = 1.0
a[5] = 2.0
print('Test if the string.format and fstring print works')
print('string.format: a[0]={}, a[5]={}'.format(a[0], a[5]))
print(f'fstring: a[0]={a[0]}, a[5]={a[5]}')
@ti.test(print_preprocessed_ir=True)
def test_if():
@ti.kernel
def foo(x: ti.i32) -> ti.i32:
ret = 0
if x:
ret = 1
else:
ret = 0
return ret
assert foo(1)
assert not foo(0)
@ti.test(print_preprocessed_ir=True)
def test_static_if():
@ti.kernel
def foo(x: ti.template()) -> ti.i32:
ret = 0
if ti.static(x):
ret = 1
else:
ret = 0
return ret
assert foo(1)
assert not foo(0)
@ti.test(print_preprocessed_ir=True)
def test_struct_for():
a = ti.field(ti.i32, shape=(10, ))
@ti.kernel
def foo(x: ti.i32):
for i in a:
a[i] = x
x = 5
foo(x)
for i in range(10):
assert a[i] == 5
@ti.test(print_preprocessed_ir=True)
def test_grouped_struct_for():
a = ti.field(ti.i32, shape=(4, 4))
@ti.kernel
def foo(x: ti.i32):
for I in ti.grouped(a):
a[I] = x
x = 5
foo(x)
for i in range(4):
for j in range(4):
assert a[i, j] == 5
@ti.test(print_preprocessed_ir=True)
def test_static_for():
a = ti.field(ti.i32, shape=(10, ))
@ti.kernel
def foo(x: ti.i32):
for i in ti.static(range(10)):
a[i] = x
x = 5
foo(x)
for i in range(10):
assert a[i] == 5
@ti.test(print_preprocessed_ir=True)
def test_static_grouped_for():
a = ti.field(ti.i32, shape=(4, 4))
@ti.kernel
def foo(x: ti.i32):
for i in ti.static(ti.grouped(ti.ndrange((1, 3), (1, 3)))):
a[i] = x
x = 5
foo(x)
for i in range(4):
for j in range(4):
if 1 <= i < 3 and 1 <= j < 3:
assert a[i, j] == 5
else:
assert a[i, j] == 0
@ti.test(print_preprocessed_ir=True)
def test_range_for_single_argument():
a = ti.field(ti.i32, shape=(10, ))
@ti.kernel
def foo(x: ti.i32):
for i in range(5):
a[i] = x
x = 5
foo(x)
for i in range(10):
if i < 5:
assert a[i] == 5
else:
assert a[i] == 0
@ti.test(print_preprocessed_ir=True)
def test_range_for_two_arguments():
a = ti.field(ti.i32, shape=(10, ))
@ti.kernel
def foo(x: ti.i32):
for i in range(3, 7):
a[i] = x
x = 5
foo(x)
for i in range(10):
if 3 <= i < 7:
assert a[i] == 5
else:
assert a[i] == 0
@ti.test()
def test_range_for_three_arguments():
a = ti.field(ti.i32, shape=(10, ))
with pytest.raises(ti.TaichiSyntaxError) as e:
@ti.kernel
def foo(x: ti.i32):
for i in range(3, 7, 2):
a[i] = x
x = 5
foo(x)
assert e.value.args[0] == "Range should have 1 or 2 arguments, found 3"
@ti.test(print_preprocessed_ir=True)
def test_ndrange_for():
x = ti.field(ti.f32, shape=(16, 32, 64))
@ti.kernel
def func():
for i, j, k in ti.ndrange((4, 10), (3, 8), 17):
x[i, j, k] = i + j * 10 + k * 100
func()
for i in range(16):
for j in range(32):
for k in range(64):
if 4 <= i < 10 and 3 <= j < 8 and k < 17:
assert x[i, j, k] == i + j * 10 + k * 100
else:
assert x[i, j, k] == 0
@ti.test(print_preprocessed_ir=True)
def test_grouped_ndrange_for():
x = ti.field(ti.i32, shape=(6, 6, 6))
y = ti.field(ti.i32, shape=(6, 6, 6))
@ti.kernel
def func():
lower = ti.Vector([0, 1, 2])
upper = ti.Vector([3, 4, 5])
for I in ti.grouped(
ti.ndrange((lower[0], upper[0]), (lower[1], upper[1]),
(lower[2], upper[2]))):
x[I] = I[0] + I[1] + I[2]
for i in range(0, 3):
for j in range(1, 4):
for k in range(2, 5):
y[i, j, k] = i + j + k
func()
for i in range(6):
for j in range(6):
for k in range(6):
assert x[i, j, k] == y[i, j, k]
@ti.test(print_preprocessed_ir=True)
def test_static_for_break():
n = 10
@ti.kernel
def foo(a: ti.template()):
for i in ti.static(range(n)):
a[i] = 3
if ti.static(i >= 5):
break
a[i] = 10
a[i] = 5
a = ti.field(ti.i32, shape=(n, ))
foo(a)
for i in range(n):
if i < 5:
assert a[i] == 5
elif i == 5:
assert a[i] == 3
else:
assert a[i] == 0
@ti.test(print_preprocessed_ir=True)
def test_static_grouped_for_break():
n = 4
@ti.kernel
def foo(a: ti.template()):
for I in ti.static(ti.grouped(ti.ndrange(n, n))):
a[I] = 3
if ti.static(I[0] >= 3):
break
a[I] = 10
a[I] = 5
a = ti.field(ti.i32, shape=(n, n))
foo(a)
for i in range(n):
for j in range(n):
if i < 3:
assert a[i, j] == 5
elif i == 3 and j == 0:
assert a[i, j] == 3
else:
assert a[i, j] == 0
@ti.test(print_preprocessed_ir=True)
def test_static_for_continue():
n = 10
@ti.kernel
def foo(a: ti.template()):
for i in ti.static(range(n)):
a[i] = 3
if ti.static(i >= 5):
continue
a[i] = 10
a[i] = 5
a = ti.field(ti.i32, shape=(n, ))
foo(a)
for i in range(n):
if i < 5:
assert a[i] == 5
else:
assert a[i] == 3
@ti.test(print_preprocessed_ir=True)
def test_static_grouped_for_continue():
n = 4
@ti.kernel
def foo(a: ti.template()):
for I in ti.static(ti.grouped(ti.ndrange(n, n))):
a[I] = 3
if ti.static(I[0] >= 3):
continue
a[I] = 10
a[I] = 5
a = ti.field(ti.i32, shape=(n, n))
foo(a)
for i in range(n):
for j in range(n):
if i < 3:
assert a[i, j] == 5
else:
assert a[i, j] == 3
@ti.test(print_preprocessed_ir=True)
def test_for_break():
n = 4
@ti.kernel
def foo(a: ti.template()):
for i in range(n):
for j in range(n):
a[i, j] = 3
if i >= 3:
break
a[i, j] = 10
a[i, j] = 5
a = ti.field(ti.i32, shape=(n, n))
foo(a)
for i in range(n):
for j in range(n):
if i < 3:
assert a[i, j] == 5
elif i == 3 and j == 0:
assert a[i, j] == 3
else:
assert a[i, j] == 0
@ti.test(print_preprocessed_ir=True)
def test_for_continue():
n = 4
@ti.kernel
def foo(a: ti.template()):
for i in range(n):
for j in range(n):
a[i, j] = 3
if i >= 3:
continue
a[i, j] = 10
a[i, j] = 5
a = ti.field(ti.i32, shape=(n, n))
foo(a)
for i in range(n):
for j in range(n):
if i < 3:
assert a[i, j] == 5
else:
assert a[i, j] == 3
@ti.test()
def test_while():
x = ti.field(ti.f32)
N = 1
ti.root.dense(ti.i, N).place(x)
@ti.kernel
def func():
i = 0
s = 0
while i < 10:
s += i
i += 1
x[0] = s
func()
assert x[0] == 45
@ti.test()
def test_while_break():
ret = ti.field(ti.i32, shape=())
@ti.kernel
def func():
i = 0
s = 0
while True:
s += i
i += 1
if i > 10:
break
ret[None] = s
func()
assert ret[None] == 55
@ti.test()
def test_while_continue():
ret = ti.field(ti.i32, shape=())
@ti.kernel
def func():
i = 0
s = 0
while i < 10:
i += 1
if i % 2 == 0:
continue
s += i
ret[None] = s
func()
assert ret[None] == 25
@ti.test(print_preprocessed_ir=True)
def test_func():
@ti.func
def bar(x):
return x * x, -x
a = ti.field(ti.i32, shape=(10, ))
b = ti.field(ti.i32, shape=(10, ))
@ti.kernel
def foo():
for i in a:
a[i], b[i] = bar(i)
foo()
for i in range(10):
assert a[i] == i * i
assert b[i] == -i
@ti.test(print_preprocessed_ir=True)
def test_func_in_python_func():
@ti.func
def bar(x: ti.template()):
if ti.static(x):
mat = bar(x // 2)
mat = mat @ mat
if ti.static(x % 2):
mat = mat @ ti.Matrix([[1, 1], [1, 0]])
return mat
else:
return ti.Matrix([[1, 0], [0, 1]])
def fibonacci(x):
return ti.subscript(bar(x), 1, 0)
@ti.kernel
def foo(x: ti.template()) -> ti.i32:
return fibonacci(x)
fib = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
for i in range(10):
assert foo(i) == fib[i]
@ti.test(print_preprocessed_ir=True)
def test_ifexp():
@ti.kernel
def foo(x: ti.i32) -> ti.i32:
return 1 if x else 0
assert foo(1) == 1
assert foo(0) == 0
@ti.test(print_preprocessed_ir=True)
def test_static_ifexp():
@ti.kernel
def foo(x: ti.template()) -> ti.i32:
return 1 if ti.static(x) else 0
assert foo(1) == 1
assert foo(0) == 0
@ti.test()
def test_static_assign():
a = ti.field(ti.i32, shape=(1, ))
b = ti.field(ti.i32, shape=(1, ))
@ti.kernel
def foo(xx: ti.template(), yy: ti.template()) -> ti.i32:
x, y = ti.static(xx, yy)
x[0] -= 1
y[0] -= 1
return x[0] + y[0]
a[0] = 2
b[0] = 3
assert foo(a, b) == 3
@ti.test()
def test_static_assign_element():
with pytest.raises(ti.TaichiSyntaxError) as e:
@ti.kernel
def foo():
a = ti.static([1, 2, 3])
a[0] = ti.static(2)
foo()
assert e.value.args[
0] == "Static assign cannot be used on elements in arrays"
@ti.test()
def test_recreate_variable():
with pytest.raises(ti.TaichiSyntaxError) as e:
@ti.kernel
def foo():
a = 1
a = ti.static(2)
foo()
assert e.value.args[0] == "Recreating variables is not allowed"
@ti.test()
def test_taichi_other_than_ti():
import taichi as tc
@tc.func
def bar(x: tc.template()):
if tc.static(x):
mat = bar(x // 2)
mat = mat @ mat
if tc.static(x % 2):
mat = mat @ tc.Matrix([[1, 1], [1, 0]])
return mat
else:
return tc.Matrix([[1, 0], [0, 1]])
def fibonacci(x):
return tc.subscript(bar(x), 1, 0)
@tc.kernel
def foo(x: tc.template()) -> tc.i32:
return fibonacci(x)
fib = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
for i in range(10):
assert foo(i) == fib[i]
@ti.test(require=ti.extension.assertion, debug=True, gdb_trigger=False)
def test_assert_message():
@ti.kernel
def func():
x = 20
assert 10 <= x < 20, 'Foo bar'
with pytest.raises(RuntimeError, match='Foo bar'):
func()
@ti.test(require=ti.extension.assertion, debug=True, gdb_trigger=False)
def test_assert_message_formatted():
x = ti.field(dtype=int, shape=16)
x[10] = 42
@ti.kernel
def assert_formatted():
for i in x:
assert x[i] == 0, 'x[%d] expect=%d got=%d' % (i, 0, x[i])
@ti.kernel
def assert_float():
y = 0.5
assert y < 0, 'y = %f' % y
with pytest.raises(RuntimeError, match=r'x\[10\] expect=0 got=42'):
assert_formatted()
    # TODO: note that the runtime is not yet fully able to recover from
# assertion failures...
with pytest.raises(RuntimeError, match=r'y = 0.5'):
assert_float()
# success case
x[10] = 0
assert_formatted()
@ti.test()
def test_dict():
@ti.kernel
def foo(x: ti.template()) -> ti.i32:
a = {1: 2, 3: 4}
b = {5: 6, **a}
return b[x]
assert foo(1) == 2
with pytest.raises(KeyError):
foo(2)
@ti.test()
def test_listcomp():
@ti.func
def identity(dt, n: ti.template()):
return ti.Matrix([[ti.cast(int(i == j), dt) for j in range(n)]
for i in range(n)])
@ti.kernel
def foo(n: ti.template()) -> ti.i32:
a = identity(ti.i32, n)
b = [j for i in a for j in i]
ret = 0
for i in ti.static(range(n)):
for j in ti.static(range(n)):
ret += i * j * b[i * n + j]
return ret
assert foo(5) == 1 + 4 + 9 + 16
@ti.test()
def test_dictcomp():
@ti.kernel
def foo(n: ti.template()) -> ti.i32:
a = {i: i * i for i in range(n) if i % 3 if i % 2}
ret = 0
for i in ti.static(range(n)):
if ti.static(i % 3):
if ti.static(i % 2):
ret += a[i]
return ret
assert foo(10) == 1 * 1 + 5 * 5 + 7 * 7
@ti.test()
def test_dictcomp_fail():
@ti.kernel
def foo(n: ti.template(), m: ti.template()) -> ti.i32:
a = {i: i * i for i in range(n) if i % 3 if i % 2}
return a[m]
with pytest.raises(KeyError):
foo(5, 2)
with pytest.raises(KeyError):
foo(5, 3)
@pytest.mark.skipif(not ti.has_pytorch(), reason='Pytorch not installed.')
@ti.test(arch=[ti.cpu, ti.cuda, ti.opengl])
def test_ndarray():
n = 4
m = 7
@ti.kernel
def run(x: ti.any_arr(element_dim=2, layout=ti.Layout.AOS),
y: ti.any_arr()):
for i in ti.static(range(n)):
for j in ti.static(range(m)):
x[i, j][0, 0] += i + j + y[i, j]
a = ti.Matrix.ndarray(1, 1, ti.i32, shape=(n, m))
for i in range(n):
for j in range(m):
a[i, j][0, 0] = i * j
b = np.ones((n, m), dtype=np.int32)
run(a, b)
for i in range(n):
for j in range(m):
assert a[i, j][0, 0] == i * j + i + j + 1
@ti.test(arch=ti.cpu)
def test_sparse_matrix_builder():
n = 8
Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)
@ti.kernel
def fill(Abuilder: ti.linalg.sparse_matrix_builder()):
for i, j in ti.static(ti.ndrange(n, n)):
Abuilder[i, j] += i + j
fill(Abuilder)
A = Abuilder.build()
for i in range(n):
for j in range(n):
assert A[i, j] == i + j
@ti.test()
def test_func_default_value():
@ti.func
def bar(s, t=1):
return s + t
@ti.kernel
def foo() -> ti.i32:
return bar(1)
assert foo() == 2
@ti.test()
def test_func_default_value_fail():
with pytest.raises(ti.TaichiSyntaxError):
@ti.func
def bar(s, t=1):
return s + t
@ti.kernel
def foo() -> ti.i32:
return bar(1, 2, 3)
foo()
@ti.test()
def test_raise():
dim = 1
m = ti.Matrix.field(dim, dim, ti.f32)
ti.root.place(m)
with pytest.raises(Exception) as e:
@ti.kernel
def foo():
ti.polar_decompose(m, ti.f32)
foo()
assert e.value.args[
0] == "Polar decomposition only supports 2D and 3D matrices."
@ti.test()
def test_scalar_argument():
@ti.kernel
def add(a: ti.f32, b: ti.f32) -> ti.f32:
a = a + b
return a
assert add(1.0, 2.0) == approx(3.0)
| 21.126263 | 75 | 0.448721 |
ae3b6fd7e45b1a0443006038ba007fe4226472e0 | 2,770 | py | Python | doc/scripts/simplify_cmake_depsgraph.py | tiferrei/astrobee | a9aa0a7e9a7dd5a28c264acfd06ccde18103190a | [
"Apache-2.0"
] | 629 | 2017-08-31T23:09:00.000Z | 2022-03-30T11:55:40.000Z | doc/scripts/simplify_cmake_depsgraph.py | tiferrei/astrobee | a9aa0a7e9a7dd5a28c264acfd06ccde18103190a | [
"Apache-2.0"
] | 269 | 2018-05-05T12:31:16.000Z | 2022-03-30T22:04:11.000Z | doc/scripts/simplify_cmake_depsgraph.py | tiferrei/astrobee | a9aa0a7e9a7dd5a28c264acfd06ccde18103190a | [
"Apache-2.0"
] | 248 | 2017-08-31T23:20:56.000Z | 2022-03-30T22:29:16.000Z | #!/usr/bin/python3
#
# Simplify a cmake generated dependency graph.
#
# Cmake has the capability to generate executable/librairies dependency graph.
# However, the generated graphs are unreadable because every library file is
# represented as a unique node. This script groups the libraries files by
# package name.
# The 'groups' variable list the patterns used to re-group the various nodes
# from a same package into a single node. The groups list is specifically
# crafted for ARS: it mostly addresses the ROS and Gazebo libs, plus some
# other key dependencies.
#
# Usage:
# 1. Generate the dependency graphs with something like
# cd $BUILD_PATH
# cmake --graphviz=deps/ars .
# 2. Simplify the desired graphs with something like
# cd deps
# $SOURCE_PATH/doc/scripts/simplify_cmake_depsgraph.py ars.executive \
# > executive.dot
# dot -Teps executive.dot -o executive.eps
#
import re
import sys
groups = [
("nodeROS", "ROS Libraries", "/opt/ros/.+"),
("nodeGazebo", "gazebo", "/usr/lib/.+/libgazebo[_a-z0-9]*.so"),
("nodeBoost", "boost", "/usr/lib/.+/libboost.+\.so"),
("nodeOpenCV", "Open CV", "opencv_[_a-z0-9]+"),
("nodeLua", "lua", "/usr/lib/.+/liblua.+\.so"),
("nodeTinyXML", "tinyxml", "/usr/lib/.+/libtinyxml.*\.so"),
("nodeGflags", "gflags", "/usr/lib/.+/libgflag.*\.so"),
("nodeGlog", "glog", "/usr/lib/.+/libglog.*\.so"),
("nodeLinux", "Linux System Libraries", "/usr/lib/.*.so")
# Not sure if libPocoFoundation should be listed individually
# ("nodeLinux", "Linux System Libraries", "/usr/lib/libPoco.*.so"),
# ("nodeLinux", "Linux System Libraries", "/usr/lib/.+linux-gnu/.+\.so")
]
nodes = list()
def process_dot(file):
global nodes
lines = file.readlines()
# Identify groups of libraries
# outer loop is groups: this way the order of the group list is respected
# and it allows to glob larger pattern after more specific patterns
# have already been processed
for g in groups:
for i, l in enumerate(lines):
pattern = re.compile('\s"(node[0-9]+)"\s\[\slabel="(' + g[2] + ')"\s.+')
result = re.search(pattern, l)
if result:
lines.pop(i)
lines.insert(i, l.replace(result.group(2), g[1]))
nodes.append((result.group(1), g[0]))
# Replace nodes with common group node name
for n in nodes:
lines = [l.replace(n[0], n[1]) for l in lines]
# Add strict to avoid multiple edges
lines[0] = "strict " + lines[0]
# Output the new file
for l in lines:
print(l, end=" ")
if len(sys.argv) < 2:
print("provide input file as first arg")
    sys.exit(1)
f = open(sys.argv[1], "r")
process_dot(f)
# print(nodes)
| 32.97619 | 84 | 0.627798 |
9da61494bf7a3757ff2974ede68b4309cc6adbde | 1,849 | py | Python | src/urls.py | orlowdev/aite | 6fcb02211d9fcb6be84de99deebc2aabe8075f61 | [
"Apache-2.0"
] | 1 | 2021-04-13T15:44:05.000Z | 2021-04-13T15:44:05.000Z | src/urls.py | orlowdev/aite | 6fcb02211d9fcb6be84de99deebc2aabe8075f61 | [
"Apache-2.0"
] | null | null | null | src/urls.py | orlowdev/aite | 6fcb02211d9fcb6be84de99deebc2aabe8075f61 | [
"Apache-2.0"
] | null | null | null | """src URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic.base import TemplateView
from rest_framework_jwt.views import obtain_jwt_token
from src.angular_js.views import AngularTemplateView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/auth/token', obtain_jwt_token),
url(r'^api/comments/', include("comments.api.urls", namespace="api-comments")),
url(r'^api/contact-forms/', include("contact_forms.api.urls", namespace="api-contact-forms")),
url(r'^api/calendar/', include("calendars.api.urls", namespace="api-calendars")),
url(r'^api/posts/', include("posts.api.urls", namespace="api-posts")),
url(r'^api/users/', include("accounts.api.urls", namespace="api-users")),
url(r'^api/templates/(?P<item>[A-Za-z0-9\_\-\.\/]+)\.html$', AngularTemplateView.as_view()),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [
url(r'', TemplateView.as_view(template_name='angular/main.html')),
]
| 39.340426 | 98 | 0.718767 |
18848600e1b67b67f6ce985da532897e614de92e | 1,622 | py | Python | uber_agent/city.py | hotpxl/uber-agent | 70729f9b09a17336af3a8bc6f51e0b27b10e3fc3 | [
"MIT"
] | null | null | null | uber_agent/city.py | hotpxl/uber-agent | 70729f9b09a17336af3a8bc6f51e0b27b10e3fc3 | [
"MIT"
] | null | null | null | uber_agent/city.py | hotpxl/uber-agent | 70729f9b09a17336af3a8bc6f51e0b27b10e3fc3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pickle
import math
import random
class City():
def __init__(self, locations, travel_times, fare_estimates, coordinates):
self._locations = locations
self._travel_times = travel_times
self._fare_estimates = fare_estimates
self._coordinates = coordinates
def save(self, filename='city.p'):
with open(filename, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def load(filename='city.p'):
with open(filename, 'rb') as f:
return pickle.load(f)
def locations(self):
return self._locations[:]
def travel_time(self, a, b):
return self._travel_times[a][b]
def fare_estimate(self, a, b):
return self._fare_estimates[a][b]
def coordinate(self, a):
return self._coordinates[a]
def distance(self, a, b):
return math.sqrt(
sum(
map(lambda i, j: (i - j)**2, self._coordinates[a],
self._coordinates[b])))
class TripGenerator():
def __init__(self, city):
self._city = city
def driver_at(self, l):
locations = self._city.locations()
assert l in locations, 'Invalid location.'
weights = reversed(list(range(1, len(locations) + 1)))
return self.start_from(random.choices(locations, weights=weights)[0])
def start_from(self, l):
locations = self._city.locations()
assert l in locations, 'Invalid location.'
locations.remove(l)
destination = random.choice(locations)
return (l, destination)
| 27.491525 | 77 | 0.610358 |
4386c6d34a371af0d7fd77faa363f547f2c35884 | 935 | py | Python | py/codeforces/883E.py | shhuan/algorithms | 2830c7e2ada8dfd3dcdda7c06846116d4f944a27 | [
"MIT"
] | null | null | null | py/codeforces/883E.py | shhuan/algorithms | 2830c7e2ada8dfd3dcdda7c06846116d4f944a27 | [
"MIT"
] | null | null | null | py/codeforces/883E.py | shhuan/algorithms | 2830c7e2ada8dfd3dcdda7c06846116d4f944a27 | [
"MIT"
] | 1 | 2022-03-09T04:52:55.000Z | 2022-03-09T04:52:55.000Z | # -*- coding: utf-8 -*-
import math
import collections
import bisect
import heapq
import time
import random
import itertools
import sys
"""
created by shhuan at 2017/10/22 17:36
"""
N = int(input())
W = input()
M = int(input())
words = set()
for i in range(M):
words.add(input())
revealed = set(W) - {'*'}
idx = [i for i, w in enumerate(W) if w == '*']
badwords = set()
guesses = set()
for i in idx:
for u in words:
if u[i] in revealed:
badwords.add(u)
for i, w in enumerate(W):
if w != '*':
for u in words:
if u[i] != w:
badwords.add(u)
words -= badwords
for i in idx:
w = W[i]
for u in words:
c = u[i]
guesses.add(c)
ans = 0
for g in guesses:
left = {v for v in words}
for i in idx:
for w in {v for v in left}:
if w[i] == g:
left.remove(w)
if not left:
ans += 1
print(ans) | 15.583333 | 46 | 0.522995 |
668df73ebc1e3b3bdeeba0ba60594ce79d63373b | 7,766 | py | Python | docs/conf.py | psreddy85/mlops_wafer | c3c2ac00ee55bf90956b25d4bd2054de6afa8287 | [
"MIT"
] | null | null | null | docs/conf.py | psreddy85/mlops_wafer | c3c2ac00ee55bf90956b25d4bd2054de6afa8287 | [
"MIT"
] | null | null | null | docs/conf.py | psreddy85/mlops_wafer | c3c2ac00ee55bf90956b25d4bd2054de6afa8287 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# MLOps_Wafer documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MLOps_Wafer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mlops_maindoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'mlops_main.tex',
u'MLOps_Wafer Documentation',
u"sharath", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mlops_main', u'MLOps_Wafer Documentation',
[u"sharath"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mlops_main', u'MLOps_Wafer Documentation',
u"sharath", 'MLOps_Wafer',
'wafer project using mlops', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.697959 | 80 | 0.707185 |
bd2afbe0d87a261631e10f6ea0924928d49f20d4 | 37 | py | Python | .history/chapter01/python_03_list_20201124211540.py | KustomApe/nerdape | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | [
"MIT"
] | null | null | null | .history/chapter01/python_03_list_20201124211540.py | KustomApe/nerdape | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | [
"MIT"
] | null | null | null | .history/chapter01/python_03_list_20201124211540.py | KustomApe/nerdape | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | [
"MIT"
] | null | null | null | """[リストについて]
リストの構文とリストの使い方について
"""
| 7.4 | 18 | 0.675676 |
01ddd92c72ffd60c0d5e6ba79af60629466754c9 | 1,979 | py | Python | test/sst/7.1.0/goblin_singlestream1-trace.py | tactcomplabs/gc64-hmcsim | 79bf4ffae74dc52bb605adb3e0e1eb84649f9624 | [
"BSD-2-Clause"
] | 10 | 2018-02-26T02:39:36.000Z | 2020-10-20T14:55:56.000Z | test/sst/7.1.0/goblin_singlestream1-trace.py | tactcomplabs/gc64-hmcsim | 79bf4ffae74dc52bb605adb3e0e1eb84649f9624 | [
"BSD-2-Clause"
] | 5 | 2017-09-07T11:41:35.000Z | 2020-10-12T14:35:39.000Z | test/sst/6.1.0/goblin_singlestream1-trace.py | tactcomplabs/gc64-hmcsim | 79bf4ffae74dc52bb605adb3e0e1eb84649f9624 | [
"BSD-2-Clause"
] | 4 | 2017-09-07T06:03:43.000Z | 2021-09-10T13:44:19.000Z | import sst
# Define SST core options
sst.setProgramOption("timebase", "1ps")
sst.setProgramOption("stopAtCycle", "0 ns")
# Define the simulation components
comp_cpu = sst.Component("cpu", "miranda.BaseCPU")
comp_cpu.addParams({
"verbose" : 0,
"generator" : "miranda.SingleStreamGenerator",
"generatorParams.verbose" : 0,
"generatorParams.startat" : 3,
"generatorParams.count" : 500000,
"generatorParams.max_address" : 512000,
"printStats" : 1,
})
# Tell SST what statistics handling we want
sst.setStatisticLoadLevel(4)
# Enable statistics outputs
comp_cpu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})
comp_l1cache = sst.Component("l1cache", "memHierarchy.Cache")
comp_l1cache.addParams({
"access_latency_cycles" : "2",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MESI",
"associativity" : "4",
"cache_line_size" : "64",
"prefetcher" : "cassini.StridePrefetcher",
"debug" : "1",
"L1" : "1",
"cache_size" : "2KB"
})
# Enable statistics outputs
comp_l1cache.enableAllStatistics({"type":"sst.AccumulatorStatistic"})
comp_memory = sst.Component("memory", "memHierarchy.MemController")
comp_memory.addParams({
"coherence_protocol" : "MESI",
"backend.access_time" : "1000 ns",
"backend.mem_size" : "512MiB",
"clock" : "1GHz",
"backend" : "memHierarchy.goblinHMCSim",
"backend.trace-banks" : "1",
"backend.trace-queue" : "1",
"backend.trace-cmds" : "1",
"backend.trace-latency" : "1",
"backend.trace-stalls" : "1"
})
# Define the simulation links
link_cpu_cache_link = sst.Link("link_cpu_cache_link")
link_cpu_cache_link.connect( (comp_cpu, "cache_link", "1000ps"), (comp_l1cache, "high_network_0", "1000ps") )
link_cpu_cache_link.setNoCut()
link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect( (comp_l1cache, "low_network_0", "50ps"), (comp_memory, "direct_link", "50ps") )
| 30.921875 | 109 | 0.688732 |
fefca61b9687f183494d5e9db51d39bf34cb5bdb | 5,009 | py | Python | samples/fcd_create_vdisk_from_snapshot.py | whchoi98/whchoi_pyvmomi-community-samples | 4bc90d0780267d9c14382ae72b50ef3475a96e46 | [
"Apache-2.0"
] | null | null | null | samples/fcd_create_vdisk_from_snapshot.py | whchoi98/whchoi_pyvmomi-community-samples | 4bc90d0780267d9c14382ae72b50ef3475a96e46 | [
"Apache-2.0"
] | null | null | null | samples/fcd_create_vdisk_from_snapshot.py | whchoi98/whchoi_pyvmomi-community-samples | 4bc90d0780267d9c14382ae72b50ef3475a96e46 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Written by Chris Arceneaux
# GitHub: https://github.com/carceneaux
# Email: carceneaux@thinksis.com
# Website: http://arsano.ninja
#
# Note: Example code for testing purposes only
#
# This code has been released under the terms of the Apache-2.0 license
# http://opensource.org/licenses/Apache-2.0
"""
Python program for creating a first class disk (fcd) from a snapshot
"""
import atexit
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
from tools import cli, tasks, disk, pbmhelper
from pyVim import connect
from pyVmomi import vmodl, vim, pbm, VmomiSupport
def get_args():
"""
Adds additional args for creating a fcd from a snapshot
-d source_datastore
-v source_vdisk
-n snapshot
-D dest_datastore
-V dest_vdisk
"""
parser = cli.build_arg_parser()
parser.add_argument('-d', '--source_datastore',
required=True,
action='store',
help='Datastore name where source disk is located')
parser.add_argument('-v', '--source_vdisk',
required=True,
action='store',
help='First Class Disk name with specified snapshot')
# because -s is reserved for 'service', we use -n for snapshot name
parser.add_argument('-n', '--snapshot',
required=True,
action='store',
help='Snapshot name to be cloned')
parser.add_argument('-D', '--dest_datastore',
required=True,
action='store',
help='Datastore name where new disk is located')
parser.add_argument('-V', '--dest_vdisk',
required=True,
action='store',
help='First Class Disk name to be created')
# because -s is reserved for 'service' and -p is reserved for 'password'
parser.add_argument('-e', '--policy',
action='store',
help='Storage Policy name for new disk. If unset, '
'the default policy of the datastore specified '
'will apply.')
my_args = parser.parse_args()
return cli.prompt_for_password(my_args)
def main():
"""
Simple command-line program for creating a new vdisk from a snapshot
"""
args = get_args()
try:
if args.disable_ssl_verification:
service_instance = connect.SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=args.password,
port=int(args.port))
else:
service_instance = connect.SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=int(args.port))
atexit.register(connect.Disconnect, service_instance)
content = service_instance.RetrieveContent()
# Connect to SPBM Endpoint
pbmSi = pbmhelper.create_pbm_session(service_instance._stub)
pbmContent = pbmSi.RetrieveContent()
# Retrieving Storage Policy
if args.policy:
p = pbmhelper.retrieve_storage_policy(pbmContent, args.policy)
policy = [vim.vm.DefinedProfileSpec(
profileId=p.profileId.uniqueId)]
else:
policy = None
# Retrieve Source Datastore Object
source_datastore = disk.get_obj(
content, [vim.Datastore], args.source_datastore)
# Retrieve Source FCD Object
source_vdisk = disk.retrieve_fcd(
content, source_datastore, args.source_vdisk)
# Retrieve Snapshot Object
snapshot = disk.retrieve_fcd_snapshot(
content, source_datastore, source_vdisk, args.snapshot)
# Retrieve Destination Datastore Object
dest_datastore = disk.get_obj(
content, [vim.Datastore], args.dest_datastore)
# Create FCD from Snapshot
storage = content.vStorageObjectManager
if policy:
task = storage.CreateDiskFromSnapshot_Task(
source_vdisk.config.id,
dest_datastore,
snapshot,
args.dest_vdisk,
policy)
else:
task = storage.CreateDiskFromSnapshot_Task(
source_vdisk.config.id,
dest_datastore,
snapshot,
args.dest_vdisk)
tasks.wait_for_tasks(service_instance, [task])
except vmodl.MethodFault as error:
print("Caught vmodl fault : " + error.msg)
return -1
return 0
# Start program
if __name__ == "__main__":
main()
| 32.738562 | 77 | 0.558994 |
33b681373543e2fe5d697beb91667b32e6e8320b | 510 | py | Python | plotly/validators/heatmap/colorbar/_tickmode.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/heatmap/colorbar/_tickmode.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/validators/heatmap/colorbar/_tickmode.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name='tickmode', parent_name='heatmap.colorbar', **kwargs
):
super(TickmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='colorbars',
implied_edits={},
role='info',
values=['auto', 'linear', 'array'],
**kwargs
)
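# Minimal standalone sketch (added; normally plotly's generated figure code
# instantiates this validator internally, so this is illustration only):
if __name__ == '__main__':
    v = TickmodeValidator()
    print(v.validate_coerce('auto'))  # an accepted value passes through unchanged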
| 28.333333 | 78 | 0.609804 |
91d2bafb054360811b97406c3179dffde9cdd7f6 | 555 | py | Python | ordenacao/quicksort.py | GustavoCunhaLacerda/algoritmos-python | 6799ab02bce3971728ce3503b48e8fda339c7b7c | [
"MIT"
] | 1 | 2020-09-19T04:42:29.000Z | 2020-09-19T04:42:29.000Z | ordenacao/quicksort.py | GustavoCunhaLacerda/AlgoritmosPython | 6799ab02bce3971728ce3503b48e8fda339c7b7c | [
"MIT"
] | null | null | null | ordenacao/quicksort.py | GustavoCunhaLacerda/AlgoritmosPython | 6799ab02bce3971728ce3503b48e8fda339c7b7c | [
"MIT"
] | null | null | null | def quick_sort(list, start=0, end=None):
if end is None:
end = len(list)-1
if start < end:
pivot = partition(list, start, end)
quick_sort(list, start, pivot-1)
quick_sort(list, pivot+1, end)
def partition(list, start, end):
pivot = list[end]
smaller = start
for bigger in range(start, end):
if list[bigger] <= pivot:
list[bigger], list[smaller] = list[smaller], list[bigger]
smaller += 1
list[end], list[smaller] = list[smaller], list[end]
return smaller
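# Minimal usage sketch (added for illustration; the values are arbitrary):
if __name__ == '__main__':
    data = [5, 2, 9, 1, 5, 6]
    quick_sort(data)   # sorts the list in place (Lomuto partition scheme)
    print(data)        # -> [1, 2, 5, 5, 6, 9]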
| 26.428571 | 69 | 0.583784 |
7817b8f6b2688dfaad857c114446d1dcf5d1b5fc | 39,232 | py | Python | sdk/python/pulumi_aws/ec2/eip.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/eip.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/eip.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['EipArgs', 'Eip']
@pulumi.input_type
class EipArgs:
def __init__(__self__, *,
associate_with_private_ip: Optional[pulumi.Input[str]] = None,
customer_owned_ipv4_pool: Optional[pulumi.Input[str]] = None,
instance: Optional[pulumi.Input[str]] = None,
network_border_group: Optional[pulumi.Input[str]] = None,
network_interface: Optional[pulumi.Input[str]] = None,
public_ipv4_pool: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a Eip resource.
:param pulumi.Input[str] associate_with_private_ip: A user specified primary or secondary private IP address to
associate with the Elastic IP address. If no private IP address is specified,
the Elastic IP address is associated with the primary private IP address.
:param pulumi.Input[str] customer_owned_ipv4_pool: The ID of a customer-owned address pool. For more on customer owned IP addressed check out [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing)
:param pulumi.Input[str] instance: EC2 instance ID.
:param pulumi.Input[str] network_border_group: The location from which the IP address is advertised. Use this parameter to limit the address to this location.
:param pulumi.Input[str] network_interface: Network interface ID to associate with.
:param pulumi.Input[str] public_ipv4_pool: EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. Tags can only be applied to EIPs in a VPC.
:param pulumi.Input[bool] vpc: Boolean if the EIP is in a VPC or not.
"""
if associate_with_private_ip is not None:
pulumi.set(__self__, "associate_with_private_ip", associate_with_private_ip)
if customer_owned_ipv4_pool is not None:
pulumi.set(__self__, "customer_owned_ipv4_pool", customer_owned_ipv4_pool)
if instance is not None:
pulumi.set(__self__, "instance", instance)
if network_border_group is not None:
pulumi.set(__self__, "network_border_group", network_border_group)
if network_interface is not None:
pulumi.set(__self__, "network_interface", network_interface)
if public_ipv4_pool is not None:
pulumi.set(__self__, "public_ipv4_pool", public_ipv4_pool)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vpc is not None:
pulumi.set(__self__, "vpc", vpc)
@property
@pulumi.getter(name="associateWithPrivateIp")
def associate_with_private_ip(self) -> Optional[pulumi.Input[str]]:
"""
A user specified primary or secondary private IP address to
associate with the Elastic IP address. If no private IP address is specified,
the Elastic IP address is associated with the primary private IP address.
"""
return pulumi.get(self, "associate_with_private_ip")
@associate_with_private_ip.setter
def associate_with_private_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "associate_with_private_ip", value)
@property
@pulumi.getter(name="customerOwnedIpv4Pool")
def customer_owned_ipv4_pool(self) -> Optional[pulumi.Input[str]]:
"""
The ID of a customer-owned address pool. For more on customer owned IP addressed check out [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing)
"""
return pulumi.get(self, "customer_owned_ipv4_pool")
@customer_owned_ipv4_pool.setter
def customer_owned_ipv4_pool(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_owned_ipv4_pool", value)
@property
@pulumi.getter
def instance(self) -> Optional[pulumi.Input[str]]:
"""
EC2 instance ID.
"""
return pulumi.get(self, "instance")
@instance.setter
def instance(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance", value)
@property
@pulumi.getter(name="networkBorderGroup")
def network_border_group(self) -> Optional[pulumi.Input[str]]:
"""
The location from which the IP address is advertised. Use this parameter to limit the address to this location.
"""
return pulumi.get(self, "network_border_group")
@network_border_group.setter
def network_border_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_border_group", value)
@property
@pulumi.getter(name="networkInterface")
def network_interface(self) -> Optional[pulumi.Input[str]]:
"""
Network interface ID to associate with.
"""
return pulumi.get(self, "network_interface")
@network_interface.setter
def network_interface(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_interface", value)
@property
@pulumi.getter(name="publicIpv4Pool")
def public_ipv4_pool(self) -> Optional[pulumi.Input[str]]:
"""
EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs.
"""
return pulumi.get(self, "public_ipv4_pool")
@public_ipv4_pool.setter
def public_ipv4_pool(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_ipv4_pool", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. Tags can only be applied to EIPs in a VPC.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def vpc(self) -> Optional[pulumi.Input[bool]]:
"""
Boolean if the EIP is in a VPC or not.
"""
return pulumi.get(self, "vpc")
@vpc.setter
def vpc(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "vpc", value)
@pulumi.input_type
class _EipState:
def __init__(__self__, *,
allocation_id: Optional[pulumi.Input[str]] = None,
associate_with_private_ip: Optional[pulumi.Input[str]] = None,
association_id: Optional[pulumi.Input[str]] = None,
carrier_ip: Optional[pulumi.Input[str]] = None,
customer_owned_ip: Optional[pulumi.Input[str]] = None,
customer_owned_ipv4_pool: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
instance: Optional[pulumi.Input[str]] = None,
network_border_group: Optional[pulumi.Input[str]] = None,
network_interface: Optional[pulumi.Input[str]] = None,
private_dns: Optional[pulumi.Input[str]] = None,
private_ip: Optional[pulumi.Input[str]] = None,
public_dns: Optional[pulumi.Input[str]] = None,
public_ip: Optional[pulumi.Input[str]] = None,
public_ipv4_pool: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering Eip resources.
:param pulumi.Input[str] associate_with_private_ip: A user specified primary or secondary private IP address to
associate with the Elastic IP address. If no private IP address is specified,
the Elastic IP address is associated with the primary private IP address.
:param pulumi.Input[str] carrier_ip: The carrier IP address.
:param pulumi.Input[str] customer_owned_ip: Customer owned IP.
:param pulumi.Input[str] customer_owned_ipv4_pool: The ID of a customer-owned address pool. For more on customer owned IP addressed check out [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing)
:param pulumi.Input[str] domain: Indicates if this EIP is for use in VPC (`vpc`) or EC2 Classic (`standard`).
:param pulumi.Input[str] instance: EC2 instance ID.
:param pulumi.Input[str] network_border_group: The location from which the IP address is advertised. Use this parameter to limit the address to this location.
:param pulumi.Input[str] network_interface: Network interface ID to associate with.
:param pulumi.Input[str] private_dns: The Private DNS associated with the Elastic IP address (if in VPC).
:param pulumi.Input[str] private_ip: Contains the private IP address (if in VPC).
:param pulumi.Input[str] public_dns: Public DNS associated with the Elastic IP address.
:param pulumi.Input[str] public_ip: Contains the public IP address.
:param pulumi.Input[str] public_ipv4_pool: EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. Tags can only be applied to EIPs in a VPC.
:param pulumi.Input[bool] vpc: Boolean if the EIP is in a VPC or not.
"""
if allocation_id is not None:
pulumi.set(__self__, "allocation_id", allocation_id)
if associate_with_private_ip is not None:
pulumi.set(__self__, "associate_with_private_ip", associate_with_private_ip)
if association_id is not None:
pulumi.set(__self__, "association_id", association_id)
if carrier_ip is not None:
pulumi.set(__self__, "carrier_ip", carrier_ip)
if customer_owned_ip is not None:
pulumi.set(__self__, "customer_owned_ip", customer_owned_ip)
if customer_owned_ipv4_pool is not None:
pulumi.set(__self__, "customer_owned_ipv4_pool", customer_owned_ipv4_pool)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if instance is not None:
pulumi.set(__self__, "instance", instance)
if network_border_group is not None:
pulumi.set(__self__, "network_border_group", network_border_group)
if network_interface is not None:
pulumi.set(__self__, "network_interface", network_interface)
if private_dns is not None:
pulumi.set(__self__, "private_dns", private_dns)
if private_ip is not None:
pulumi.set(__self__, "private_ip", private_ip)
if public_dns is not None:
pulumi.set(__self__, "public_dns", public_dns)
if public_ip is not None:
pulumi.set(__self__, "public_ip", public_ip)
if public_ipv4_pool is not None:
pulumi.set(__self__, "public_ipv4_pool", public_ipv4_pool)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vpc is not None:
pulumi.set(__self__, "vpc", vpc)
@property
@pulumi.getter(name="allocationId")
def allocation_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "allocation_id")
@allocation_id.setter
def allocation_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "allocation_id", value)
@property
@pulumi.getter(name="associateWithPrivateIp")
def associate_with_private_ip(self) -> Optional[pulumi.Input[str]]:
"""
A user specified primary or secondary private IP address to
associate with the Elastic IP address. If no private IP address is specified,
the Elastic IP address is associated with the primary private IP address.
"""
return pulumi.get(self, "associate_with_private_ip")
@associate_with_private_ip.setter
def associate_with_private_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "associate_with_private_ip", value)
@property
@pulumi.getter(name="associationId")
def association_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "association_id")
@association_id.setter
def association_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "association_id", value)
@property
@pulumi.getter(name="carrierIp")
def carrier_ip(self) -> Optional[pulumi.Input[str]]:
"""
The carrier IP address.
"""
return pulumi.get(self, "carrier_ip")
@carrier_ip.setter
def carrier_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "carrier_ip", value)
@property
@pulumi.getter(name="customerOwnedIp")
def customer_owned_ip(self) -> Optional[pulumi.Input[str]]:
"""
Customer owned IP.
"""
return pulumi.get(self, "customer_owned_ip")
@customer_owned_ip.setter
def customer_owned_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_owned_ip", value)
@property
@pulumi.getter(name="customerOwnedIpv4Pool")
def customer_owned_ipv4_pool(self) -> Optional[pulumi.Input[str]]:
"""
The ID of a customer-owned address pool. For more on customer owned IP addressed check out [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing)
"""
return pulumi.get(self, "customer_owned_ipv4_pool")
@customer_owned_ipv4_pool.setter
def customer_owned_ipv4_pool(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_owned_ipv4_pool", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
"""
Indicates if this EIP is for use in VPC (`vpc`) or EC2 Classic (`standard`).
"""
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter
def instance(self) -> Optional[pulumi.Input[str]]:
"""
EC2 instance ID.
"""
return pulumi.get(self, "instance")
@instance.setter
def instance(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance", value)
@property
@pulumi.getter(name="networkBorderGroup")
def network_border_group(self) -> Optional[pulumi.Input[str]]:
"""
The location from which the IP address is advertised. Use this parameter to limit the address to this location.
"""
return pulumi.get(self, "network_border_group")
@network_border_group.setter
def network_border_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_border_group", value)
@property
@pulumi.getter(name="networkInterface")
def network_interface(self) -> Optional[pulumi.Input[str]]:
"""
Network interface ID to associate with.
"""
return pulumi.get(self, "network_interface")
@network_interface.setter
def network_interface(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_interface", value)
@property
@pulumi.getter(name="privateDns")
def private_dns(self) -> Optional[pulumi.Input[str]]:
"""
The Private DNS associated with the Elastic IP address (if in VPC).
"""
return pulumi.get(self, "private_dns")
@private_dns.setter
def private_dns(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_dns", value)
@property
@pulumi.getter(name="privateIp")
def private_ip(self) -> Optional[pulumi.Input[str]]:
"""
Contains the private IP address (if in VPC).
"""
return pulumi.get(self, "private_ip")
@private_ip.setter
def private_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_ip", value)
@property
@pulumi.getter(name="publicDns")
def public_dns(self) -> Optional[pulumi.Input[str]]:
"""
Public DNS associated with the Elastic IP address.
"""
return pulumi.get(self, "public_dns")
@public_dns.setter
def public_dns(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_dns", value)
@property
@pulumi.getter(name="publicIp")
def public_ip(self) -> Optional[pulumi.Input[str]]:
"""
Contains the public IP address.
"""
return pulumi.get(self, "public_ip")
@public_ip.setter
def public_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_ip", value)
@property
@pulumi.getter(name="publicIpv4Pool")
def public_ipv4_pool(self) -> Optional[pulumi.Input[str]]:
"""
EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs.
"""
return pulumi.get(self, "public_ipv4_pool")
@public_ipv4_pool.setter
def public_ipv4_pool(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_ipv4_pool", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. Tags can only be applied to EIPs in a VPC.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def vpc(self) -> Optional[pulumi.Input[bool]]:
"""
Boolean if the EIP is in a VPC or not.
"""
return pulumi.get(self, "vpc")
@vpc.setter
def vpc(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "vpc", value)
class Eip(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
associate_with_private_ip: Optional[pulumi.Input[str]] = None,
customer_owned_ipv4_pool: Optional[pulumi.Input[str]] = None,
instance: Optional[pulumi.Input[str]] = None,
network_border_group: Optional[pulumi.Input[str]] = None,
network_interface: Optional[pulumi.Input[str]] = None,
public_ipv4_pool: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Provides an Elastic IP resource.
> **Note:** EIP may require IGW to exist prior to association. Use `depends_on` to set an explicit dependency on the IGW.
> **Note:** Do not use `network_interface` to associate the EIP to `lb.LoadBalancer` or `ec2.NatGateway` resources. Instead use the `allocation_id` available in those resources to allow AWS to manage the association, otherwise you will see `AuthFailure` errors.
## Example Usage
Single EIP associated with an instance:
```python
import pulumi
import pulumi_aws as aws
lb = aws.ec2.Eip("lb",
instance=aws_instance["web"]["id"],
vpc=True)
```
Multiple EIPs associated with a single network interface:
```python
import pulumi
import pulumi_aws as aws
multi_ip = aws.ec2.NetworkInterface("multi-ip",
subnet_id=aws_subnet["main"]["id"],
private_ips=[
"10.0.0.10",
"10.0.0.11",
])
one = aws.ec2.Eip("one",
vpc=True,
network_interface=multi_ip.id,
associate_with_private_ip="10.0.0.10")
two = aws.ec2.Eip("two",
vpc=True,
network_interface=multi_ip.id,
associate_with_private_ip="10.0.0.11")
```
Attaching an EIP to an Instance with a pre-assigned private ip (VPC Only):
```python
import pulumi
import pulumi_aws as aws
default = aws.ec2.Vpc("default",
cidr_block="10.0.0.0/16",
enable_dns_hostnames=True)
gw = aws.ec2.InternetGateway("gw", vpc_id=default.id)
tf_test_subnet = aws.ec2.Subnet("tfTestSubnet",
vpc_id=default.id,
cidr_block="10.0.0.0/24",
map_public_ip_on_launch=True,
opts=pulumi.ResourceOptions(depends_on=[gw]))
foo = aws.ec2.Instance("foo",
ami="ami-5189a661",
instance_type="t2.micro",
private_ip="10.0.0.12",
subnet_id=tf_test_subnet.id)
bar = aws.ec2.Eip("bar",
vpc=True,
instance=foo.id,
associate_with_private_ip="10.0.0.12",
opts=pulumi.ResourceOptions(depends_on=[gw]))
```
Allocating EIP from the BYOIP pool:
```python
import pulumi
import pulumi_aws as aws
byoip_ip = aws.ec2.Eip("byoip-ip",
public_ipv4_pool="ipv4pool-ec2-012345",
vpc=True)
```
## Import
EIPs in a VPC can be imported using their Allocation ID, e.g.
```sh
$ pulumi import aws:ec2/eip:Eip bar eipalloc-00a10e96
```
EIPs in EC2 Classic can be imported using their Public IP, e.g.
```sh
$ pulumi import aws:ec2/eip:Eip bar 52.0.0.0
```
[1]https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateAddress.html
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] associate_with_private_ip: A user specified primary or secondary private IP address to
associate with the Elastic IP address. If no private IP address is specified,
the Elastic IP address is associated with the primary private IP address.
        :param pulumi.Input[str] customer_owned_ipv4_pool: The ID of a customer-owned address pool. For more on customer-owned IP addresses, check out the [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing)
:param pulumi.Input[str] instance: EC2 instance ID.
:param pulumi.Input[str] network_border_group: The location from which the IP address is advertised. Use this parameter to limit the address to this location.
:param pulumi.Input[str] network_interface: Network interface ID to associate with.
:param pulumi.Input[str] public_ipv4_pool: EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. Tags can only be applied to EIPs in a VPC.
:param pulumi.Input[bool] vpc: Boolean if the EIP is in a VPC or not.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[EipArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an Elastic IP resource.
> **Note:** EIP may require IGW to exist prior to association. Use `depends_on` to set an explicit dependency on the IGW.
> **Note:** Do not use `network_interface` to associate the EIP to `lb.LoadBalancer` or `ec2.NatGateway` resources. Instead use the `allocation_id` available in those resources to allow AWS to manage the association, otherwise you will see `AuthFailure` errors.
## Example Usage
Single EIP associated with an instance:
```python
import pulumi
import pulumi_aws as aws
lb = aws.ec2.Eip("lb",
instance=aws_instance["web"]["id"],
vpc=True)
```
Multiple EIPs associated with a single network interface:
```python
import pulumi
import pulumi_aws as aws
multi_ip = aws.ec2.NetworkInterface("multi-ip",
subnet_id=aws_subnet["main"]["id"],
private_ips=[
"10.0.0.10",
"10.0.0.11",
])
one = aws.ec2.Eip("one",
vpc=True,
network_interface=multi_ip.id,
associate_with_private_ip="10.0.0.10")
two = aws.ec2.Eip("two",
vpc=True,
network_interface=multi_ip.id,
associate_with_private_ip="10.0.0.11")
```
Attaching an EIP to an Instance with a pre-assigned private ip (VPC Only):
```python
import pulumi
import pulumi_aws as aws
default = aws.ec2.Vpc("default",
cidr_block="10.0.0.0/16",
enable_dns_hostnames=True)
gw = aws.ec2.InternetGateway("gw", vpc_id=default.id)
tf_test_subnet = aws.ec2.Subnet("tfTestSubnet",
vpc_id=default.id,
cidr_block="10.0.0.0/24",
map_public_ip_on_launch=True,
opts=pulumi.ResourceOptions(depends_on=[gw]))
foo = aws.ec2.Instance("foo",
ami="ami-5189a661",
instance_type="t2.micro",
private_ip="10.0.0.12",
subnet_id=tf_test_subnet.id)
bar = aws.ec2.Eip("bar",
vpc=True,
instance=foo.id,
associate_with_private_ip="10.0.0.12",
opts=pulumi.ResourceOptions(depends_on=[gw]))
```
Allocating EIP from the BYOIP pool:
```python
import pulumi
import pulumi_aws as aws
byoip_ip = aws.ec2.Eip("byoip-ip",
public_ipv4_pool="ipv4pool-ec2-012345",
vpc=True)
```
## Import
EIPs in a VPC can be imported using their Allocation ID, e.g.
```sh
$ pulumi import aws:ec2/eip:Eip bar eipalloc-00a10e96
```
EIPs in EC2 Classic can be imported using their Public IP, e.g.
```sh
$ pulumi import aws:ec2/eip:Eip bar 52.0.0.0
```
[1]https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateAddress.html
:param str resource_name: The name of the resource.
:param EipArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EipArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
associate_with_private_ip: Optional[pulumi.Input[str]] = None,
customer_owned_ipv4_pool: Optional[pulumi.Input[str]] = None,
instance: Optional[pulumi.Input[str]] = None,
network_border_group: Optional[pulumi.Input[str]] = None,
network_interface: Optional[pulumi.Input[str]] = None,
public_ipv4_pool: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EipArgs.__new__(EipArgs)
__props__.__dict__["associate_with_private_ip"] = associate_with_private_ip
__props__.__dict__["customer_owned_ipv4_pool"] = customer_owned_ipv4_pool
__props__.__dict__["instance"] = instance
__props__.__dict__["network_border_group"] = network_border_group
__props__.__dict__["network_interface"] = network_interface
__props__.__dict__["public_ipv4_pool"] = public_ipv4_pool
__props__.__dict__["tags"] = tags
__props__.__dict__["vpc"] = vpc
__props__.__dict__["allocation_id"] = None
__props__.__dict__["association_id"] = None
__props__.__dict__["carrier_ip"] = None
__props__.__dict__["customer_owned_ip"] = None
__props__.__dict__["domain"] = None
__props__.__dict__["private_dns"] = None
__props__.__dict__["private_ip"] = None
__props__.__dict__["public_dns"] = None
__props__.__dict__["public_ip"] = None
super(Eip, __self__).__init__(
'aws:ec2/eip:Eip',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allocation_id: Optional[pulumi.Input[str]] = None,
associate_with_private_ip: Optional[pulumi.Input[str]] = None,
association_id: Optional[pulumi.Input[str]] = None,
carrier_ip: Optional[pulumi.Input[str]] = None,
customer_owned_ip: Optional[pulumi.Input[str]] = None,
customer_owned_ipv4_pool: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
instance: Optional[pulumi.Input[str]] = None,
network_border_group: Optional[pulumi.Input[str]] = None,
network_interface: Optional[pulumi.Input[str]] = None,
private_dns: Optional[pulumi.Input[str]] = None,
private_ip: Optional[pulumi.Input[str]] = None,
public_dns: Optional[pulumi.Input[str]] = None,
public_ip: Optional[pulumi.Input[str]] = None,
public_ipv4_pool: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc: Optional[pulumi.Input[bool]] = None) -> 'Eip':
"""
Get an existing Eip resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] associate_with_private_ip: A user specified primary or secondary private IP address to
associate with the Elastic IP address. If no private IP address is specified,
the Elastic IP address is associated with the primary private IP address.
:param pulumi.Input[str] carrier_ip: The carrier IP address.
:param pulumi.Input[str] customer_owned_ip: Customer owned IP.
        :param pulumi.Input[str] customer_owned_ipv4_pool: The ID of a customer-owned address pool. For more on customer-owned IP addresses, check out the [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing)
:param pulumi.Input[str] domain: Indicates if this EIP is for use in VPC (`vpc`) or EC2 Classic (`standard`).
:param pulumi.Input[str] instance: EC2 instance ID.
:param pulumi.Input[str] network_border_group: The location from which the IP address is advertised. Use this parameter to limit the address to this location.
:param pulumi.Input[str] network_interface: Network interface ID to associate with.
:param pulumi.Input[str] private_dns: The Private DNS associated with the Elastic IP address (if in VPC).
:param pulumi.Input[str] private_ip: Contains the private IP address (if in VPC).
:param pulumi.Input[str] public_dns: Public DNS associated with the Elastic IP address.
:param pulumi.Input[str] public_ip: Contains the public IP address.
:param pulumi.Input[str] public_ipv4_pool: EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. Tags can only be applied to EIPs in a VPC.
:param pulumi.Input[bool] vpc: Boolean if the EIP is in a VPC or not.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _EipState.__new__(_EipState)
__props__.__dict__["allocation_id"] = allocation_id
__props__.__dict__["associate_with_private_ip"] = associate_with_private_ip
__props__.__dict__["association_id"] = association_id
__props__.__dict__["carrier_ip"] = carrier_ip
__props__.__dict__["customer_owned_ip"] = customer_owned_ip
__props__.__dict__["customer_owned_ipv4_pool"] = customer_owned_ipv4_pool
__props__.__dict__["domain"] = domain
__props__.__dict__["instance"] = instance
__props__.__dict__["network_border_group"] = network_border_group
__props__.__dict__["network_interface"] = network_interface
__props__.__dict__["private_dns"] = private_dns
__props__.__dict__["private_ip"] = private_ip
__props__.__dict__["public_dns"] = public_dns
__props__.__dict__["public_ip"] = public_ip
__props__.__dict__["public_ipv4_pool"] = public_ipv4_pool
__props__.__dict__["tags"] = tags
__props__.__dict__["vpc"] = vpc
return Eip(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allocationId")
def allocation_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "allocation_id")
@property
@pulumi.getter(name="associateWithPrivateIp")
def associate_with_private_ip(self) -> pulumi.Output[Optional[str]]:
"""
A user specified primary or secondary private IP address to
associate with the Elastic IP address. If no private IP address is specified,
the Elastic IP address is associated with the primary private IP address.
"""
return pulumi.get(self, "associate_with_private_ip")
@property
@pulumi.getter(name="associationId")
def association_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "association_id")
@property
@pulumi.getter(name="carrierIp")
def carrier_ip(self) -> pulumi.Output[str]:
"""
The carrier IP address.
"""
return pulumi.get(self, "carrier_ip")
@property
@pulumi.getter(name="customerOwnedIp")
def customer_owned_ip(self) -> pulumi.Output[str]:
"""
Customer owned IP.
"""
return pulumi.get(self, "customer_owned_ip")
@property
@pulumi.getter(name="customerOwnedIpv4Pool")
def customer_owned_ipv4_pool(self) -> pulumi.Output[Optional[str]]:
"""
        The ID of a customer-owned address pool. For more on customer-owned IP addresses, check out the [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing)
"""
return pulumi.get(self, "customer_owned_ipv4_pool")
@property
@pulumi.getter
def domain(self) -> pulumi.Output[str]:
"""
Indicates if this EIP is for use in VPC (`vpc`) or EC2 Classic (`standard`).
"""
return pulumi.get(self, "domain")
@property
@pulumi.getter
def instance(self) -> pulumi.Output[str]:
"""
EC2 instance ID.
"""
return pulumi.get(self, "instance")
@property
@pulumi.getter(name="networkBorderGroup")
def network_border_group(self) -> pulumi.Output[str]:
"""
The location from which the IP address is advertised. Use this parameter to limit the address to this location.
"""
return pulumi.get(self, "network_border_group")
@property
@pulumi.getter(name="networkInterface")
def network_interface(self) -> pulumi.Output[str]:
"""
Network interface ID to associate with.
"""
return pulumi.get(self, "network_interface")
@property
@pulumi.getter(name="privateDns")
def private_dns(self) -> pulumi.Output[str]:
"""
The Private DNS associated with the Elastic IP address (if in VPC).
"""
return pulumi.get(self, "private_dns")
@property
@pulumi.getter(name="privateIp")
def private_ip(self) -> pulumi.Output[str]:
"""
Contains the private IP address (if in VPC).
"""
return pulumi.get(self, "private_ip")
@property
@pulumi.getter(name="publicDns")
def public_dns(self) -> pulumi.Output[str]:
"""
Public DNS associated with the Elastic IP address.
"""
return pulumi.get(self, "public_dns")
@property
@pulumi.getter(name="publicIp")
def public_ip(self) -> pulumi.Output[str]:
"""
Contains the public IP address.
"""
return pulumi.get(self, "public_ip")
@property
@pulumi.getter(name="publicIpv4Pool")
def public_ipv4_pool(self) -> pulumi.Output[str]:
"""
EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs.
"""
return pulumi.get(self, "public_ipv4_pool")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource. Tags can only be applied to EIPs in a VPC.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def vpc(self) -> pulumi.Output[bool]:
"""
Boolean if the EIP is in a VPC or not.
"""
return pulumi.get(self, "vpc")
| 42.970427 | 292 | 0.643811 |
bdc874f10fe3f84ce85e218a408890c53bd42e18 | 4,133 | py | Python | course_retrieval/course_retrieval.py | neelkapadia/WolfPal | 5a5b2b37285acc56338383b90c39ea3ac755fed3 | [
"MIT"
] | 1 | 2018-03-25T17:32:07.000Z | 2018-03-25T17:32:07.000Z | course_retrieval/course_retrieval.py | neelkapadia/WolfPal | 5a5b2b37285acc56338383b90c39ea3ac755fed3 | [
"MIT"
] | 14 | 2018-04-02T00:32:10.000Z | 2018-04-25T20:15:16.000Z | course_retrieval/course_retrieval.py | neelkapadia/WolfPal | 5a5b2b37285acc56338383b90c39ea3ac755fed3 | [
"MIT"
] | 3 | 2018-04-25T20:04:02.000Z | 2018-06-27T00:47:14.000Z | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from bs4 import BeautifulSoup
import requests
from pymongo import MongoClient
import json
import script_db
import pickle
# def get_credentials():
# pkl_file = open('.cred.pkl', 'rb')
# data = pickle.load(pkl_file)
# return data[0], data[1], data[2], data[3]
# username, password, db_name, collection_name = get_credentials()
# client = MongoClient("ds239359.mlab.com", 39359, connectTimeoutMS=30000, socketTimeoutMS=None, socketKeepAlive=True)
# db = client["wolfpal"]
# db.authenticate("paylot","wolfpal123")
url = 'https://www.acs.ncsu.edu/php/coursecat/directory.php'
driver = webdriver.Chrome()
driver.get(url)
driver.find_element_by_xpath("//select[@name='course-career']/option[text()='Graduate']").click()
code = driver.find_element_by_id("auto-subject")
code.send_keys("CSC")
#code.send_keys(Keys.DOWN)
#code.send_keys(Keys.ENTER)
driver.find_element_by_id("subject-search-button").click()
time.sleep(10)
print driver.current_url
page = requests.get(driver.current_url)
#print page.text
#durlsoup = BeautifulSoup(page.text,"html.parser")
html_list = driver.find_element_by_id("course-search-results")
items = html_list.find_elements_by_tag_name("li")
# list_of_courses = []
# fp = open("CSC-course-list.txt","w")
# for item in items:
# text = item.text
# fp.write(text)
# fp.write('\n')
# fp.close()
prereqs = []
#days = []
descriptions = []
unit = []
title = []
names = []
ids = []
timings = []
schedule = []
courses = []
i = 1
# for item in items:
# print item.text
for item in items:
days = []
print item.text
driver.find_element_by_link_text(item.text).click()
time.sleep(10)
description = (driver.find_element_by_id("course-descr").text)
descriptions.append(description)
unit.append(driver.find_element_by_id("course-units").text)
print unit
prereq = driver.find_element_by_id("course-reqs").text
prereqs.append(prereq)
print prereqs
t = driver.find_element_by_id("modalTitle").text
print t
id_name = t.split(":")
id_name[0] = id_name[0].replace(" ","")
id_name[1]= id_name[1].lstrip()
title.append(driver.find_element_by_id("modalTitle").text)
print title
ids.append(id_name[0])
names.append(id_name[1])
print ids
print names
sem = driver.find_element_by_tag_name("em").text
if "Fall" and "Spring" in sem:
semester = "Fall,Spring"
elif "Fall" in sem:
semester = "Fall"
elif "Spring" in sem:
semester = "Spring"
print semester
if " future" in driver.find_element_by_id("course-sem").text:
days.append("NA")
else:
term = driver.find_element_by_partial_link_text("2018").click()
time.sleep(10)
dayl = driver.find_elements_by_css_selector('li.meet.hidden-xs')
for day in dayl:
days.append(day.text)
timing = driver.find_element_by_xpath("""//*[@id="search-results"]/table/tbody/tr/td[5]""").text.split('\n')
if "TBD" not in timing:
times = timing[1].split("-")
timeslot = times[0].split(" ")
ftime = timeslot[0]
else:
ftime = "TBD"
days.append("NA")
print days
print ftime
#script_db.db_insert(username,password,db_name,collection_name,id_name[0],id_name[1],"Fall",description)
driver.find_element_by_xpath("""//*[@id="details-modal"]/div/div/div[3]/button""").click()
time.sleep(10)
json_schedule = json.dumps(
{
'course_id': str(i),
'semester': semester,
'day':days,
'time':ftime,
'project': True,
'fieldwork': True,
'ratings': "4"
})
entry_s = json.loads(json_schedule)
schedule.append(entry_s)
#print schedule
json_courses = json.dumps(
{
'code': id_name[0],
'syllabus_id': str(i),
'course_name': id_name[1],
'description':description,
'core': True,
'channel_id': str(i)
})
entry_c = json.loads(json_courses)
courses.append(entry_c)
#print courses
i = i+1
fp1 = open("courses.txt","w")
fp1.write(str(courses))
fp1.close()
fp2 = open("schedule.txt","w")
fp2.write(str(schedule))
fp2.close()
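# Editor's note: str(courses) / str(schedule) write Python reprs rather than JSON;
# if machine-readable output is wanted, something along these lines (hypothetical
# file names) would keep the dumps valid JSON:
#	json.dump(courses, open("courses.json", "w"), ensure_ascii=False)
#	json.dump(schedule, open("schedule.json", "w"), ensure_ascii=False)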
#button.click()
#print urlsoup.prettify()
# print urlsoup
# description = urlsoup.find("p",id="course-descr").text
# print description
#print description
| 23.350282 | 118 | 0.701669 |
3048e1d24fa8803198fe729ea2defb299803ef1e | 8,155 | py | Python | docs/conf.py | ashish2py/django-postman | 1cd1ce446912fbd179594f1ed1b535d74e1f891f | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | ashish2py/django-postman | 1cd1ce446912fbd179594f1ed1b535d74e1f891f | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | ashish2py/django-postman | 1cd1ce446912fbd179594f1ed1b535d74e1f891f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import postman
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django postman'
copyright = u'2017, Ashish Tiwari'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = postman.__version__
# The full version, including alpha/beta/rc tags.
release = postman.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-postmandoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-postman.tex', u'django postman Documentation',
u'Ashish Tiwari', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-postman', u'django postman Documentation',
[u'Ashish Tiwari'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-postman', u'django postman Documentation',
u'Ashish Tiwari', 'django-postman', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.980392 | 80 | 0.717964 |
3c7f175bca1ae3b29c32bc4e309ecb19aaf7134a | 2,766 | py | Python | demo/tensorflow/nn_tensorboard.py | zhengxin2016/corpus | c33574cf195bfe4aa57def95349f6baa4cd8200c | [
"Apache-2.0"
] | null | null | null | demo/tensorflow/nn_tensorboard.py | zhengxin2016/corpus | c33574cf195bfe4aa57def95349f6baa4cd8200c | [
"Apache-2.0"
] | null | null | null | demo/tensorflow/nn_tensorboard.py | zhengxin2016/corpus | c33574cf195bfe4aa57def95349f6baa4cd8200c | [
"Apache-2.0"
] | 1 | 2018-07-04T05:38:09.000Z | 2018-07-04T05:38:09.000Z | #!/usr/bin/env python3
import os, sys
import tensorflow as tf
import numpy as np
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' #all info
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #warning, error
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #error
def add_layer(inputs, in_size, out_size, activation_function=None):
with tf.name_scope('weights'):
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
tf.summary.histogram('weights', Weights)
with tf.name_scope('biases'):
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
tf.summary.histogram('biases', biases)
with tf.name_scope('Wx_plus_b'):
Wx_plus_b = tf.matmul(inputs, Weights) + biases
keep_prob = 1
Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)
tf.summary.histogram('Wx_plus_b', Wx_plus_b)
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
tf.summary.histogram('outputs', outputs)
return outputs
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
with tf.name_scope('input_x'):
xs = tf.placeholder(tf.float32, [None, 1])
with tf.name_scope('input_y'):
ys = tf.placeholder(tf.float32, [None, 1])
with tf.name_scope('L1'):
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
with tf.name_scope('outputs'):
prediction = add_layer(l1, 10, 1, activation_function=None)
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),
reduction_indices=[1]))
tf.summary.scalar('loss', loss)
with tf.name_scope('train_step'):
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess = tf.Session()
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('logs/', sess.graph)
sess.run(init)
for i in range(1000):
sess.run(train_step, feed_dict={xs:x_data, ys:y_data})
if i % 100 == 0:
print(sess.run(loss, feed_dict={xs:x_data, ys:y_data}))
result = sess.run(merged, feed_dict={xs:x_data, ys:y_data})
writer.add_summary(result, i)
save_path = saver.save(sess, "save/save.ckpt")
print("Save to path:", save_path)
sess.close()
##########################
print('\n\nrestore....\n')
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, 'save/save.ckpt')
for i in range(1000):
sess.run(train_step, feed_dict={xs:x_data, ys:y_data})
if i % 100 == 0:
print(sess.run(loss, feed_dict={xs:x_data, ys:y_data}))
while 1:
x = input('input:')
x = float(x.strip())
y = sess.run(prediction, feed_dict={xs:[[x]]})
print(x, y)
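# Editor's note: the merged summaries above are written to ./logs and the model
# checkpoint to save/save.ckpt; the curves and histograms can be inspected with
#   tensorboard --logdir logs/
# (assumes TensorBoard is installed alongside TensorFlow).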
| 31.431818 | 70 | 0.661605 |
cbe7ef700346235eadb1ca0e379ade3421f45b33 | 7,543 | py | Python | examples/dmri_group_connectivity_mrtrix.py | dPys/nipype | 75030b29297808e7c9a9e91b411b685154dff60b | [
"Apache-2.0"
] | 1 | 2020-02-24T15:44:50.000Z | 2020-02-24T15:44:50.000Z | examples/dmri_group_connectivity_mrtrix.py | dPys/nipype | 75030b29297808e7c9a9e91b411b685154dff60b | [
"Apache-2.0"
] | null | null | null | examples/dmri_group_connectivity_mrtrix.py | dPys/nipype | 75030b29297808e7c9a9e91b411b685154dff60b | [
"Apache-2.0"
] | null | null | null | """
==================================================
dMRI: Group connectivity - MRtrix, FSL, FreeSurfer
==================================================
Introduction
============
This script, dmri_group_connectivity_mrtrix.py, runs group-based connectivity analysis using
the dmri.mrtrix.connectivity_mapping Nipype workflow. Further detail on the processing can be
found in :doc:`dmri_connectivity_advanced`. This tutorial can be run using::
python dmri_group_connectivity_mrtrix.py
We perform this analysis using one healthy subject and two subjects who suffer from Parkinson's disease.
The whole package (960 mb as .tar.gz / 1.3 gb uncompressed) including the Freesurfer directories for these subjects, can be acquired from here:
* http://db.tt/b6F1t0QV
A data package containing the outputs of this pipeline can be obtained from here:
* http://db.tt/elmMnIt1
Along with MRtrix, FSL, and Freesurfer, you must also have the Connectome File Format
library installed as well as the Connectome Mapper (cmp).
* MRtrix: http://www.brain.org.au/software/mrtrix/
* FSL: http://www.fmrib.ox.ac.uk/fsl/
* Freesurfer: http://surfer.nmr.mgh.harvard.edu/
* CTMK: http://www.cmtk.org/
* CFF: sudo apt-get install python-cfflib
Or on github at:
* CFFlib: https://github.com/LTS5/cfflib
* CMP: https://github.com/LTS5/cmp
Output data can be visualized in ConnectomeViewer, TrackVis, Gephi,
the MRtrix Viewer (mrview), and anything that can view Nifti files.
* ConnectomeViewer: https://github.com/LTS5/connectomeviewer
* TrackVis: http://trackvis.org/
* Gephi: http://gephi.org/
The fiber data is available in Numpy arrays, and the connectivity matrix
is also produced as a MATLAB matrix.
Import the workflows
--------------------
First, we import the necessary modules from nipype.
"""
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs # freesurfer
import os.path as op # system functions
import cmp
from niflow.nipype1.workflows.dmri.mrtrix.group_connectivity import create_group_connectivity_pipeline
from niflow.nipype1.workflows.dmri.connectivity.group_connectivity import (
create_merge_network_results_by_group_workflow,
create_merge_group_network_results_workflow,
create_average_networks_by_group_workflow)
"""
Set the proper directories
--------------------------
Here we point to the data and FreeSurfer subjects directories and set the FreeSurfer and FSL defaults.
"""
subjects_dir = op.abspath('groupcondatapackage/subjects/')
data_dir = op.abspath('groupcondatapackage/data/')
fs.FSCommand.set_default_subjects_dir(subjects_dir)
fsl.FSLCommand.set_default_output_type('NIFTI')
"""
Define the groups
-----------------
Here we define the groups for this study. We would like to search for differences between the healthy subject and the two
Parkinson's patients. The group list is defined as a Python dictionary (see http://docs.python.org/tutorial/datastructures.html),
with group IDs ('controls', 'parkinsons') as keys, and subject/patient names as values. We set the main output directory as 'groupcon'.
"""
group_list = {}
group_list['controls'] = ['cont17']
group_list['parkinsons'] = ['pat10', 'pat20']
"""
The output directory must be named as well.
"""
global output_dir
output_dir = op.abspath('dmri_group_connectivity_mrtrix')
"""
Main processing loop
====================
The title for the final grouped-network connectome file is dependent on the group names. The resulting file for this example
is 'parkinsons-controls.cff'. The following code implements the format a-b-c-...x.cff for an arbitrary number of groups.
.. warning::
The 'info' dictionary below is used to define the input files. In this case, the diffusion weighted image contains the string 'dti'.
The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme.
The workflow is created given the information input about the groups and subjects.
.. seealso::
* nipype/workflows/dmri/mrtrix/group_connectivity.py
* nipype/workflows/dmri/mrtrix/connectivity_mapping.py
* :doc:`dmri_connectivity_advanced`
We set values for absolute threshold used on the fractional anisotropy map. This is done
in order to identify single-fiber voxels. In brains with more damage, however, it may be necessary
to reduce the threshold, since their brains have lower average fractional anisotropy values.
We invert the b-vectors in the encoding file, and set the maximum harmonic order
of the pre-tractography spherical deconvolution step. This is done to show
how to set inputs that will affect both groups.
Next we create and run the second-level pipeline. The purpose of this workflow is simple:
It is used to merge each subject's CFF file into one, so that there is a single file containing
all of the networks for each group. This can be useful for performing Network Brain Statistics
using the NBS plugin in ConnectomeViewer.
.. seealso::
http://www.connectomeviewer.org/documentation/users/tutorials/tut_nbs.html
"""
title = ''
for idx, group_id in enumerate(group_list.keys()):
title += group_id
if not idx == len(list(group_list.keys())) - 1:
title += '-'
info = dict(
dwi=[['subject_id', 'dti']],
bvecs=[['subject_id', 'bvecs']],
bvals=[['subject_id', 'bvals']])
l1pipeline = create_group_connectivity_pipeline(
group_list, group_id, data_dir, subjects_dir, output_dir, info)
# Here with invert the b-vectors in the Y direction and set the maximum harmonic order of the
# spherical deconvolution step
l1pipeline.inputs.connectivity.mapping.fsl2mrtrix.invert_y = True
l1pipeline.inputs.connectivity.mapping.csdeconv.maximum_harmonic_order = 6
# Here we define the parcellation scheme and the number of tracks to produce
parcellation_name = 'scale500'
l1pipeline.inputs.connectivity.mapping.Parcellate.parcellation_name = parcellation_name
cmp_config = cmp.configuration.PipelineConfiguration()
cmp_config.parcellation_scheme = "Lausanne2008"
l1pipeline.inputs.connectivity.mapping.inputnode_within.resolution_network_file = cmp_config._get_lausanne_parcellation(
'Lausanne2008')[parcellation_name]['node_information_graphml']
l1pipeline.inputs.connectivity.mapping.probCSDstreamtrack.desired_number_of_tracks = 100000
l1pipeline.run()
l1pipeline.write_graph(format='eps', graph2use='flat')
# The second-level pipeline is created here
l2pipeline = create_merge_network_results_by_group_workflow(
group_list, group_id, data_dir, subjects_dir, output_dir)
l2pipeline.inputs.l2inputnode.network_file = cmp_config._get_lausanne_parcellation(
'Lausanne2008')[parcellation_name]['node_information_graphml']
l2pipeline.run()
l2pipeline.write_graph(format='eps', graph2use='flat')
"""
Now that the for loop is complete there are two grouped CFF files each containing the appropriate subjects.
It is also convenient to have every subject in a single CFF file, so that is what the third-level pipeline does.
"""
l3pipeline = create_merge_group_network_results_workflow(
group_list, data_dir, subjects_dir, output_dir, title)
l3pipeline.run()
l3pipeline.write_graph(format='eps', graph2use='flat')
"""
The fourth and final workflow averages the networks and saves them in another CFF file
"""
l4pipeline = create_average_networks_by_group_workflow(
group_list, data_dir, subjects_dir, output_dir, title)
l4pipeline.run()
l4pipeline.write_graph(format='eps', graph2use='flat')
| 40.772973 | 143 | 0.750497 |
5a9dbb3f70b5cf6cc5984c74e08a0aa42f6e26d6 | 650 | py | Python | sandbox/lib/jumpscale/Jumpscale/servers/gedis_websocket/GedisWebsocketFactory.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | sandbox/lib/jumpscale/Jumpscale/servers/gedis_websocket/GedisWebsocketFactory.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | sandbox/lib/jumpscale/Jumpscale/servers/gedis_websocket/GedisWebsocketFactory.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | import socket
from Jumpscale import j
from .GedisWebsocketServer import GedisWebsocketServer
JSConfigClient = j.baseclasses.object_config_collection
class GedisWebsocketFactory(JSConfigClient):
__jslocation__ = "j.servers.gedis_websocket"
_CHILDCLASS = GedisWebsocketServer
def _init(self, **kwargs):
self._default = None
@property
def default(self):
if not self._default:
self._default = self.get("default")
return self._default
def test(self):
self.client_gedis = j.clients.gedis.get("main", port=8900)
self.client_gedis.actors.chatbot.ping()
return "DONE"
| 25 | 66 | 0.7 |
54cdc6416ab2b295c3bee8ad2be0912c7318ef0f | 239 | py | Python | frappe/core/doctype/patch_log/test_patch_log.py | oryxsolutions/frappe | d193ea22d17ca40d57432040a8afad72287d9e23 | [
"MIT"
] | null | null | null | frappe/core/doctype/patch_log/test_patch_log.py | oryxsolutions/frappe | d193ea22d17ca40d57432040a8afad72287d9e23 | [
"MIT"
] | null | null | null | frappe/core/doctype/patch_log/test_patch_log.py | oryxsolutions/frappe | d193ea22d17ca40d57432040a8afad72287d9e23 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import unittest
# test_records = frappe.get_test_records('Patch Log')
class TestPatchLog(unittest.TestCase):
pass
| 21.727273 | 68 | 0.736402 |
473eae16743c94d7aeab444891b5853e47a1f072 | 7,627 | py | Python | cr-errands.py | bryant81/cr-statics | bde0cd75ed3cb2dd03eb8eca3d585ba92f0155fc | [
"MIT"
] | null | null | null | cr-errands.py | bryant81/cr-statics | bde0cd75ed3cb2dd03eb8eca3d585ba92f0155fc | [
"MIT"
] | 1 | 2018-01-30T06:32:17.000Z | 2018-01-30T08:53:38.000Z | cr-errands.py | bryant81/cr-statics | bde0cd75ed3cb2dd03eb8eca3d585ba92f0155fc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#encoding=utf-8
import sys
import login_client
import argparse
import xlrd, xlwt
from pyecharts import Pie, Page
rd_departments_list = ['嵌入部', '系统部', '软件部', '项目管理部', '硬件部', '结构部', '测试部', '研发中心']
cities = ("阿坝","阿拉善","阿里","安康","安庆","鞍山","安顺","安阳","澳门","北京","白银",
"保定","宝鸡","保山","包头","巴中","北海","蚌埠","本溪","毕节","滨州","百色","亳州",
"重庆","成都","长沙","长春","沧州","常德","昌都","长治","常州","巢湖","潮州","承德",
"郴州","赤峰","池州","崇左","楚雄","滁州","朝阳","大连","东莞","大理","丹东","大庆",
"大同","大兴安岭","德宏","德阳","德州","定西","迪庆","东营","鄂尔多斯","恩施","鄂州",
"福州","防城港","佛山","抚顺","抚州","阜新","阜阳","广州","桂林","贵阳","甘南",
"赣州","甘孜","广安","广元","贵港","果洛","杭州","哈尔滨","合肥","海口","呼和浩特",
"海北","海东","海南","海西","邯郸","汉中","鹤壁","河池","鹤岗","黑河","衡水","衡阳",
"河源","贺州","红河","淮安","淮北","怀化","淮南","黄冈","黄南","黄山","黄石","惠州",
"葫芦岛","呼伦贝尔","湖州","菏泽","济南","佳木斯","吉安","江门","焦作","嘉兴","嘉峪关",
"揭阳","吉林","金昌","晋城","景德镇","荆门","荆州","金华","济宁","晋中","锦州","九江",
"酒泉","昆明","开封","兰州","拉萨","来宾","莱芜","廊坊","乐山","凉山","连云港",
"聊城","辽阳","辽源","丽江","临沧","临汾","临夏","临沂","林芝","丽水","六安","六盘水",
"柳州","陇南","龙岩","娄底","漯河","洛阳","泸州","吕梁","马鞍山","茂名","眉山","梅州",
"绵阳","牡丹江","南京","南昌","南宁","宁波","南充","南平","南通","南阳","那曲","内江",
"宁德","怒江","盘锦","攀枝花","平顶山","平凉","萍乡","莆田","濮阳","青岛","黔东南",
"黔南","黔西南","庆阳","清远","秦皇岛","钦州","齐齐哈尔","泉州","曲靖","衢州","日喀则",
"日照","上海","深圳","苏州","沈阳","石家庄","三门峡","三明","三亚","商洛","商丘","上饶",
"山南","汕头","汕尾","韶关","绍兴","邵阳","十堰","朔州","四平","绥化","遂宁","随州","宿迁",
"宿州","天津","太原","泰安","泰州","台州","唐山","天水","铁岭","铜川","通化","通辽",
"铜陵","铜仁","台湾","武汉","乌鲁木齐","无锡","威海","潍坊","文山","温州","乌海","芜湖",
"乌兰察布","武威","梧州","厦门","西安","西宁","襄樊","湘潭","湘西","咸宁","咸阳","孝感",
"邢台","新乡","信阳","新余","忻州","西双版纳","宣城","许昌","徐州","香港","锡林郭勒","兴安",
"银川","雅安","延安","延边","盐城","阳江","阳泉","扬州","烟台","宜宾","宜昌","宜春",
"营口","益阳","永州","岳阳","榆林","运城","云浮","玉树","玉溪","玉林","杂多县","赞皇县",
"枣强县","枣阳市","枣庄","泽库县","增城市","曾都区","泽普县","泽州县","札达县","扎赉特旗",
"扎兰屯市","扎鲁特旗","扎囊县","张北县","张店区","章贡区","张家港","张家界","张家口","漳平市",
"漳浦县","章丘市","樟树市","张湾区","彰武县","漳县","张掖","漳州","长子县","湛河区","湛江",
"站前区","沾益县","诏安县","召陵区","昭平县","肇庆","昭通","赵县","昭阳区","招远市","肇源县",
"肇州县","柞水县","柘城县","浙江","镇安县","振安区","镇巴县","正安县","正定县","正定新区",
"正蓝旗","正宁县","蒸湘区","正镶白旗","正阳县","郑州","镇海区","镇江","浈江区","镇康县",
"镇赉县","镇平县","振兴区","镇雄县","镇原县","志丹县","治多县","芝罘区","枝江市",
"芷江侗族自治县","织金县","中方县","中江县","钟楼区","中牟县","中宁县","中山","中山区",
"钟山区","钟山县","中卫","钟祥市","中阳县","中原区","周村区","周口","周宁县","舟曲县","舟山",
"周至县","庄河市","诸城市","珠海","珠晖区","诸暨市","驻马店","准格尔旗","涿鹿县","卓尼",
"涿州市","卓资县","珠山区","竹山县","竹溪县","株洲","株洲县","淄博","子长县","淄川区","自贡",
"秭归县","紫金县","自流井区","资溪县","资兴市","资阳")
def get_city_from_remark(remark):
    # Map a free-text errand remark to a destination city; exhibitions ('展') and
    # training/recruiting trips ('训' / '招聘') are bucketed separately before city matching.
if remark.find('展') != -1:
return '展会'
if remark.find('训') != -1 or remark.find('招聘') != -1:
return '培训'
for city in cities:
if remark.find(city) != -1:
return city
spec_city = [('一所', '北京'),('通广', '济南'), ('西藏', '拉萨'), ('鲁软', '济南'), ('5000','合肥'), ('天地伟业','天津'),
('东电','重庆'), ('南瑞','南京'), ('山东','济南')]
for spec in spec_city:
if remark.find(spec[0]) != -1:
return spec[1]
print(remark)
return '未知'
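# Illustrative behaviour of get_city_from_remark (editor's addition; the remark strings
# below are made-up examples, not taken from any source data):
#   get_city_from_remark('赴济南客户现场调试')    -> '济南'
#   get_city_from_remark('参加北京安博会展台搭建')  -> '展会'   ('展' is matched before city names)
#   get_city_from_remark('出差内容未填写')        -> '未知'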
parser = argparse.ArgumentParser()
parser.add_argument('url', type=str, help='the website address of erp')
parser.add_argument('username', type=str, help='the username login with')
parser.add_argument('password', type=str, help='the password login with')
parser.add_argument('inputfile', type=str, help='the employee need static')
parser.add_argument('outputfile', type=str, help='the statics datasheet')
parser.add_argument('year', type=int, help='the year of the statics')
args = parser.parse_args()
input_sheet = xlrd.open_workbook(args.inputfile).sheets()[0]
output_workbook = xlwt.Workbook(encoding='utf-8')
output_sheet = output_workbook.add_sheet('2017-statics')
SHEET_NAME_INDEX = 0
SHEET_EMAIL_INDEX = 1
SHEET_ARRANDS_TIME = 2
SHEET_ARRANDS_CITY = 3
SHEET_ARRANDS_REMARK = 4
output_sheet.write(0, SHEET_NAME_INDEX, '姓名')
output_sheet.write(0, SHEET_EMAIL_INDEX, '邮箱')
output_sheet.write(0, SHEET_ARRANDS_TIME, '出差时间')
output_sheet.write(0, SHEET_ARRANDS_CITY, '出差城市')  # header for the city column was missing
output_sheet.write(0, SHEET_ARRANDS_REMARK, '出差事由')
employees_list = []
for index in range(0, input_sheet.nrows):
items = input_sheet.row(index)
name = items[0].value
email = items[1].value
employees_list.append([name, email])
client = login_client.LoginClient(args.url, args.username, args.password)
login_result, login_description = client.login_in()
if login_result:
print('登录成功')
else:
print('登录失败:', login_description)
sys.exit(-1)
output_row_index = 1
statics_count = 0
sales_errands_list = []
tranning_errands_list = []
exhibition_errands_list = []
for employee in employees_list:
name = employee[0]
email = employee[1]
employee_info = client.get_employee_info(name, email)
department_name = employee_info.department_name
if department_name in rd_departments_list:
errands_list = client.get_employee_errands(employee_info, args.year)
for errands in errands_list:
city = get_city_from_remark(errands[1])
if city == '展会':
exhibition_errands_list.append(errands)
elif city == '培训' or city == '招聘':
tranning_errands_list.append(errands)
else:
sales_errands_list.append((errands[0], city, errands[1]))
output_sheet.write(output_row_index, SHEET_NAME_INDEX, name)
output_sheet.write(output_row_index, SHEET_EMAIL_INDEX, email)
output_sheet.write(output_row_index, SHEET_ARRANDS_TIME, int(errands[0]))
output_sheet.write(output_row_index, SHEET_ARRANDS_CITY, city)
output_sheet.write(output_row_index, SHEET_ARRANDS_REMARK, errands[1])
output_row_index = output_row_index + 1
statics_count = statics_count + 1
#print('姓名:%s 出差时间:%s 出差原因:%s'%(name, errands[0], errands[1]))
#print(exhibition_errands_list)
#print(tranning_errands_list)
#print(sales_errands_list)
city_errands_count = {}
errands_count = 0
city_errands_time = {}
errands_time = 0
for errands in sales_errands_list:
city = errands[1]
if city not in city_errands_count:
city_errands_count[city] = 1
else:
city_errands_count[city] = city_errands_count[city] + 1
errands_count = errands_count + 1
if city not in city_errands_time:
city_errands_time[city] = int(errands[0])
else:
city_errands_time[city] = city_errands_time[city] + int(errands[0])
errands_time = errands_time + int(errands[0])
#print(city_errands_count)
#print(city_errands_time)
page = Page()
pie_errands_count = Pie('2017研发出差次数统计(单位:次) 累计:%d次' % errands_count, width=1280, height=720, title_top='bottom')
pie_errands_count.add('', city_errands_count.keys(), city_errands_count.values(), is_label_show=True, label_text_color='#F00', legend_top='bottom')
pie_errands_time = Pie('2017研发出差时间统计(单位:工作小时/每个工作日7.5小时) 累计: %d 小时' % errands_time, width=1280, height=720, title_top='bottom')
pie_errands_time.add('', city_errands_time.keys(), city_errands_time.values(), is_label_show=True, label_text_color='#F00', legend_top='bottom')
page.add(pie_errands_count)
page.add(pie_errands_time)
page.render('2017研发出差统计.html')
output_workbook.save(args.outputfile)
| 39.112821 | 147 | 0.5748 |
c615a44852de414106dd329a0287a1b581f395dd | 1,486 | py | Python | signac/core/json.py | Carreau/signac | 7086d8981c926703a023654d1c59bbedcfae6298 | [
"BSD-3-Clause"
] | 1 | 2020-12-28T18:00:24.000Z | 2020-12-28T18:00:24.000Z | signac/core/json.py | Carreau/signac | 7086d8981c926703a023654d1c59bbedcfae6298 | [
"BSD-3-Clause"
] | 81 | 2020-12-28T20:23:57.000Z | 2022-03-01T06:03:40.000Z | signac/core/json.py | admdev8/signac | d639e682ca7ebaff781d68621a2d86cc26de04b9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2018 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
"""Wrapper around json parsing library."""
import logging
from json import load, loads, JSONEncoder
from json.decoder import JSONDecodeError
from typing import Any, Dict, Optional
logger = logging.getLogger(__name__)
try:
import numpy
NUMPY = True
except ImportError:
NUMPY = False
class CustomJSONEncoder(JSONEncoder):
"""Attempt to JSON-encode objects beyond the default supported types.
This encoder will attempt to obtain a JSON-serializable representation of
an object that is otherwise not serializable, by calling the object's
`_as_dict()` method.
"""
def default(self, o: Any) -> Dict[str, Any]:
if NUMPY:
if isinstance(o, numpy.number):
return o.item()
elif isinstance(o, numpy.ndarray):
return o.tolist()
try:
return o._as_dict()
except AttributeError:
# Call the super method, which raises a TypeError if it cannot
# encode the object.
return super(CustomJSONEncoder, self).default(o)
def dumps(o: Any, sort_keys: bool = False, indent: Optional[int] = None) -> str:
"""Convert a JSON-compatible mapping into a string."""
return CustomJSONEncoder(sort_keys=sort_keys, indent=indent).encode(o)
__all__ = ['loads', 'load', 'dumps', 'JSONDecodeError']
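# A minimal, self-contained demonstration of the encoder above (editor's addition;
# the ``Frame`` class is hypothetical and only illustrates the ``_as_dict()`` fallback).
# Guarded so that importing this module stays free of side effects.
if __name__ == "__main__":
    class Frame:
        def __init__(self, step: int) -> None:
            self.step = step
        def _as_dict(self) -> Dict[str, Any]:
            # CustomJSONEncoder.default() falls back to this method for unknown types.
            return {"step": self.step}
    print(dumps({"frame": Frame(3)}, sort_keys=True))  # -> {"frame": {"step": 3}}
    if NUMPY:
        print(dumps(numpy.arange(3)))  # -> [0, 1, 2]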
| 32.304348 | 80 | 0.674966 |
20c0e9cb6a2271abe5d5eaf76fd28f36518fa5b9 | 55,089 | py | Python | mathchem/mathchem.py | Pshemysuaf/mathchem-package | e084a838fc836325872f37e3f638a0e13fd368f8 | [
"MIT"
] | null | null | null | mathchem/mathchem.py | Pshemysuaf/mathchem-package | e084a838fc836325872f37e3f638a0e13fd368f8 | [
"MIT"
] | null | null | null | mathchem/mathchem.py | Pshemysuaf/mathchem-package | e084a838fc836325872f37e3f638a0e13fd368f8 | [
"MIT"
] | null | null | null | import numpy as np
class Mol():
r"""
Molecule.
"""
__g6_string = ''
# Adjacency matrix
__A = []
# Incidence matrix
__B = []
# Laplacian matrix
__L = []
# Normalized laplacian matrix
__NL = []
# Signless laplacian matrix
__Q = []
# Distance matrix
__D = []
# Resistance Distance matrix
__RD = []
__Order = 0
__Edges = []
__Sage_graph = None
__NX_graph = None
__Degrees = []
__Spectrum = []
__Laplacian_spectrum = []
__Distance_spectrum = []
__Norm_laplacian_spectrum = []
__Signless_laplacian_spectrum = []
__RD_spectrum = []
__Is_connected = None
# Switch it to False when we know that the graph is connected. Useful for big calculations
__Check_connectedness = True
def _reset_(self):
""" Reset all attributes """
self.__g6_string = ''
# Adjacency matrix
self.__A = []
# Incidence matrix
self.__B = []
# Laplacian matrix
self.__L = []
# Normalized laplacian matrix
self.__NL = []
# Signless laplacian matrix
self.__Q = []
# Distance matrix
self.__D = []
# Resistance Distance matrix
self.__RD = []
self.__Order = 0
self.__Edges = []
self.__Sage_graph = None
self.__NX_graph = None
self.__Degrees = []
self.__Spectrum = []
self.__Laplacian_spectrum = []
self.__Distance_spectrum = []
self.__Norm_laplacian_spectrum = []
self.__Signless_laplacian_spectrum = []
self.__RD_spectrum = []
self.__Is_connected = None
# allow to set structure from somewhere
# used in utilites
def _set_A(self, A):
self.__A = A
def _set_Edges(self, edges):
self.__Edges = edges
def _set_Order(self, order):
self.__Order = order
# native method to initialize Mol class is to provide g6 string
def __init__(self, string=None, check_connectedness=True):
""" Molecular graph class """
self.__Check_connectedness = check_connectedness
if string != None:
if string[0] == '>':
if string.startswith('>>graph6<<'):
string = string[10:]
elif string.startswith('>>sparse6<<'):
string = string[11:]
if string[0] == ':':
self.read_s6(string)
else:
self.read_g6(string)
    def __repr__(self):
        # __A is initialized to an empty list, so the original "!= None" test was
        # always true; report an empty graph until a structure has been loaded
        if self.__A:
            return 'Molecular graph on ' + str(self.__Order) + \
                ' vertices and ' + str(self.size()) + ' edges'
        return 'Empty Molecular graph'
def __len__(self):
if self.__A != None:
return len(self.__A)
else:
return 0
def set_check_connectedness(self, c):
""" Switch on/off of checking connectedness for the graph. Might be useful in batch calculations to economy time.
args: c (True/False)
"""
        self.__Check_connectedness = c
def g6_string(self):
""" Return a graph6 string representation of the graph
Alias: graph6_string """
return self.__g6_string
# alias like in Sage:
graph6_string = g6_string
def order(self):
""" Return number of vertices """
return self.__Order
# alias for order
n = order
def edges(self):
""" Return list of edges """
return self.__Edges
def size(self):
""" Return number of edges"""
return len(self.__Edges)
# alias for size
m = size
def vertices(self):
""" Return list of vertices """
return range(self.__Order)
def sage_graph(self):
""" Return Sage Graph object """
if self.__Sage_graph is None: self._init_sage_graph_()
return self.__Sage_graph
def NX_graph(self):
""" Return NetworkX graph object """
if self.__NX_graph is None:
import networkx as nx
self.__NX_graph = nx.Graph(self.__Edges)
return self.__NX_graph
nx_graph = NX_graph
def _init_sage_graph_(self):
""" Initialize SAGE graph from Adjacency matrix"""
from sage.graphs.graph import Graph
self.__Sage_graph = Graph(self.__Edges)
def read_g6(self, s):
""" Initialize graph from graph6 string """
def graph_bit(pos, off):
            return ((ord(s[off + 1 + pos // 6]) - 63) & (2 ** (5 - pos % 6))) != 0
if s.startswith('>>graph6<<'):
s = s[10:]
# reset all the attributes before changing the structure
self._reset_()
n = ord(s[0]) - 63
off = 0
if n == 63:
if ord(s[1]) - 63 != 63:
n = ((ord(s[1]) - 63) << 12) + ((ord(s[2]) - 63) << 6) + ord(s[3]) - 63
off = 3
else:
n = ((ord(s[2]) - 63) << 30) + ((ord(s[3]) - 63) << 24) + ((ord(s[4]) - 63) << 18) + (
(ord(s[5]) - 63) << 12) + ((ord(s[6]) - 63) << 6) + ord(s[7]) - 63
off = 7
self.__Order = n
self.__A = [[0 for col in range(n)] for row in range(n)]
i = 0;
j = 1
self.__Edges = [];
        for x in range(n * (n - 1) // 2):
if graph_bit(x, off):
self.__A[i][j] = 1
self.__A[j][i] = 1
self.__Edges.append((i, j))
if j - i == 1:
i = 0
j += 1
else:
i += 1
self.__g6_string = s
read_graph6 = read_g6
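    # Illustrative usage (editor's sketch): 'A_' is the graph6 string of a
    # single edge on two vertices.
    #   >>> m = Mol('A_')
    #   >>> m.order(), m.size()
    #   (2, 1)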
def read_s6(self, s):
""" Initialize graph from sparse6 string """
def graph_bit(pos, off):
            return ((ord(s[off + 1 + pos // 6]) - 63) & (2 ** (5 - pos % 6))) != 0
if s.startswith('>>sparse6<<'):
s = s[11:]
if not s[0] == ':':
print('This is not a sparse6 format!')
return False
# reset all the attributes before changing the structure
self._reset_()
s = s[1:]
n = ord(s[0]) - 63
off = 0
if n == 63:
if ord(s[1]) - 63 != 63:
n = ((ord(s[1]) - 63) << 12) + ((ord(s[2]) - 63) << 6) + ord(s[3]) - 63
off = 3
else:
n = ((ord(s[2]) - 63) << 30) + ((ord(s[3]) - 63) << 24) + ((ord(s[4]) - 63) << 18) + (
(ord(s[5]) - 63) << 12) + ((ord(s[6]) - 63) << 6) + ord(s[7]) - 63
off = 7
self.__Order = n
k = 1
while 1 << k < n:
k += 1
data = s[off + 1:]
# print n,k
# print data
def parseData():
"""Return stream of pairs b[i], x[i] for sparse6 format."""
chunks = iter(data)
d = None # partial data word
dLen = 0 # how many unparsed bits are left in d
while 1:
if dLen < 1:
d = ord(next(chunks)) - 63
dLen = 6
dLen -= 1
b = (d >> dLen) & 1 # grab top remaining bit
x = d & ((1 << dLen) - 1) # partially built up value of x
xLen = dLen # how many bits included so far in x
while xLen < k: # now grab full chunks until we have enough
d = ord(next(chunks)) - 63
dLen = 6
x = (x << 6) + d
xLen += 6
x = (x >> (xLen - k)) # shift back the extra bits
dLen = xLen - k
yield b, x
self.__A = [[0 for col in range(n)] for row in range(n)]
self.__Edges = [];
v = 0
for b, x in parseData():
if b: v += 1
if x >= n:
break # padding with ones can cause overlarge number here
elif x > v:
v = x
else:
self.__A[x][v] = 1
self.__A[v][x] = 1
self.__Edges.append((x, v))
self.__g6_string = ''
read_sparse6 = read_s6
def read_matrix(self, matrix):
"""Initialize graph from adjacency matrix including numpy.matrix"""
if type(matrix) == np.matrix:
matrix = matrix.astype(int).tolist()
self._reset_()
self.__Order = len(matrix)
self.__A = matrix
for i in range(self.__Order):
for j in range(i):
if matrix[i][j] == 1:
self.__Edges.append((i, j))
def read_edgelist(self, edges):
"""Initialize graph from list of edges.
Example:
m = mathchem.Mol()
m.read_edgelist( [(4,3),(3,1),(1,4))] )"""
# first relabel nodes
nodes = []
for e in edges:
if not e[0] in nodes: nodes.append(e[0])
if not e[1] in nodes: nodes.append(e[1])
self._reset_()
self.__Order = len(nodes)
d = dict(zip(nodes, range(len(nodes))))
self.__Edges = [(d[e[0]], d[e[1]]) for e in edges]
self.__A = [[0 for col in range(self.__Order)] for row in range(self.__Order)]
for i, j in self.__Edges:
self.__A[i][j] = 1
self.__A[j][i] = 1
def write_dot_file(self, filename):
f_out = open(filename, 'w')
f_out.writelines('graph Mol {\n')
for (i, j) in self.edges():
f_out.writelines(' ' + str(i) + ' -- ' + str(j) + ';\n')
f_out.writelines('}')
f_out.close()
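    # Illustrative usage (editor's sketch): build the 4-vertex path from an
    # explicit edge list and query its basic invariants.
    #   >>> m = Mol()
    #   >>> m.read_edgelist([(0, 1), (1, 2), (2, 3)])
    #   >>> m.order(), m.size()
    #   (4, 3)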
#
#
# matrices
#
#
def adjacency_matrix(self):
""" Return Adjacency matrix
Alias : A
"""
return self.__A
A = adjacency_matrix
def incidence_matrix(self):
""" Return Incidence matrix
Alias: B
"""
if self.__B == []:
def func(u_v):
u, v = u_v
col = [0] * self.__Order
col[u] = 1
col[v] = 1
return col
# apply func to each edge
b = map(lambda e: func(e), self.edges())
# transpose the result
self.__B = map(list, zip(*b))
return self.__B
B = incidence_matrix
def laplacian_matrix(self):
""" Return Laplacian matrix
L = D-A
where D - matrix whose diagonal elements are the degrees of the corresponding vertices
A - adjacency matrix
Alias : L
"""
if self.__L == []:
self.__L = np.diag(self.degrees()) - np.matrix(self.__A);
return self.__L
L = laplacian_matrix
def signless_laplacian_matrix(self):
""" Return Signless Laplacian matrix
Q = D+A
Alias : Q
"""
if self.__Q == []:
self.__Q = np.diag(self.degrees()) + np.matrix(self.__A);
return self.__Q
Q = signless_laplacian_matrix
def normalized_laplacian_matrix(self):
""" Return Normalized Laplacian matrix
        NL = deg^(-1/2) * L * deg^(-1/2)
Alias : NL
"""
## TODO: check if we have zeros in degrees()
if self.__NL == []:
d1 = np.diag(np.power(self.degrees(), -.5))
            d2 = np.diag(np.power(self.degrees(), -.5))
self.__NL = d1 * self.laplacian_matrix() * d2
return self.__NL
NL = normalized_laplacian_matrix
def distance_matrix(self):
""" Return Distance matrix
Alias : D
"""
if self.__Order == 0: return []
if self.__D == []:
# use here float only for using np.inf - infinity
A = np.matrix(self.__A, dtype=float)
n, m = A.shape
I = np.identity(n)
A[A == 0] = np.inf # set zero entries to inf
A[I == 1] = 0 # except diagonal which should be zero
for i in range(n):
r = A[i, :]
A = np.minimum(A, r + r.T)
self.__D = np.matrix(A, dtype=int)
return self.__D
D = distance_matrix
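    # Worked example (editor's note): for the path 0-1-2 the adjacency matrix
    # has A[0][2] = 0, which is first replaced by inf and then relaxed to
    # D[0, 2] = A[0, 1] + A[1, 2] = 2 by the row-wise minimum update above
    # (a Floyd-Warshall style relaxation through each intermediate vertex i).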
def reciprocal_distance_matrix(self):
""" Return Reciprocal Distance matrix """
rd = np.matrix(self.distance_matrix(), dtype=float)
# probably there exists more python way to apply a function to each element of matrix
for i in range(self.__Order):
for j in range(self.__Order):
if not rd[i, j] == 0: rd[i, j] = 1 / rd[i, j]
return rd
def resistance_distance_matrix(self):
""" Return Resistance Distance matrix """
if not self.is_connected() or self.__Order == 0:
return False
if self.__RD == []:
# from numpy import linalg as la
n = self.__Order
s = n * self.laplacian_matrix() + 1
sn = n * np.linalg.inv(s)
RD = np.ndarray((n, n))
for i in range(n):
for j in range(n):
RD[i, j] = np.float64(
np.longdouble(sn[i, i]) + np.longdouble(sn[j, j]) - 2 * np.longdouble(sn[i, j]))
self.__RD = RD
return self.__RD
def seidel_matrix(self):
""" Return Seidel matrix
S = J - I - 2A
Alias: S
"""
n = self.__Order
return np.ones((n, n)) - np.identity(n) - 2 * np.matrix(self.__A)
S = seidel_matrix
#
#
# Graph invariants
#
#
def diameter(self):
""" Return diameter of the graph
Diameter is the maximum value of distance matrix
"""
if self.__Order == 0: return 0
return self.distance_matrix().max()
def degrees(self):
""" Return degree of the vertex
Alias : deg
"""
if self.__Degrees == []:
self.__Degrees = map(lambda r: sum(r), self.__A)
            ## calculate degrees for all vertices
return self.__Degrees
deg = degrees
def eccentricity(self):
""" Eccentricity of the graph for all its vertices"""
if self.__Order == 0: return None
return self.distance_matrix().max(axis=0).tolist()[0]
def distances_from_vertex(self, v):
""" Return list of all distances from a given vertex to all others"""
# used to test graph where it is connected or not
seen = {}
level = 0
nextlevel = [v]
while nextlevel:
thislevel = nextlevel
nextlevel = []
for v in thislevel:
if v not in seen:
seen[v] = level
nb = [i for (i, j) in zip(range(len(self.__A[v])), self.__A[v]) if j != 0]
nextlevel.extend(nb)
# if (cutoff is not None and cutoff <= level): break
level = level + 1
return seen
def is_connected(self):
""" Return True/False depends on the graph is connected or not """
if self.__Order == 0: return False
if not self.__Check_connectedness: return True
if self.__Is_connected is None:
# we take vertex 0 and check whether we can reach all other vertices
self.__Is_connected = len(self.distances_from_vertex(0)) == self.order()
return self.__Is_connected
#
#
# Graph spectra
#
#
def spectrum(self, matrix="adjacency"):
r""" Spectrum of the graph
args:
matrix (str or matrix)
'adjacency' or 'A' : default
'laplacian' or 'L'
'distance' or 'D'
'signless_laplacian' or 'Q'
'normalized_laplacian' or 'NL'
'resistance_distance' or 'RD'
'reciprocal_distance'
arbitrary matrix
"""
from numpy import linalg as la
if type(matrix) is str:
if self.__Order == 0: return []
if matrix == "adjacency" or matrix == "A":
if self.__Spectrum == []:
s = la.eigvalsh(self.__A).tolist()
s.sort(reverse=True)
self.__Spectrum = s
return self.__Spectrum
elif matrix == "laplacian" or matrix == "L":
if self.__Laplacian_spectrum == []:
s = la.eigvalsh(self.laplacian_matrix()).tolist()
s.sort(reverse=True)
self.__Laplacian_spectrum = map(lambda x: x if x > 0 else 0, s)
return self.__Laplacian_spectrum
elif matrix == "distance" or matrix == "D":
if self.__Distance_spectrum == []:
s = la.eigvalsh(self.distance_matrix()).tolist()
s.sort(reverse=True)
self.__Distance_spectrum = s
return self.__Distance_spectrum
elif matrix == "signless_laplacian" or matrix == "Q":
if self.__Signless_laplacian_spectrum == []:
## TODO: check if we have zeros in degrees()
s = la.eigvalsh(self.signless_laplacian_matrix()).tolist()
s.sort(reverse=True)
self.__Signless_laplacian_spectrum = map(lambda x: x if x > 0 else 0, s)
return self.__Signless_laplacian_spectrum
elif matrix == "normalized_laplacian" or matrix == "NL":
if self.__Norm_laplacian_spectrum == []:
## TODO: check if we have zeros in degrees()
s = la.eigvalsh(self.normalized_laplacian_matrix()).tolist()
s.sort(reverse=True)
self.__Norm_laplacian_spectrum = s
return self.__Norm_laplacian_spectrum
elif matrix == "resistance_distance" or matrix == "RD":
if self.__RD_spectrum == []:
s = la.eigvalsh(self.resistance_distance_matrix()).tolist()
s.sort(reverse=True)
self.__RD_spectrum = s
return self.__RD_spectrum
# NO CACHE
elif matrix == "reciprocal_distance":
s = la.eigvalsh(self.reciprocal_distance_matrix()).tolist()
s.sort(reverse=True)
return s
else:
return False
# if the parameter is an arbitrary matrix
# DEPRECATED:
# use mathchem.spectrum(matrix) for arbitrary matrices
#
else:
s = la.eigvalsh(matrix).tolist()
s.sort(reverse=True)
return s
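    # Illustrative usage (editor's sketch; eigenvalues rounded): for the
    # triangle K3,
    #   >>> m = Mol()
    #   >>> m.read_edgelist([(0, 1), (1, 2), (0, 2)])
    #   >>> m.spectrum()             # adjacency spectrum
    #   [2.0, -1.0, -1.0]
    #   >>> m.spectrum('laplacian')
    #   [3.0, 3.0, 0.0]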
# for arbitrary matrices use:
# mathchem.spectral_moment(matrix)
def spectral_moment(self, k, matrix="adjacency"):
""" Return k-th spectral moment
parameters: matrix - see spectrum help
"""
return np.sum(np.power(self.spectrum(matrix), k))
# for arbitrary matrices use:
# mathchem.spectral_radius(matrix)
def spectral_radius(self, matrix="adjacency"):
s = self.spectrum(matrix)
return max(abs(s[0]), abs(s[len(s) - 1]))
# for arbitrary matrices use:
# mathchem.energy(matrix)
def energy(self, matrix="adjacency"):
""" Return energy of the graph
parameters: matrix - see spectrum help
"""
if self.__Order == 0: return False
s = self.spectrum(matrix)
a = np.sum(s, dtype=np.longdouble) / len(s)
return np.float64(np.sum(map(lambda x: abs(x - a), s), dtype=np.longdouble))
def incidence_energy(self):
""" Return incidence energy (IE)
Incidence energy is the sum of singular values of incidence matrix
"""
if self.__Order == 0: return False
from numpy.linalg import svd
return np.float64(np.sum(svd(self.incidence_matrix(), compute_uv=False), dtype=np.longdouble))
#
#
# Chemical indices
#
#
def zagreb_m1_index(self):
""" Zagreb M1 Index """
return sum(map(lambda d: d ** 2, self.degrees()))
def zagreb_m2_index(self):
""" Zagreb M2 Index
        The molecular graph must contain at least one edge, otherwise the function returns False
Zagreb M2 Index is a special case of Connectivity Index with power = 1"""
return sum(map(lambda e: self.degrees()[e[0]] * self.degrees()[e[1]], self.edges()))
def zagreb_m1_coindex(self):
""" Zagreb M1 Coindex """
return 2 * self.size() * (self.__Order - 1) - self.zagreb_m1_index()
def zagreb_m2_coindex(self):
""" Zagreb M2 Coindex """
return 2 * (self.size() ** 2) - self.zagreb_m2_index() - self.zagreb_m1_index() * .5
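    # Worked example (editor's note): for the 4-vertex path the degree
    # sequence is [1, 2, 2, 1], so zagreb_m1_index() = 1 + 4 + 4 + 1 = 10 and
    # zagreb_m2_index() = 1*2 + 2*2 + 2*1 = 8.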
def connectivity_index(self, power):
""" Connectivity index (R)"""
E = self.edges() # E - all edges
if len(E) == 0: return 0
return np.float64(
np.sum(map(lambda e: (self.degrees()[e[0]] * self.degrees()[e[1]]) ** power, E), dtype=np.longdouble))
def augmented_zagreb_index(self):
""" Augmented Zagreb Index"""
E = self.edges() # E - all edges
d = self.degrees()
if len(E) < 2: return 0
        return np.float64(np.sum(map(lambda e: (np.longdouble(d[e[0]] * d[e[1]]) / (d[e[0]] + d[e[1]] - 2)) ** 3, E),
dtype=np.longdouble))
def sum_connectivity_index(self):
""" Sum-Connectivity index"""
E = self.edges() # E - all edges
if len(E) == 0: return 0
return np.float64(
np.sum(map(lambda e: (self.degrees()[e[0]] + self.degrees()[e[1]]) ** (-0.5), E), dtype=np.longdouble))
def geometric_arithmetic_index(self):
""" Geometric-Arithmetic index"""
E = self.edges() # E - all edges
if len(E) == 0: return 0
return np.float64(np.sum(map(lambda e: 2.0 * np.sqrt(self.degrees()[e[0]] * self.degrees()[e[1]]) / (
                self.degrees()[e[0]] + self.degrees()[e[1]]), E), dtype=np.longdouble))
def eccentric_connectivity_index(self):
""" Eccentric Connectivity Index
        The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
return sum(map(lambda a, b: a * b, self.degrees(), self.eccentricity()))
def randic_index(self):
""" Randic Index
        The molecular graph must contain at least one edge, otherwise the function returns False
Randic Index is a special case of Connectivity Index with power = -1/2"""
return self.connectivity_index(-0.5)
def atom_bond_connectivity_index(self):
""" Atom-Bond Connectivity Index (ABC) """
s = np.longdouble(0) # summator
for (u, v) in self.edges():
d1 = np.float64(self.degrees()[u])
d2 = np.float64(self.degrees()[v])
s += np.longdouble(((d1 + d2 - 2) / (d1 * d2)) ** .5)
return np.float64(s)
def estrada_index(self, matrix="adjacency"):
""" Estrada Index (EE)
args:
matrix -- see spectrum for help, default value is 'adjacency'
There is an alias 'distance_estrada_index' for distance matrix
"""
return np.float64(np.sum(map(lambda x: np.exp(x.real), self.spectrum(matrix)), dtype=np.longdouble))
def distance_estrada_index(self):
""" Distance Estrada Index (DEE)
Special case of Estrada index with distance matrix
"""
return self.estrada_index('distance')
def degree_distance(self):
""" Degree Distance (DD)
        The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
dd = np.matrix(self.degrees()) * self.distance_matrix().sum(axis=1)
return dd[0, 0]
def reverse_degree_distance(self):
""" Reverse Distance Degree (rDD)
        The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
return 2 * (self.order() - 1) * len(self.edges()) * self.diameter() - self.degree_distance()
def molecular_topological_index(self):
""" (Schultz) Molecular Topological Index (MTI)
        The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
# (A+D)*d
A = np.matrix(self.__A)
d = np.matrix(self.degrees())
return np.float64(((A + self.distance_matrix()) * d.T).sum(dtype=np.longdouble))
def eccentric_distance_sum(self):
""" Distance Sum
        The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
return (self.eccentricity() * self.distance_matrix().sum(axis=1))[0, 0]
# strange - it is slow ((
def balaban_j_index(self):
""" Balaban J index
        The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
ds = self.distance_matrix().sum(axis=1)
m = len(self.edges())
k = (m / (m - self.__Order + 2.0))
        return np.float64(k * np.sum(map(lambda u: 1 / np.sqrt(ds[u[0]][0, 0] * ds[u[1]][0, 0]), self.edges()),
dtype=np.longdouble))
def sum_balaban_index(self):
""" Sum Balaban index
        The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
ds = self.distance_matrix().sum(axis=1)
m = len(self.edges())
k = (m / (m - self.__Order + 2.0))
return np.float64(k * np.sum(map(lambda u: 1 / np.sqrt((ds[u[0]][0, 0] + ds[u[1]][0, 0])), self.edges()),
dtype=np.longdouble))
def kirchhoff_index(self):
""" Kirchhoff Index (Kf)
Kf = 1/2 * sum_i sum_j RD[i,j]
Based on resistance distance matrix RD
Alias: resistance
        The molecular graph must be connected, otherwise the function returns False
"""
if not self.is_connected():
return False
return np.float64(self.resistance_distance_matrix().sum(dtype=np.longdouble) / 2)
resistance = kirchhoff_index
def wiener_index(self):
""" Wiener Index (W)
W = 1/2 * sum_i sum_j D[i,j]
where D is distance matrix
        The molecular graph must be connected, otherwise the function returns False
"""
if not self.is_connected():
return False
return self.distance_matrix().sum(dtype=np.float64) / 2
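    # Worked example (editor's note): for the 4-vertex path the pairwise
    # distances are 1, 2, 3, 1, 2, 1, so wiener_index() returns 10.0.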
def terminal_wiener_index(self):
""" Calculate Terminal Wiener Index (TW)
TW = Sum of all distances between pendent vertices (with degree = 1)
"""
if not self.is_connected(): return False
s = 0
for u in range(self.order()):
if self.degrees()[u] != 1: continue
for v in range(u + 1, self.order()):
if self.degrees()[v] == 1:
s = s + self.distance_matrix()[u, v]
return s
def reverse_wiener_index(self):
""" Reverse Wiener Index (RW)
RW = 1/2 * sum_i!=j ( d - D[i,j] )
where D is distance matrix and d is diameter
        The molecular graph must be connected, otherwise the function returns False
"""
if not self.is_connected():
return False
# here we use formula: RW = 1/2 * n * (n-1) * d - W
return self.diameter() * (self.__Order * (self.__Order - 1)) / 2 - self.wiener_index()
def hyper_wiener_index(self):
""" Hyper-Wiener Index (WW)
WW = 1/2 * ( sum_ij d(i,j)^2 + sum_i_j d(i,j) )
where D is distance matrix
        The molecular graph must be connected, otherwise the function returns False
"""
if not self.is_connected():
return False
return (np.power(self.distance_matrix(),
2).sum() + self.distance_matrix().sum()) / 4 # since we have symmetric matrix
def harary_index(self):
""" Harary Index (H)
H = 1/2 sum_i sum_j Rd[i,j]
where Rd is reciprocal distance matrix
Rd[i,j] = 1 / D[i,j] for D[i,j] != 0
Rd[i,j] = 0 otherwise
        The molecular graph must be connected, otherwise the function returns False
"""
if not self.is_connected():
return False
return np.float64(self.reciprocal_distance_matrix().sum(dtype=np.longdouble)) / 2
def LEL(self):
""" Return Laplacian-like energy (LEL) """
return np.float64(np.sum(map(lambda x: np.sqrt(x), self.spectrum('laplacian')), dtype=np.longdouble))
def multiplicative_sum_zagreb_index(self):
""" Log( Multiplicative Sum Zagreb index )"""
d = self.degrees()
return np.float64(
np.sum(map(lambda u: np.log(np.float64(d[u[0]] + d[u[1]])), self.edges()), dtype=np.longdouble))
def multiplicative_p2_zagreb_index(self):
"""Calculates Log( Multiplicative P2 Zagreb index )"""
d = self.degrees()
return np.float64(
np.sum(map(lambda u: np.log(np.float64(d[u[0]] * d[u[1]])), self.edges()), dtype=np.longdouble))
def multiplicative_p1_zagreb_index(self):
"""Calculates Log( Multiplicative P1 Zagreb index )"""
d = self.degrees()
return np.float64(np.sum(map(lambda v: np.log(np.float64(d[v] ** 2)), self.vertices()), dtype=np.longdouble))
def szeged_index(self):
"""Calculates Szeged index"""
if not self.is_connected():
return False
s = 0
D = self.distance_matrix()
for u, v in self.edges():
diff = D[u, :] - D[v, :]
s += (diff > 0).sum() * (diff < 0).sum()
return float(s)
def revised_szeged_index(self):
"""Calculates Revised Szeged index"""
if not self.is_connected():
return False
s = 0.0
D = self.distance_matrix()
for u, v in self.edges():
diff = D[u, :] - D[v, :]
o = (diff == 0).sum()
s += ((diff > 0).sum() + .5 * o) * ((diff < 0).sum() + .5 * o)
return s
def homo_lumo_index(self):
"""Calculates HOMO-LUMO index"""
if not self.is_connected():
return False
n = self.order()
if n % 2 == 0:
h = int(n / 2 - 1) # because array indices start from 0 instead of 1
l = int(h + 1)
return max([abs(self.spectrum()[h]), abs(self.spectrum()[l])])
# else:
h = int((n - 1) / 2)
return abs(self.spectrum()[h])
HL_index = homo_lumo_index
# Adriatic indices
# DEPRECATED
# use mathchem.all_adriatic()
def all_adriatic(self):
""" Generate all possible parameters sets for adriatic indices"""
r = []
for p in [0, 1]:
for i in [1, 2, 3]:
for j in range(1, 9):
if i == 3:
for a in [0.5, 2]:
r.append((p, i, j, a))
elif i == 2 and j in range(1, 6):
for a in [-1, -0.5, 0.5, 1, 2]:
r.append((p, i, j, a))
elif i == 2 or i == 1:
for a in [0.5, 1, 2]:
r.append((p, i, j, a))
return r
def adriatic_name(self, p, i, j, a):
""" Return the name for given parameters of Adriatic indices"""
# (j)
name1 = {1: 'Randic type ', \
2: 'sum ', \
3: 'inverse sum ', \
4: 'misbalance ', \
5: 'inverse misbalance ', \
6: 'min-max ', \
7: 'max-min ', \
8: 'symmetric division '}
# (i,a)
name2 = {(1, 0.5): 'lor', \
(1, 1): 'lo', \
(1, 2): 'los', \
(2, -1): 'in', \
(2, -0.5): 'ir', \
(2, 0.5): 'ro', \
(2, 1): '', \
(2, 2): 's', \
(3, 0.5): 'ha', \
(3, 2): 'two'}
# (p)
name3 = {0: 'deg', 1: 'di'}
return (name1[j] + name2[(i, a)] + name3[p])
def _adriatic_entry_(self, du, dv, i, j, a):
""" Return an individual edge contribution for Adriatic indices and matrices"""
# phi(x,a)
phi = {1: lambda x, a: np.log(x) ** a, 2: lambda x, a: x ** a, 3: lambda x, a: a ** x}
# gamma (x,y)
gamma = { \
1: lambda x, y: x * y, \
2: lambda x, y: x + y, \
3: lambda x, y: 0 if x + y == 0 else 1.0 / (x + y), \
4: lambda x, y: abs(x - y), \
5: lambda x, y: 0 if x == y else 1.0 / abs(x - y), \
6: lambda x, y: 0 if max(x, y) == 0 else min(x, y) / max(x, y), \
7: lambda x, y: 0 if min(x, y) == 0 else max(x, y) / min(x, y), \
8: lambda x, y: 0 if x == 0 or y == 0 else x / y + y / x}
return gamma[j](phi[i](du, a), phi[i](dv, a))
def adriatic_matrix(self, p, i, j, a):
""" Return the Adriatic matrix with given parameters"""
if p == 0:
d = self.degrees()
else:
d = self.distance_matrix().sum(axis=0).tolist()[0]
AM = [[0] * self.order() for k in range(self.order())]
for (u, v) in self.edges():
AM[u][v] = AM[v][u] = self._adriatic_entry_(np.float64(d[u]), np.float64(d[v]), i, j, a)
return AM
def adriatic_index(self, p, i, j, a):
""" Return the Adriatic index with given parameters"""
if p == 0:
d = self.degrees()
else:
d = self.distance_matrix().sum(axis=0).tolist()[0]
func = lambda u: self._adriatic_entry_(np.float64(d[u[0]]), np.float64(d[u[1]]), i, j, a)
return np.float64(np.sum(map(func, self.edges()), dtype=np.longdouble))
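    # Worked example (editor's note): the parameters (p, i, j, a) = (0, 2, 1, 1)
    # select vertex degrees (p = 0), phi(x, a) = x**1 (i = 2, a = 1) and
    # gamma(x, y) = x*y (j = 1), i.e. sum over edges uv of d(u)*d(v) -- the
    # "Randic type deg index", which coincides with the second Zagreb index.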
# Adriatic indices by names
def randic_type_lordeg_index(self):
""" Adriatic index: Randic type lordeg index"""
return self.adriatic_index(0, 1, 1, 0.5)
def randic_type_lodeg_index(self):
""" Adriatic index: Randic type lodeg index"""
return self.adriatic_index(0, 1, 1, 1)
def randic_type_losdeg_index(self):
""" Adriatic index: Randic type losdeg index"""
return self.adriatic_index(0, 1, 1, 2)
def sum_lordeg_index(self):
""" Adriatic index: sum lordeg index"""
return self.adriatic_index(0, 1, 2, 0.5)
def sum_lodeg_index(self):
""" Adriatic index: sum lodeg index"""
return self.adriatic_index(0, 1, 2, 1)
def sum_losdeg_index(self):
""" Adriatic index: sum losdeg index"""
return self.adriatic_index(0, 1, 2, 2)
def inverse_sum_lordeg_index(self):
""" Adriatic index: inverse sum lordeg index"""
return self.adriatic_index(0, 1, 3, 0.5)
def inverse_sum_lodeg_index(self):
""" Adriatic index: inverse sum lodeg index"""
return self.adriatic_index(0, 1, 3, 1)
def inverse_sum_losdeg_index(self):
""" Adriatic index: inverse sum losdeg index"""
return self.adriatic_index(0, 1, 3, 2)
def misbalance_lordeg_index(self):
""" Adriatic index: misbalance lordeg index"""
return self.adriatic_index(0, 1, 4, 0.5)
def misbalance_lodeg_index(self):
""" Adriatic index: misbalance lodeg index"""
return self.adriatic_index(0, 1, 4, 1)
def misbalance_losdeg_index(self):
""" Adriatic index: misbalance losdeg index"""
return self.adriatic_index(0, 1, 4, 2)
def inverse_misbalance_lordeg_index(self):
""" Adriatic index: inverse misbalance lordeg index"""
return self.adriatic_index(0, 1, 5, 0.5)
def inverse_misbalance_lodeg_index(self):
""" Adriatic index: inverse misbalance lodeg index"""
return self.adriatic_index(0, 1, 5, 1)
def inverse_misbalance_losdeg_index(self):
""" Adriatic index: inverse misbalance losdeg index"""
return self.adriatic_index(0, 1, 5, 2)
def min_max_lordeg_index(self):
""" Adriatic index: min-max lordeg index"""
return self.adriatic_index(0, 1, 6, 0.5)
def min_max_lodeg_index(self):
""" Adriatic index: min-max lodeg index"""
return self.adriatic_index(0, 1, 6, 1)
def min_max_losdeg_index(self):
""" Adriatic index: min-max losdeg index"""
return self.adriatic_index(0, 1, 6, 2)
def max_min_lordeg_index(self):
""" Adriatic index: max-min lordeg index"""
return self.adriatic_index(0, 1, 7, 0.5)
def max_min_lodeg_index(self):
""" Adriatic index: max-min lodeg index"""
return self.adriatic_index(0, 1, 7, 1)
def max_min_losdeg_index(self):
""" Adriatic index: max-min losdeg index"""
return self.adriatic_index(0, 1, 7, 2)
def symmetric_division_lordeg_index(self):
""" Adriatic index: symmetric division lordeg index"""
return self.adriatic_index(0, 1, 8, 0.5)
def symmetric_division_lodeg_index(self):
""" Adriatic index: symmetric division lodeg index"""
return self.adriatic_index(0, 1, 8, 1)
def symmetric_division_losdeg_index(self):
""" Adriatic index: symmetric division losdeg index"""
return self.adriatic_index(0, 1, 8, 2)
def randic_type_indeg_index(self):
""" Adriatic index: Randic type indeg index"""
return self.adriatic_index(0, 2, 1, -1)
def randic_type_irdeg_index(self):
""" Adriatic index: Randic type irdeg index"""
return self.adriatic_index(0, 2, 1, -0.5)
def randic_type_rodeg_index(self):
""" Adriatic index: Randic type rodeg index"""
return self.adriatic_index(0, 2, 1, 0.5)
def randic_type_deg_index(self):
""" Adriatic index: Randic type deg index"""
return self.adriatic_index(0, 2, 1, 1)
def randic_type_sdeg_index(self):
""" Adriatic index: Randic type sdeg index"""
return self.adriatic_index(0, 2, 1, 2)
def sum_indeg_index(self):
""" Adriatic index: sum indeg index"""
return self.adriatic_index(0, 2, 2, -1)
def sum_irdeg_index(self):
""" Adriatic index: sum irdeg index"""
return self.adriatic_index(0, 2, 2, -0.5)
def sum_rodeg_index(self):
""" Adriatic index: sum rodeg index"""
return self.adriatic_index(0, 2, 2, 0.5)
def sum_deg_index(self):
""" Adriatic index: sum deg index"""
return self.adriatic_index(0, 2, 2, 1)
def sum_sdeg_index(self):
""" Adriatic index: sum sdeg index"""
return self.adriatic_index(0, 2, 2, 2)
def inverse_sum_indeg_index(self):
""" Adriatic index: inverse sum indeg index"""
return self.adriatic_index(0, 2, 3, -1)
def inverse_sum_irdeg_index(self):
""" Adriatic index: inverse sum irdeg index"""
return self.adriatic_index(0, 2, 3, -0.5)
def inverse_sum_rodeg_index(self):
""" Adriatic index: inverse sum rodeg index"""
return self.adriatic_index(0, 2, 3, 0.5)
def inverse_sum_deg_index(self):
""" Adriatic index: inverse sum deg index"""
return self.adriatic_index(0, 2, 3, 1)
def inverse_sum_sdeg_index(self):
""" Adriatic index: inverse sum sdeg index"""
return self.adriatic_index(0, 2, 3, 2)
def misbalance_indeg_index(self):
""" Adriatic index: misbalance indeg index"""
return self.adriatic_index(0, 2, 4, -1)
def misbalance_irdeg_index(self):
""" Adriatic index: misbalance irdeg index"""
return self.adriatic_index(0, 2, 4, -0.5)
def misbalance_rodeg_index(self):
""" Adriatic index: misbalance rodeg index"""
return self.adriatic_index(0, 2, 4, 0.5)
def misbalance_deg_index(self):
""" Adriatic index: misbalance deg index"""
return self.adriatic_index(0, 2, 4, 1)
def misbalance_sdeg_index(self):
""" Adriatic index: misbalance sdeg index"""
return self.adriatic_index(0, 2, 4, 2)
def inverse_misbalance_indeg_index(self):
""" Adriatic index: inverse misbalance indeg index"""
return self.adriatic_index(0, 2, 5, -1)
def inverse_misbalance_irdeg_index(self):
""" Adriatic index: inverse misbalance irdeg index"""
return self.adriatic_index(0, 2, 5, -0.5)
def inverse_misbalance_rodeg_index(self):
""" Adriatic index: inverse misbalance rodeg index"""
return self.adriatic_index(0, 2, 5, 0.5)
def inverse_misbalance_deg_index(self):
""" Adriatic index: inverse misbalance deg index"""
return self.adriatic_index(0, 2, 5, 1)
def inverse_misbalance_sdeg_index(self):
""" Adriatic index: inverse misbalance sdeg index"""
return self.adriatic_index(0, 2, 5, 2)
def min_max_rodeg_index(self):
""" Adriatic index: min-max rodeg index"""
return self.adriatic_index(0, 2, 6, 0.5)
def min_max_deg_index(self):
""" Adriatic index: min-max deg index"""
return self.adriatic_index(0, 2, 6, 1)
def min_max_sdeg_index(self):
""" Adriatic index: min-max sdeg index"""
return self.adriatic_index(0, 2, 6, 2)
def max_min_rodeg_index(self):
""" Adriatic index: max-min rodeg index"""
return self.adriatic_index(0, 2, 7, 0.5)
def max_min_deg_index(self):
""" Adriatic index: max-min deg index"""
return self.adriatic_index(0, 2, 7, 1)
def max_min_sdeg_index(self):
""" Adriatic index: max-min sdeg index"""
return self.adriatic_index(0, 2, 7, 2)
def symmetric_division_rodeg_index(self):
""" Adriatic index: symmetric division rodeg index"""
return self.adriatic_index(0, 2, 8, 0.5)
def symmetric_division_deg_index(self):
""" Adriatic index: symmetric division deg index"""
return self.adriatic_index(0, 2, 8, 1)
def symmetric_division_sdeg_index(self):
""" Adriatic index: symmetric division sdeg index"""
return self.adriatic_index(0, 2, 8, 2)
def randic_type_hadeg_index(self):
""" Adriatic index: Randic type hadeg index"""
return self.adriatic_index(0, 3, 1, 0.5)
def randic_type_twodeg_index(self):
""" Adriatic index: Randic type twodeg index"""
return self.adriatic_index(0, 3, 1, 2)
def sum_hadeg_index(self):
""" Adriatic index: sum hadeg index"""
return self.adriatic_index(0, 3, 2, 0.5)
def sum_twodeg_index(self):
""" Adriatic index: sum twodeg index"""
return self.adriatic_index(0, 3, 2, 2)
def inverse_sum_hadeg_index(self):
""" Adriatic index: inverse sum hadeg index"""
return self.adriatic_index(0, 3, 3, 0.5)
def inverse_sum_twodeg_index(self):
""" Adriatic index: inverse sum twodeg index"""
return self.adriatic_index(0, 3, 3, 2)
def misbalance_hadeg_index(self):
""" Adriatic index: misbalance hadeg index"""
return self.adriatic_index(0, 3, 4, 0.5)
def misbalance_twodeg_index(self):
""" Adriatic index: misbalance twodeg index"""
return self.adriatic_index(0, 3, 4, 2)
def inverse_misbalance_hadeg_index(self):
""" Adriatic index: inverse misbalance hadeg index"""
return self.adriatic_index(0, 3, 5, 0.5)
def inverse_misbalance_twodeg_index(self):
""" Adriatic index: inverse misbalance twodeg index"""
return self.adriatic_index(0, 3, 5, 2)
def min_max_hadeg_index(self):
""" Adriatic index: min-max hadeg index"""
return self.adriatic_index(0, 3, 6, 0.5)
def min_max_twodeg_index(self):
""" Adriatic index: min-max twodeg index"""
return self.adriatic_index(0, 3, 6, 2)
def max_min_hadeg_index(self):
""" Adriatic index: max-min hadeg index"""
return self.adriatic_index(0, 3, 7, 0.5)
def max_min_twodeg_index(self):
""" Adriatic index: max-min twodeg index"""
return self.adriatic_index(0, 3, 7, 2)
def symmetric_division_hadeg_index(self):
""" Adriatic index: symmetric division hadeg index"""
return self.adriatic_index(0, 3, 8, 0.5)
def symmetric_division_twodeg_index(self):
""" Adriatic index: symmetric division twodeg index"""
return self.adriatic_index(0, 3, 8, 2)
def randic_type_lordi_index(self):
""" Adriatic index: Randic type lordi index"""
return self.adriatic_index(1, 1, 1, 0.5)
def randic_type_lodi_index(self):
""" Adriatic index: Randic type lodi index"""
return self.adriatic_index(1, 1, 1, 1)
def randic_type_losdi_index(self):
""" Adriatic index: Randic type losdi index"""
return self.adriatic_index(1, 1, 1, 2)
def sum_lordi_index(self):
""" Adriatic index: sum lordi index"""
return self.adriatic_index(1, 1, 2, 0.5)
def sum_lodi_index(self):
""" Adriatic index: sum lodi index"""
return self.adriatic_index(1, 1, 2, 1)
def sum_losdi_index(self):
""" Adriatic index: sum losdi index"""
return self.adriatic_index(1, 1, 2, 2)
def inverse_sum_lordi_index(self):
""" Adriatic index: inverse sum lordi index"""
return self.adriatic_index(1, 1, 3, 0.5)
def inverse_sum_lodi_index(self):
""" Adriatic index: inverse sum lodi index"""
return self.adriatic_index(1, 1, 3, 1)
def inverse_sum_losdi_index(self):
""" Adriatic index: inverse sum losdi index"""
return self.adriatic_index(1, 1, 3, 2)
def misbalance_lordi_index(self):
""" Adriatic index: misbalance lordi index"""
return self.adriatic_index(1, 1, 4, 0.5)
def misbalance_lodi_index(self):
""" Adriatic index: misbalance lodi index"""
return self.adriatic_index(1, 1, 4, 1)
def misbalance_losdi_index(self):
""" Adriatic index: misbalance losdi index"""
return self.adriatic_index(1, 1, 4, 2)
def inverse_misbalance_lordi_index(self):
""" Adriatic index: inverse misbalance lordi index"""
return self.adriatic_index(1, 1, 5, 0.5)
def inverse_misbalance_lodi_index(self):
""" Adriatic index: inverse misbalance lodi index"""
return self.adriatic_index(1, 1, 5, 1)
def inverse_misbalance_losdi_index(self):
""" Adriatic index: inverse misbalance losdi index"""
return self.adriatic_index(1, 1, 5, 2)
def min_max_lordi_index(self):
""" Adriatic index: min-max lordi index"""
return self.adriatic_index(1, 1, 6, 0.5)
def min_max_lodi_index(self):
""" Adriatic index: min-max lodi index"""
return self.adriatic_index(1, 1, 6, 1)
def min_max_losdi_index(self):
""" Adriatic index: min-max losdi index"""
return self.adriatic_index(1, 1, 6, 2)
def max_min_lordi_index(self):
""" Adriatic index: max-min lordi index"""
return self.adriatic_index(1, 1, 7, 0.5)
def max_min_lodi_index(self):
""" Adriatic index: max-min lodi index"""
return self.adriatic_index(1, 1, 7, 1)
def max_min_losdi_index(self):
""" Adriatic index: max-min losdi index"""
return self.adriatic_index(1, 1, 7, 2)
def symmetric_division_lordi_index(self):
""" Adriatic index: symmetric division lordi index"""
return self.adriatic_index(1, 1, 8, 0.5)
def symmetric_division_lodi_index(self):
""" Adriatic index: symmetric division lodi index"""
return self.adriatic_index(1, 1, 8, 1)
def symmetric_division_losdi_index(self):
""" Adriatic index: symmetric division losdi index"""
return self.adriatic_index(1, 1, 8, 2)
def randic_type_indi_index(self):
""" Adriatic index: Randic type indi index"""
return self.adriatic_index(1, 2, 1, -1)
def randic_type_irdi_index(self):
""" Adriatic index: Randic type irdi index"""
return self.adriatic_index(1, 2, 1, -0.5)
def randic_type_rodi_index(self):
""" Adriatic index: Randic type rodi index"""
return self.adriatic_index(1, 2, 1, 0.5)
def randic_type_di_index(self):
""" Adriatic index: Randic type di index"""
return self.adriatic_index(1, 2, 1, 1)
def randic_type_sdi_index(self):
""" Adriatic index: Randic type sdi index"""
return self.adriatic_index(1, 2, 1, 2)
def sum_indi_index(self):
""" Adriatic index: sum indi index"""
return self.adriatic_index(1, 2, 2, -1)
def sum_irdi_index(self):
""" Adriatic index: sum irdi index"""
return self.adriatic_index(1, 2, 2, -0.5)
def sum_rodi_index(self):
""" Adriatic index: sum rodi index"""
return self.adriatic_index(1, 2, 2, 0.5)
def sum_di_index(self):
""" Adriatic index: sum di index"""
return self.adriatic_index(1, 2, 2, 1)
def sum_sdi_index(self):
""" Adriatic index: sum sdi index"""
return self.adriatic_index(1, 2, 2, 2)
def inverse_sum_indi_index(self):
""" Adriatic index: inverse sum indi index"""
return self.adriatic_index(1, 2, 3, -1)
def inverse_sum_irdi_index(self):
""" Adriatic index: inverse sum irdi index"""
return self.adriatic_index(1, 2, 3, -0.5)
def inverse_sum_rodi_index(self):
""" Adriatic index: inverse sum rodi index"""
return self.adriatic_index(1, 2, 3, 0.5)
def inverse_sum_di_index(self):
""" Adriatic index: inverse sum di index"""
return self.adriatic_index(1, 2, 3, 1)
def inverse_sum_sdi_index(self):
""" Adriatic index: inverse sum sdi index"""
return self.adriatic_index(1, 2, 3, 2)
def misbalance_indi_index(self):
""" Adriatic index: misbalance indi index"""
return self.adriatic_index(1, 2, 4, -1)
def misbalance_irdi_index(self):
""" Adriatic index: misbalance irdi index"""
return self.adriatic_index(1, 2, 4, -0.5)
def misbalance_rodi_index(self):
""" Adriatic index: misbalance rodi index"""
return self.adriatic_index(1, 2, 4, 0.5)
def misbalance_di_index(self):
""" Adriatic index: misbalance di index"""
return self.adriatic_index(1, 2, 4, 1)
def misbalance_sdi_index(self):
""" Adriatic index: misbalance sdi index"""
return self.adriatic_index(1, 2, 4, 2)
def inverse_misbalance_indi_index(self):
""" Adriatic index: inverse misbalance indi index"""
return self.adriatic_index(1, 2, 5, -1)
def inverse_misbalance_irdi_index(self):
""" Adriatic index: inverse misbalance irdi index"""
return self.adriatic_index(1, 2, 5, -0.5)
def inverse_misbalance_rodi_index(self):
""" Adriatic index: inverse misbalance rodi index"""
return self.adriatic_index(1, 2, 5, 0.5)
def inverse_misbalance_di_index(self):
""" Adriatic index: inverse misbalance di index"""
return self.adriatic_index(1, 2, 5, 1)
def inverse_misbalance_sdi_index(self):
""" Adriatic index: inverse misbalance sdi index"""
return self.adriatic_index(1, 2, 5, 2)
def min_max_rodi_index(self):
""" Adriatic index: min-max rodi index"""
return self.adriatic_index(1, 2, 6, 0.5)
def min_max_di_index(self):
""" Adriatic index: min-max di index"""
return self.adriatic_index(1, 2, 6, 1)
def min_max_sdi_index(self):
""" Adriatic index: min-max sdi index"""
return self.adriatic_index(1, 2, 6, 2)
def max_min_rodi_index(self):
""" Adriatic index: max-min rodi index"""
return self.adriatic_index(1, 2, 7, 0.5)
def max_min_di_index(self):
""" Adriatic index: max-min di index"""
return self.adriatic_index(1, 2, 7, 1)
def max_min_sdi_index(self):
""" Adriatic index: max-min sdi index"""
return self.adriatic_index(1, 2, 7, 2)
def symmetric_division_rodi_index(self):
""" Adriatic index: symmetric division rodi index"""
return self.adriatic_index(1, 2, 8, 0.5)
def symmetric_division_di_index(self):
""" Adriatic index: symmetric division di index"""
return self.adriatic_index(1, 2, 8, 1)
def symmetric_division_sdi_index(self):
""" Adriatic index: symmetric division sdi index"""
return self.adriatic_index(1, 2, 8, 2)
def randic_type_hadi_index(self):
""" Adriatic index: Randic type hadi index"""
return self.adriatic_index(1, 3, 1, 0.5)
def randic_type_twodi_index(self):
""" Adriatic index: Randic type twodi index"""
return self.adriatic_index(1, 3, 1, 2)
def sum_hadi_index(self):
""" Adriatic index: sum hadi index"""
return self.adriatic_index(1, 3, 2, 0.5)
def sum_twodi_index(self):
""" Adriatic index: sum twodi index"""
return self.adriatic_index(1, 3, 2, 2)
def inverse_sum_hadi_index(self):
""" Adriatic index: inverse sum hadi index"""
return self.adriatic_index(1, 3, 3, 0.5)
def inverse_sum_twodi_index(self):
""" Adriatic index: inverse sum twodi index"""
return self.adriatic_index(1, 3, 3, 2)
def misbalance_hadi_index(self):
""" Adriatic index: misbalance hadi index"""
return self.adriatic_index(1, 3, 4, 0.5)
def misbalance_twodi_index(self):
""" Adriatic index: misbalance twodi index"""
return self.adriatic_index(1, 3, 4, 2)
def inverse_misbalance_hadi_index(self):
""" Adriatic index: inverse misbalance hadi index"""
return self.adriatic_index(1, 3, 5, 0.5)
def inverse_misbalance_twodi_index(self):
""" Adriatic index: inverse misbalance twodi index"""
return self.adriatic_index(1, 3, 5, 2)
def min_max_hadi_index(self):
""" Adriatic index: min-max hadi index"""
return self.adriatic_index(1, 3, 6, 0.5)
def min_max_twodi_index(self):
""" Adriatic index: min-max twodi index"""
return self.adriatic_index(1, 3, 6, 2)
def max_min_hadi_index(self):
""" Adriatic index: max-min hadi index"""
return self.adriatic_index(1, 3, 7, 0.5)
def max_min_twodi_index(self):
""" Adriatic index: max-min twodi index"""
return self.adriatic_index(1, 3, 7, 2)
def symmetric_division_hadi_index(self):
""" Adriatic index: symmetric division hadi index"""
return self.adriatic_index(1, 3, 8, 0.5)
def symmetric_division_twodi_index(self):
""" Adriatic index: symmetric division twodi index"""
return self.adriatic_index(1, 3, 8, 2)
| 33.186145 | 121 | 0.560802 |
6365b052af1cd0b1cbf4a3b7f82e8915ae433561 | 511 | py | Python | url.py | tekonrust/urlshortener | d5696a1209c06047af2e301bbaea019a0490d6bf | [
"Unlicense"
] | 1 | 2021-01-28T06:48:31.000Z | 2021-01-28T06:48:31.000Z | url.py | tekonrust/urlshortener | d5696a1209c06047af2e301bbaea019a0490d6bf | [
"Unlicense"
] | null | null | null | url.py | tekonrust/urlshortener | d5696a1209c06047af2e301bbaea019a0490d6bf | [
"Unlicense"
] | null | null | null | def LongOrShort():
mode= input("ENTER 'S' TO SHORTEN OR 'E' TO EXPAND :-")
if mode.upper() =='S' :
import pyshorteners
p = pyshorteners.Shortener()
url=input("enter url to be shortened : ")
chota=(p.tinyurl.short(url))
print("shortened url :-",chota)
if mode.upper() =='E':
import pyshorteners
p = pyshorteners.Shortener()
url=input("enter url to be expanded : ")
print("expanded url :-",p.tinyurl.expand(url))
LongOrShort()
| 31.9375 | 60 | 0.581213 |
dcfc3040efec4059e86e486a60fd9cb69740de30 | 6,426 | py | Python | ironic/drivers/modules/ovhapi/ovh_base.py | yanndegat/ironic | 8857ec76443dea7778bb9c0d66568304e52495e5 | [
"Apache-2.0"
] | null | null | null | ironic/drivers/modules/ovhapi/ovh_base.py | yanndegat/ironic | 8857ec76443dea7778bb9c0d66568304e52495e5 | [
"Apache-2.0"
] | null | null | null | ironic/drivers/modules/ovhapi/ovh_base.py | yanndegat/ironic | 8857ec76443dea7778bb9c0d66568304e52495e5 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, OVH SAS.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import re
import time
from oslo_log import log
import requests
LOG = log.getLogger(__file__)
# Regex to obfuscate log requests when debugging
OBFUSCATE_REGEX = re.compile(
'X-Ovh-Application|password|Signature|X-Ovh-Consumer',
flags=re.IGNORECASE
)
# Mapping between OVH API region names and corresponding endpoints
ENDPOINTS = {
'ovh-eu': 'https://eu.api.ovh.com/1.0',
'ovh-us': 'https://api.us.ovhcloud.com/1.0',
'ovh-ca': 'https://ca.api.ovh.com/1.0',
'kimsufi-eu': 'https://eu.api.kimsufi.com/1.0',
'kimsufi-ca': 'https://ca.api.kimsufi.com/1.0',
'soyoustart-eu': 'https://eu.api.soyoustart.com/1.0',
'soyoustart-ca': 'https://ca.api.soyoustart.com/1.0',
}
class Api(object):
def __init__(self, endpoint_url, application_key, application_secret,
consumer_key="", debug=False):
"""Initializes an OVH API client.
:param endpoint_url: the OVH endpoint you want to call
:param application_key: your application key given by OVH on
application registration
:param application_secret: your application secret given by OVH on
application registration
:param consumer_key: the consumer key you want to use, if any, given
after a credential request
:param debug: whether or not to log requests
"""
self.endpoint_url = endpoint_url
self.application_key = application_key
self.application_secret = application_secret
self.consumer_key = consumer_key
self.debug = debug
self.session = requests.Session()
self._time_delta = None
def time_delta(self):
"""Retrieves the API's time delta.
Retrieves the time delta between this computer and the OVH cluster
to sign further queries.
:returns: the time delta in seconds.
"""
if self._time_delta is None:
result = self.session.get(self.endpoint_url + "/auth/time")
result.raise_for_status()
self._time_delta = int(result.text) - int(time.time())
return self._time_delta
def _call(self, method, path, content=None):
"""Calls the API with the given parameters.
The request will be signed if the consumer key has been set.
:param method: the HTTP method of the request (get/post/put/delete)
:param path: the url you want to request
:param content: the object you want to send in your request
(will be automatically serialized to JSON)
:raises: requests.exceptions.HTTPError if the API return an error
"""
target_url = self.endpoint_url + path
now = str(int(time.time()) + self.time_delta())
body = ""
if content is not None:
body = json.dumps(content)
headers = {
"Content-type": "application/json",
"X-Ovh-Application": self.application_key,
"X-Ovh-Timestamp": now,
}
if self.consumer_key != "":
# Compute the call signature for authentication
s1 = hashlib.sha1()
s1.update("+".join([
self.application_secret,
self.consumer_key,
method.upper(),
target_url,
body,
now
]).encode('utf-8'))
headers["X-Ovh-Consumer"] = self.consumer_key
headers["X-Ovh-Signature"] = "$1$" + s1.hexdigest()
# Re-use the session init at startup
req = getattr(self.session, method.lower())
self._log_request(method.upper(), target_url, headers, body)
try:
result = req(target_url, stream=False, headers=headers, data=body)
result.raise_for_status()
except requests.exceptions.HTTPError as e:
LOG.error("Error querying OVH API: %(error)s",
{'error': e})
# TODO(pgaxatte): convert exception to a custom Ironic exception
raise e
return result
def _log_request(self, method, target_url, headers, data):
"""Logs the request made for debugging purposes.
:param method: the HTTP method of the request (get/post/put/delete)
:param target_url: the url requested
:param headers: the headers passed in the request
:param data: the data passed in the request
"""
if not self.debug:
return
string_parts = [
"curl -g -i",
"-X '%s'" % method,
"'%s'" % target_url,
]
for k, v in headers.items():
if OBFUSCATE_REGEX.search(k):
v = 'OBFUSCATED'
header = "-H '{}: {}'".format(k, v)
string_parts.append(header)
LOG.debug("OVH API REQ: %(req)s", {'req': " ".join(string_parts)})
if data:
LOG.debug("OVH API REQ BODY: %(body)s", {'body': data})
def get(self, path):
"""Wraps call to _call("get")
:param path: the url of the resource you want to get
"""
return self._call("get", path)
def put(self, path, content):
"""Wraps a call to _call("put")
:param path: the url of the resource you want to modify
:param content: the object you want to modify
"""
return self._call("put", path, content)
def post(self, path, content):
"""Wraps a call to _call("post")
:param path: the url of the resource you want to create
:param content: the object you want to create
"""
return self._call("post", path, content)
def delete(self, path):
"""Wraps a call to _call("delete")
:param path: the url of the resource you want to delete
"""
return self._call("delete", path)
| 34 | 78 | 0.606287 |
8f63695b93046d2daef41b7cddd0d6ba197a17da | 3,222 | py | Python | node.py | succa/adversarial-ml-text-classification | 1efce8e198c2825dea2f50148e83864a1b6a6fd1 | [
"MIT"
] | null | null | null | node.py | succa/adversarial-ml-text-classification | 1efce8e198c2825dea2f50148e83864a1b6a6fd1 | [
"MIT"
] | null | null | null | node.py | succa/adversarial-ml-text-classification | 1efce8e198c2825dea2f50148e83864a1b6a6fd1 | [
"MIT"
] | null | null | null | from data_utils import extract_features
from paraphrase import perturb_text
import numpy as np
import spacy
import math
import random
nlp = spacy.load('en_core_web_lg')
# Node class
class Node:
def __init__(self,
text,
root=None,
grad_guide=None,
parent=None,
candidates_dict=None,
chosen_index=None,
indexes_already_used=None,
level=0):
self.text = text
self.root = root if root != None else self
self.grad_guide = grad_guide
self.parent = parent if parent != None else self
self.candidates_dict = candidates_dict
self.chosen_index = chosen_index
self.indexes_already_used = indexes_already_used if indexes_already_used != None else []
self.level = level
#These will be updated in the goal and expand function
self.features = []
self.prob = None
self.cl = None
def expand(self, n_changes_per_level=None,
max_depth_level=None,
most_salient=True,
use_typos=False, use_homoglyphs=False,
max_length=1000, verbose=False):
if max_depth_level == None:
max_depth_level=7
if self.level > max_depth_level:
return []
# Compute the Forward Gradient
model = self.grad_guide.model
if not len(self.features):
self.features = extract_features([self.text], max_length=max_length)[0].reshape(1, -1)
grads = self.grad_guide.wordwise_grads(self.features).squeeze()
indexes_to_use = sorted(np.setdiff1d(range(len(self.text)), self.indexes_already_used),
key=lambda k: grads[k],
reverse=most_salient)
n_changes = 0
perturbed_texts = []
for index in indexes_to_use:
if index in self.candidates_dict:
n_changes += 1
perturbed_texts += perturb_text(self.text,
index,
self.candidates_dict[index])
if n_changes == n_changes_per_level:
break
if verbose:
print("Level: {} Npert: {} Text: {}".format(self.level, len(perturbed_texts), self.text))
children = np.empty([len(perturbed_texts)], dtype=Node)
for index, perturbed_text in enumerate(perturbed_texts):
indexes_already_used=self.indexes_already_used.copy()
indexes_already_used.append(perturbed_text[1])
children[index] = Node(nlp(perturbed_text[0]),
self.root,
self.grad_guide,
self, #parent of the child
self.candidates_dict,
perturbed_text[1],
indexes_already_used,
self.level+1)
return children
def __repr__(self):
return '{}'.format(self.text) | 34.645161 | 101 | 0.534451 |
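# Illustrative usage (editor's sketch; `guide` and `candidates` stand in for a
# gradient-guide object wrapping the classifier and a {token_index:
# [replacement_candidates]} dict built elsewhere in this project):
#   root = Node(nlp(u'the film was great'), grad_guide=guide,
#               candidates_dict=candidates)
#   children = root.expand(n_changes_per_level=2, max_depth_level=3)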
c7a90f6d271819078412c0703ab1e301f1f33794 | 322 | py | Python | aqua_guard/config/docs.py | pooja586/aqua_guard | 4968b5006a8187f72d1d1394251d140d94c2b74f | [
"MIT"
] | null | null | null | aqua_guard/config/docs.py | pooja586/aqua_guard | 4968b5006a8187f72d1d1394251d140d94c2b74f | [
"MIT"
] | null | null | null | aqua_guard/config/docs.py | pooja586/aqua_guard | 4968b5006a8187f72d1d1394251d140d94c2b74f | [
"MIT"
] | null | null | null | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/aqua_guard"
# docs_base_url = "https://[org_name].github.io/aqua_guard"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Aqua Guard"
| 26.833333 | 68 | 0.726708 |
053374f211fcf0a93af4ec15538cf54e6beaacd2 | 3,362 | py | Python | scripts/generate_config.py | AgileCloudLab/single-threaded-rlnc-benchmark | 914c18cf408d62f7294f796f386e98740d6fc83d | [
"MIT"
] | null | null | null | scripts/generate_config.py | AgileCloudLab/single-threaded-rlnc-benchmark | 914c18cf408d62f7294f796f386e98740d6fc83d | [
"MIT"
] | null | null | null | scripts/generate_config.py | AgileCloudLab/single-threaded-rlnc-benchmark | 914c18cf408d62f7294f796f386e98740d6fc83d | [
"MIT"
] | null | null | null | import sys
one_kb = 1024
one_mb = 1024 * one_kb
one_gb = 1024 * one_mb
def generate_file_name(data_size, data_unit, iterations, finite_field, threads, generation_sizes, file_format):
# TODO: Update to new formating
name = '{!s}_{!s}_{!s}_{!s}_{!s}_{!s}.{!s}'.format(data_size, data_unit, iterations, finite_field, threads, "_".join(str(gen) for gen in generation_sizes), file_format)
return name
def convert_data_size_to_bytes(data_size, unit):
if unit == 'b':
return data_size
elif unit == 'k':
return data_size * one_kb
elif unit == 'm':
return data_size * one_mb
elif unit == 'g':
return data_size * one_gb
else:
print('Unsupported data unit, falling back to bytes')
return data_size
def assert_valid_generation_size(generation_sizes, data_size):
for gen in generation_sizes:
        if not (gen < data_size and data_size % gen == 0):
return False
return True
def calculate_symbol_size(generation_sizes, data_size):
obj = {}
for gen in generation_sizes:
obj[gen] = int(data_size / gen)
return obj
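# Worked example (editor's note): 1 MiB of data with generation sizes 16 and 64
#   convert_data_size_to_bytes(1, 'm')        -> 1048576
#   calculate_symbol_size([16, 64], 1048576)  -> {16: 65536, 64: 16384}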
def mk_config(iterations, data_size, finite_field, threads, systematic_on, gen_conf):
obj = {}
obj['iterations'] = iterations
obj['data_size'] = data_size
obj['finite_field'] = finite_field
obj['threads'] = threads
obj['systematic_on'] = systematic_on
obj['gens'] = gen_conf
print(obj)
return obj
def write_csv(path, file_name, exp_config):
config_lines = list()
print(type(exp_config['gens']))
for key, value in exp_config['gens'].items():
config_lines.append('{!s},{!s},{!s},{!s},{!s},{!s}\n'.format(exp_config['iterations'],
exp_config['threads'],
key,
value,
exp_config['finite_field'],
exp_config['systematic_on']))
temp = config_lines.pop()
temp = temp[:-1]
config_lines.append(temp)
with open(path + file_name, 'w') as file:
file.writelines(config_lines)
file.close()
def write_config(path, file_name, exp_config):
if file_name.endswith('.csv'):
write_csv(path, file_name, exp_config)
config_path = input("output path: ")
file_format = input("file format [csv]: ")
iterations = int(input("iterations: "))
threads = int(input("Number of threads: "))
finite_field = input('finite field [1,2]: ')
data_size = int(input("Data size in number: "))
data_unit = input("data unit [b,k,m,g]: ")
while (not data_unit in ['b','k','m','g']):
data_unit = input("data unit [b,k,m,g]: ")
generation_sizes = input("generation size [command seperated list]: ")
systematic_on = int(input("Systematic on [0,1]: "))
generation_sizes = [int(gen) for gen in generation_sizes.split(',')]
data_size = convert_data_size_to_bytes(data_size, data_unit)
if not assert_valid_generation_size(generation_sizes, data_size):
sys.exit()
gen_conf = calculate_symbol_size(generation_sizes, data_size)
file_name = generate_file_name(data_size, data_unit, iterations, finite_field, threads, generation_sizes, file_format)
exp_config = mk_config(iterations, data_size, finite_field, threads, systematic_on, gen_conf)
write_config(config_path, file_name, exp_config)
| 30.288288 | 172 | 0.651398 |
896f59715a6a512763f9c3a86132ea45168c9157 | 2,227 | py | Python | lambda/lambda_function.py | rafty/Tool_DeleteCloudWatchStreams | a322f99dff5341dcd4e09f10bc1ff372a6ee8bb3 | [
"Apache-2.0"
] | null | null | null | lambda/lambda_function.py | rafty/Tool_DeleteCloudWatchStreams | a322f99dff5341dcd4e09f10bc1ff372a6ee8bb3 | [
"Apache-2.0"
] | null | null | null | lambda/lambda_function.py | rafty/Tool_DeleteCloudWatchStreams | a322f99dff5341dcd4e09f10bc1ff372a6ee8bb3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import datetime
import logging
import boto3
logger = logging.getLogger()
logger.setLevel(level=logging.INFO)
logs = boto3.client('logs')
def delete_stream(log_stream_name, log_group_name):
logger.info('delete_stream: {}'.format(log_stream_name))
logs.delete_log_stream(logGroupName=log_group_name,
logStreamName=log_stream_name)
return log_stream_name
def extract_streams_to_delete(log_streams):
# datetime to unixtime and to milliseconds
three_days_ago = int((datetime.datetime.utcnow() -
datetime.timedelta(days=3)).timestamp()*1000)
now = int(datetime.datetime.utcnow().timestamp()*1000)
streams_to_delete = [
stream.get('logStreamName')
for stream in log_streams.get('logStreams')
if stream.get('lastEventTimestamp', now) <= three_days_ago]
logger.info('streams_to_delete: {}'.format(streams_to_delete))
return streams_to_delete
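# Illustrative behaviour (editor's sketch): streams whose lastEventTimestamp is
# more than three days old are selected; streams without a lastEventTimestamp
# default to "now" and are kept.
#   extract_streams_to_delete({'logStreams': [
#       {'logStreamName': 'old', 'lastEventTimestamp': 0},
#       {'logStreamName': 'fresh'}]})
#   -> ['old']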
def describe_log_streams(log_group_name, next_token):
if next_token:
log_streams = logs.describe_log_streams(
logGroupName=log_group_name,
nextToken=next_token
)
else:
log_streams = logs.describe_log_streams(
logGroupName=log_group_name,
)
return log_streams
def lambda_handler(event, context):
logger.info('lambda_handler(event): {}'.format(event))
log_groups = logs.describe_log_groups()
for log_group in log_groups.get('logGroups'):
log_group_name = log_group.get('logGroupName')
if log_group['storedBytes'] == 0:
logs.delete_log_group(logGroupName=log_group_name)
logger.info('delete log group: {}'.format(log_group_name))
continue
next_token = None
while True:
log_streams = describe_log_streams(log_group_name, next_token)
next_token = log_streams.get('nextToken', None)
streams_to_delete = extract_streams_to_delete(log_streams)
list(map(lambda x: delete_stream(x, log_group_name),
streams_to_delete))
            if not next_token or not log_streams.get('logStreams'):
break
| 30.930556 | 74 | 0.668163 |
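A quick local sanity check of the filtering logic above — a minimal sketch, assuming the file is importable as lambda_function (an assumed name) and that boto3 can build its client at import time (i.e. a region/credentials are configured); the stream names and timestamps below are made up.

import datetime
import lambda_function  # assumed module name for the handler above

now_ms = int(datetime.datetime.utcnow().timestamp() * 1000)
old_ms = now_ms - 10 * 24 * 60 * 60 * 1000  # well past the three-day cutoff

fake_page = {
    'logStreams': [
        {'logStreamName': 'stale-stream', 'lastEventTimestamp': old_ms},
        {'logStreamName': 'fresh-stream', 'lastEventTimestamp': now_ms},
        {'logStreamName': 'never-written'},  # missing timestamp is treated as "now" and kept
    ]
}

# Only the stream older than three days should be flagged for deletion.
assert lambda_function.extract_streams_to_delete(fake_page) == ['stale-stream']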
82d7dad39a157081c29ea65df03b70ba04caac5e | 2,677 | py | Python | docs/conf.py | noahkw/acmetk | 4bf6202babbfa1cf91801a8f1bd3ae3a02737799 | [
"MIT"
] | 3 | 2021-03-15T11:25:22.000Z | 2021-04-01T09:05:07.000Z | docs/conf.py | noahkw/acmetk | 4bf6202babbfa1cf91801a8f1bd3ae3a02737799 | [
"MIT"
] | 60 | 2021-03-16T13:28:56.000Z | 2021-04-03T14:07:31.000Z | docs/conf.py | noahkw/acmetk | 4bf6202babbfa1cf91801a8f1bd3ae3a02737799 | [
"MIT"
] | 1 | 2021-03-15T11:25:05.000Z | 2021-03-15T11:25:05.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Variables ---------------------------------------------------------------
rst_prolog = """
.. |GIT_URL| replace:: https://github.com/noahkw/acmetk.git
"""
# -- Project information -----------------------------------------------------
project = "ACME Toolkit"
copyright = "2020, Noah Wöhler"
author = "Noah Wöhler"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx-prompt",
"sphinx_substitution_extensions",
]
intersphinx_mapping = {
"aiohttp": ("https://docs.aiohttp.org/en/latest/", None),
"acme": ("https://acme-python.readthedocs.io/en/latest/", None),
"cryptography": ("https://cryptography.io/en/latest/", None),
"dns": ("https://dnspython.readthedocs.io/en/latest/", None),
"josepy": ("https://python-jose.readthedocs.io/en/latest/", None),
"python": ("https://docs.python.org/3", None),
}
autodoc_default_options = {"private-members": True}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_glpi_theme
html_theme = "glpi"
html_theme_path = sphinx_glpi_theme.get_html_themes_path()
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| 34.766234 | 79 | 0.650355 |
d45a9fa6099adddbf89fceabad24be3fddfeaf03 | 4,141 | py | Python | MIDI Remote Scripts/pushbase/select_playing_clip_component.py | aarkwright/ableton_devices | fe5df3bbd64ccbc136bba722ba1e131a02969798 | [
"MIT"
] | null | null | null | MIDI Remote Scripts/pushbase/select_playing_clip_component.py | aarkwright/ableton_devices | fe5df3bbd64ccbc136bba722ba1e131a02969798 | [
"MIT"
] | null | null | null | MIDI Remote Scripts/pushbase/select_playing_clip_component.py | aarkwright/ableton_devices | fe5df3bbd64ccbc136bba722ba1e131a02969798 | [
"MIT"
] | null | null | null | # uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\pushbase\select_playing_clip_component.py
# Compiled at: 2018-11-30 15:48:12
"""
Component that automatically selects the playing clip in the selected track.
"""
from __future__ import absolute_import, print_function, unicode_literals
from functools import partial
from ableton.v2.base import index_if, nop, listens, task
from ableton.v2.control_surface.control import ButtonControl
from ableton.v2.control_surface.mode import AddLayerMode
from .consts import MessageBoxText
from .messenger_mode_component import MessengerModesComponent
class SelectPlayingClipComponent(MessengerModesComponent):
action_button = ButtonControl(color=b'DefaultButton.Alert')
def __init__(self, playing_clip_above_layer=None, playing_clip_below_layer=None, *a, **k):
super(SelectPlayingClipComponent, self).__init__(*a, **k)
self._update_mode_task = self._tasks.add(task.sequence(task.delay(1), task.run(self._update_mode)))
self._update_mode_task.kill()
self.add_mode(b'default', None)
self.add_mode(b'above', [
AddLayerMode(self, playing_clip_above_layer)], message=MessageBoxText.PLAYING_CLIP_ABOVE_SELECTED_CLIP)
self.add_mode(b'below', [
AddLayerMode(self, playing_clip_below_layer)], message=MessageBoxText.PLAYING_CLIP_BELOW_SELECTED_CLIP)
self.selected_mode = b'default'
self.notify_when_enabled = True
self._on_detail_clip_changed.subject = self.song.view
self._on_playing_slot_index_changed.subject = self.song.view.selected_track
self._notification_reference = partial(nop, None)
return
@action_button.pressed
def action_button(self, button):
self._go_to_playing_clip()
@listens(b'detail_clip')
def _on_detail_clip_changed(self):
self._update_mode_task.restart()
@listens(b'playing_slot_index')
def _on_playing_slot_index_changed(self):
self._update_mode_task.restart()
def _go_to_playing_clip(self):
song_view = self.song.view
playing_clip_slot = self._playing_clip_slot()
if playing_clip_slot:
song_view.highlighted_clip_slot = playing_clip_slot
song_view.detail_clip = playing_clip_slot.clip
self._hide_notification()
def _hide_notification(self):
if self._notification_reference() is not None:
self._notification_reference().hide()
return
def show_notification(self, display_text):
self._notification_reference = super(SelectPlayingClipComponent, self).show_notification(display_text, blink_text=MessageBoxText.SELECTED_CLIP_BLINK, notification_time=-1)
def _selected_track_clip_is_playing(self):
playing_clip_slot = self._playing_clip_slot()
return not (playing_clip_slot and playing_clip_slot.clip != self.song.view.detail_clip)
def _playing_clip_slot(self):
track = self.song.view.selected_track
try:
playing_slot_index = track.playing_slot_index
slot = track.clip_slots[playing_slot_index] if 0 <= playing_slot_index < len(track.clip_slots) else None
return slot
except RuntimeError:
pass
return
def _selected_track_clip_is_above_playing_clip(self):
song_view = self.song.view
track = song_view.selected_track
playing_slot_index = track.playing_slot_index
selected_index = index_if(lambda slot: slot == song_view.highlighted_clip_slot, track.clip_slots)
return playing_slot_index <= selected_index
def _update_mode(self):
if not self._selected_track_clip_is_playing():
if self._selected_track_clip_is_above_playing_clip():
self.selected_mode = b'above'
else:
self.selected_mode = b'below'
else:
self.selected_mode = b'default'
            self._hide_notification()
| 44.053191 | 179 | 0.725429 |
8f7cf23993a8603c1740a080c60588dbf81f24ca | 22,008 | py | Python | qa/rpc-tests/replace-by-fee.py | kazucoin/kazusilver | fc81623ed5fd5f9f9fd9ce85139201ece6a2332e | [
"MIT"
] | 1 | 2019-06-02T17:21:08.000Z | 2019-06-02T17:21:08.000Z | qa/rpc-tests/replace-by-fee.py | kazucoin/kazusilver | fc81623ed5fd5f9f9fd9ce85139201ece6a2332e | [
"MIT"
] | null | null | null | qa/rpc-tests/replace-by-fee.py | kazucoin/kazusilver | fc81623ed5fd5f9f9fd9ce85139201ece6a2332e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test replace by fee code
#
from test_framework.test_framework import KazuSilverTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return bytes_to_hex_str(tx.serialize())
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
#print (node.getbalance(), amount, fee)
new_addr = node.getnewaddress()
#print new_addr
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
#print i, txout['scriptPubKey']['addresses']
if txout['scriptPubKey']['addresses'] == [new_addr]:
#print i
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransaction(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert(new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(KazuSilverTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
"-relaypriority=0", "-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"
]))
self.is_network_split = False
def run_test(self):
make_utxo(self.nodes[0], 1*COIN)
print("Running test simple doublespend...")
self.test_simple_doublespend()
print("Running test doublespend chain...")
self.test_doublespend_chain()
print("Running test doublespend tree...")
self.test_doublespend_tree()
print("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
print("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
print("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
print("Running test too many replacements...")
self.test_too_many_replacements()
print("Running test opt-in...")
self.test_opt_in()
print("Running test prioritised transactions...")
self.test_prioritised_transactions()
print("Passed\n")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# Extra 0.1 KSLV fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 KSLV - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False) # transaction mistakenly accepted!
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert(len(tx.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# 1 KSLV fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1_hex = txToHex(tx1)
tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
try:
self.nodes[0].sendrawtransaction(double_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
""" Replacing should only work if orig tx opted in """
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
print(tx1b_txid)
assert(False)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx2b_hex = txToHex(tx2b)
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(tx1a_txid, 0, int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert(tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(tx2b.hash, 0, int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert(tx2b_txid in self.nodes[0].getrawmempool())
if __name__ == '__main__':
ReplaceByFeeTest().main()
| 37.238579 | 105 | 0.601145 |
e8fe83bd571027b047b469e8a4ecfcce78e64de0 | 201 | py | Python | labsys/auth/__init__.py | gems-uff/labsys | b8990d7ef6377b6d34f66c277684af1ef94bd5c3 | [
"MIT"
] | 1 | 2017-05-04T17:32:17.000Z | 2017-05-04T17:32:17.000Z | labsys/auth/__init__.py | gems-uff/labsys | b8990d7ef6377b6d34f66c277684af1ef94bd5c3 | [
"MIT"
] | 19 | 2017-06-05T22:52:45.000Z | 2018-06-02T18:17:26.000Z | labsys/auth/__init__.py | gems-uff/labsys | b8990d7ef6377b6d34f66c277684af1ef94bd5c3 | [
"MIT"
] | null | null | null | from flask import Blueprint
from .models import Permission
blueprint = Blueprint('auth', __name__)
@blueprint.app_context_processor
def inject_permissions():
return dict(Permission=Permission)
| 18.272727 | 39 | 0.800995 |
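For context, a minimal sketch of how a blueprint like this is typically attached to the application — the import path and the '/auth' prefix are illustrative assumptions, not taken from the repository.

from flask import Flask

from labsys.auth import blueprint  # the module shown above

app = Flask(__name__)
app.register_blueprint(blueprint, url_prefix='/auth')
# Once registered, the context processor makes Permission available in every template.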
9d6f0988f77643aac9c4eb6613c281c28d9e50fc | 7,950 | py | Python | tests/py_dss_interface/test_loadshapes.py | davilamds/py_dss_interface | a447c97787aeac962381db88dd622ccb235eef4b | [
"MIT"
] | 8 | 2020-08-15T12:56:03.000Z | 2022-01-04T15:51:14.000Z | tests/py_dss_interface/test_loadshapes.py | eniovianna/py_dss_interface | db1c5ee2ae04d525bfd77ecd9ff41028da6ac31a | [
"MIT"
] | 24 | 2021-04-24T18:33:19.000Z | 2021-11-13T14:59:54.000Z | tests/py_dss_interface/test_loadshapes.py | eniovianna/py_dss_interface | db1c5ee2ae04d525bfd77ecd9ff41028da6ac31a | [
"MIT"
] | 7 | 2020-08-15T12:56:04.000Z | 2021-10-04T16:14:30.000Z | # -*- coding: utf-8 -*-
# @Time : 7/30/2021 02:01 PM
# @Author : Rodolfo Londero
# @Email : rodolfpl@gmail.com
# @File : test_loadshapes.py
# @Software: PyCharm
import pytest
class TestLoadShapes13Bus:
@pytest.fixture(autouse=True)
def _request(self, solve_snap_13bus):
self.dss = solve_snap_13bus
self.dss.loadshapes_write_name('default')
def new_loadshape(self, activate: bool = False):
self.dss.text("New Loadshape.Test npts=24 interval=1 Pbase=100 Qbase=50 "
"mult= "
"(0.18000001 0.19000000 0.23999999 0.33000001 0.38999999 0.41000000 "
"0.64999998 1.23000002 1.88999999 1.88999999 1.96000004 1.98000002 "
"1.45000005 1.62000000 1.88999999 1.79999995 1.78999996 1.19000006 "
"0.80000001 0.66000003 0.51999998 0.40000001 0.28000000 0.23000000)")
if activate:
self.dss.loadshapes_write_name('test')
# ===================================================================
# Integer methods
# ===================================================================
def test_loadshapes_count(self):
expected = 1
actual = self.dss.loadshapes_count()
assert actual == expected
self.new_loadshape()
expected = 2
actual = self.dss.loadshapes_count()
assert actual == expected
def test_loadshapes_first(self):
expected = 1
        actual = self.dss.loadshapes_first()
assert actual == expected
def test_loadshapes_next(self):
expected = 0
actual = self.dss.loadshapes_next()
assert actual == expected
def test_loadshapes_read_npts(self):
expected = 24
actual = self.dss.loadshapes_read_npts()
assert actual == expected
def test_loadshapes_write_npts(self):
expected = 48
self.dss.loadshapes_write_npts(expected)
actual = self.dss.loadshapes_read_npts()
assert actual == expected
def test_loadshapes_normalize(self):
expected = 0
actual = self.dss.loadshapes_normalize()
assert actual == expected
def test_loadshapes_read_use_actual(self):
expected = 0
actual = self.dss.loadshapes_read_use_actual()
assert actual == expected
def test_loadshapes_write_use_actual(self):
expected = 1
self.dss.loadshapes_write_use_actual(expected)
actual = self.dss.loadshapes_read_use_actual()
assert actual == expected
# ===================================================================
# String methods
# ===================================================================
def test_loadshapes_read_name(self):
expected = 'default'
actual = self.dss.loadshapes_read_name()
assert actual == expected
def test_loadshapes_write_name(self):
self.new_loadshape()
expected = 'test'
self.dss.loadshapes_write_name(expected)
actual = self.dss.loadshapes_read_name()
assert actual == expected
# ===================================================================
# Float methods
# ===================================================================
def test_loadshapes_read_hr_interval(self):
self.new_loadshape(True)
expected = 1
actual = self.dss.loadshapes_read_hr_interval()
assert actual == expected
# TODO: method not writing
def test_loadshapes_write_hr_interval(self):
self.new_loadshape(True)
expected = 0.5
self.dss.loadshapes_write_hr_interval(expected)
actual = self.dss.loadshapes_read_hr_interval()
assert actual == expected
def test_loadshapes_read_min_interval(self):
self.new_loadshape(True)
expected = 60
actual = self.dss.loadshapes_read_min_interval()
assert actual == expected
# TODO: method not writing
def test_loadshapes_write_min_interval(self):
self.new_loadshape(True)
expected = 120
self.dss.loadshapes_write_min_interval(expected)
actual = self.dss.loadshapes_read_min_interval()
assert actual == expected
def test_loadshapes_read_s_interval(self):
self.new_loadshape(True)
expected = 3600
actual = self.dss.loadshapes_read_s_interval()
assert actual == expected
def test_loadshapes_write_s_interval(self):
self.new_loadshape(True)
expected = 4800
self.dss.loadshapes_write_s_interval(expected)
actual = self.dss.loadshapes_read_s_interval()
assert actual == expected
def test_loadshapes_read_p_base(self):
self.new_loadshape(True)
expected = 100
actual = self.dss.loadshapes_read_p_base()
assert actual == expected
# TODO: method not writing
def test_loadshapes_write_p_base(self):
self.new_loadshape(True)
expected = 50.0
self.dss.loadshapes_write_p_base(expected)
actual = self.dss.loadshapes_read_p_base()
assert actual == expected
def test_loadshapes_read_q_base(self):
self.new_loadshape(True)
expected = 50
actual = self.dss.loadshapes_read_q_base()
assert actual == expected
# TODO: method not writing
def test_loadshapes_write_q_base(self):
self.new_loadshape(True)
expected = 100
self.dss.loadshapes_write_q_base(expected)
actual = self.dss.loadshapes_read_q_base()
assert actual == expected
# ===================================================================
# Variant methods
# ===================================================================
def test_loadshapes_all_names(self):
expected = ['default']
actual = self.dss.loadshapes_all_names()
assert actual == expected
self.new_loadshape()
expected = ['default', 'test']
actual = self.dss.loadshapes_all_names()
assert actual == expected
def test_loadshapes_read_p_mult(self):
self.new_loadshape(True)
expected = [0.18000001, 0.19, 0.23999999, 0.33000001, 0.38999999, 0.41, 0.64999998, 1.23000002,
1.88999999, 1.88999999, 1.96000004, 1.98000002, 1.45000005, 1.62, 1.88999999, 1.79999995,
1.78999996, 1.19000006, 0.80000001, 0.66000003, 0.51999998, 0.40000001, 0.28, 0.23]
actual = self.dss.loadshapes_read_p_mult()
assert actual == expected
def test_loadshapes_write_p_mult(self):
self.new_loadshape(True)
expected = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
self.dss.loadshapes_write_p_mult(expected)
actual = self.dss.loadshapes_read_p_mult()
assert actual == expected
def test_loadshapes_read_q_mult(self):
self.new_loadshape(True)
expected = [0]
actual = self.dss.loadshapes_read_q_mult()
assert actual == expected
def test_loadshapes_write_q_mult(self):
self.new_loadshape(True)
expected = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
self.dss.loadshapes_write_q_mult(expected)
actual = self.dss.loadshapes_read_q_mult()
assert actual == expected
def test_loadshapes_read_time_array(self):
self.new_loadshape(True)
expected = [0]
actual = self.dss.loadshapes_read_time_array()
assert actual == expected
# TODO: method not writing
def test_loadshapes_write_time_array(self):
self.new_loadshape(True)
expected = [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
self.dss.loadshapes_write_time_array(expected)
actual = self.dss.loadshapes_read_time_array()
assert actual == expected
| 35.810811 | 109 | 0.592704 |
b7625931fb1ae04b472a0c7f8b6435b8b6756b6a | 11,460 | py | Python | test/python/algorithms/test_phase_estimator.py | Drinion/qiskit-terra | c73c2bfe98a436b04afb77d8e39f59e02a8ff1ac | [
"Apache-2.0"
] | 1 | 2021-10-05T11:56:53.000Z | 2021-10-05T11:56:53.000Z | test/python/algorithms/test_phase_estimator.py | Drinion/qiskit-terra | c73c2bfe98a436b04afb77d8e39f59e02a8ff1ac | [
"Apache-2.0"
] | 24 | 2021-01-27T08:20:27.000Z | 2021-07-06T09:42:28.000Z | test/python/algorithms/test_phase_estimator.py | Drinion/qiskit-terra | c73c2bfe98a436b04afb77d8e39f59e02a8ff1ac | [
"Apache-2.0"
] | 4 | 2021-10-05T12:07:27.000Z | 2022-01-28T18:37:28.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test phase estimation"""
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
from ddt import ddt, data
import numpy as np
from qiskit.algorithms.phase_estimators import PhaseEstimation, HamiltonianPhaseEstimation
from qiskit.opflow.evolutions import PauliTrotterEvolution, MatrixEvolution
import qiskit
from qiskit.opflow import (H, X, Y, Z, I, StateFn)
@ddt
class TestHamiltonianPhaseEstimation(QiskitAlgorithmsTestCase):
"""Tests for obtaining eigenvalues from phase estimation"""
def hamiltonian_pe(self, hamiltonian, state_preparation=None, num_evaluation_qubits=6,
backend=None,
evolution=None,
bound=None):
"""Run HamiltonianPhaseEstimation and return result with all phases."""
if backend is None:
backend = qiskit.BasicAer.get_backend('statevector_simulator')
quantum_instance = qiskit.utils.QuantumInstance(backend=backend, shots=10000)
phase_est = HamiltonianPhaseEstimation(
num_evaluation_qubits=num_evaluation_qubits,
quantum_instance=quantum_instance)
result = phase_est.estimate(
hamiltonian=hamiltonian,
state_preparation=state_preparation, evolution=evolution,
bound=bound)
return result
@data(MatrixEvolution(), PauliTrotterEvolution('suzuki', 4))
def test_pauli_sum_1(self, evolution):
"""Two eigenvalues from Pauli sum with X, Z"""
hamiltonian = 0.5 * X + Z
state_preparation = StateFn(H.to_circuit())
result = self.hamiltonian_pe(hamiltonian, state_preparation, evolution=evolution)
phase_dict = result.filter_phases(0.162, as_float=True)
phases = list(phase_dict.keys())
phases.sort()
self.assertAlmostEqual(phases[0], -1.125, delta=0.001)
self.assertAlmostEqual(phases[1], 1.125, delta=0.001)
@data(MatrixEvolution(), PauliTrotterEvolution('suzuki', 3))
def test_pauli_sum_2(self, evolution):
"""Two eigenvalues from Pauli sum with X, Y, Z"""
hamiltonian = 0.5 * X + Y + Z
state_preparation = None
result = self.hamiltonian_pe(hamiltonian, state_preparation, evolution=evolution)
phase_dict = result.filter_phases(0.1, as_float=True)
phases = list(phase_dict.keys())
phases.sort()
self.assertAlmostEqual(phases[0], -1.484, delta=0.001)
self.assertAlmostEqual(phases[1], 1.484, delta=0.001)
def test_single_pauli_op(self):
"""Two eigenvalues from Pauli sum with X, Y, Z"""
hamiltonian = Z
state_preparation = None
result = self.hamiltonian_pe(hamiltonian, state_preparation, evolution=None)
eigv = result.most_likely_eigenvalue
with self.subTest('First eigenvalue'):
self.assertAlmostEqual(eigv, 1.0, delta=0.001)
state_preparation = StateFn(X.to_circuit())
result = self.hamiltonian_pe(hamiltonian, state_preparation, bound=1.05)
eigv = result.most_likely_eigenvalue
with self.subTest('Second eigenvalue'):
self.assertAlmostEqual(eigv, -0.98, delta=0.01)
def test_H2_hamiltonian(self):
"""Test H2 hamiltonian"""
hamiltonian = (-1.0523732457728587 * (I ^ I)) + (0.3979374248431802 * (I ^ Z)) \
+ (-0.3979374248431802 * (Z ^ I)) + (-0.011280104256235324 * (Z ^ Z)) \
+ (0.18093119978423147 * (X ^ X))
state_preparation = StateFn((I ^ H).to_circuit())
evo = PauliTrotterEvolution(trotter_mode='suzuki', reps=4)
result = self.hamiltonian_pe(hamiltonian, state_preparation, evolution=evo)
with self.subTest('Most likely eigenvalues'):
self.assertAlmostEqual(result.most_likely_eigenvalue, -1.855, delta=.001)
with self.subTest('Most likely phase'):
self.assertAlmostEqual(result.phase, 0.5937, delta=.001)
with self.subTest('All eigenvalues'):
phase_dict = result.filter_phases(0.1)
phases = list(phase_dict.keys())
self.assertAlmostEqual(phases[0], -0.8979, delta=0.001)
self.assertAlmostEqual(phases[1], -1.8551, delta=0.001)
self.assertAlmostEqual(phases[2], -1.2376, delta=0.001)
def test_matrix_evolution(self):
"""1Q Hamiltonian with MatrixEvolution"""
hamiltonian = ((0.5 * X) + (0.6 * Y) + (0.7 * I))
state_preparation = None
result = self.hamiltonian_pe(hamiltonian, state_preparation, evolution=MatrixEvolution())
phase_dict = result.filter_phases(0.2, as_float=True)
phases = list(phase_dict.keys())
self.assertAlmostEqual(phases[0], 1.490, delta=0.001)
self.assertAlmostEqual(phases[1], -0.090, delta=0.001)
def _setup_from_bound(self, evolution, op_class):
hamiltonian = 0.5 * X + Y + Z
state_preparation = None
bound = 1.2 * sum([abs(hamiltonian.coeff * coeff) for coeff in hamiltonian.coeffs])
if op_class == 'MatrixOp':
hamiltonian = hamiltonian.to_matrix_op()
backend = qiskit.BasicAer.get_backend('statevector_simulator')
qi = qiskit.utils.QuantumInstance(backend=backend, shots=10000)
phase_est = HamiltonianPhaseEstimation(num_evaluation_qubits=6,
quantum_instance=qi)
result = phase_est.estimate(hamiltonian=hamiltonian,
bound=bound,
evolution=evolution,
state_preparation=state_preparation)
return result
def test_from_bound(self):
"""HamiltonianPhaseEstimation with bound"""
for op_class in ('SummedOp', 'MatrixOp'):
result = self._setup_from_bound(MatrixEvolution(), op_class)
cutoff = 0.01
phases = result.filter_phases(cutoff)
with self.subTest(f'test phases has the correct length: {op_class}'):
self.assertEqual(len(phases), 2)
with self.subTest(f'test scaled phases are correct: {op_class}'):
self.assertEqual(list(phases.keys()), [1.5, -1.5])
phases = result.filter_phases(cutoff, scaled=False)
with self.subTest(f'test unscaled phases are correct: {op_class}'):
self.assertEqual(list(phases.keys()), [0.25, 0.75])
def test_trotter_from_bound(self):
"""HamiltonianPhaseEstimation with bound and Trotterization"""
result = self._setup_from_bound(PauliTrotterEvolution(trotter_mode='suzuki', reps=3),
op_class='SummedOp')
phase_dict = result.filter_phases(0.1)
phases = list(phase_dict.keys())
with self.subTest('test phases has the correct length'):
self.assertEqual(len(phases), 2)
with self.subTest('test phases has correct values'):
self.assertAlmostEqual(phases[0], 1.5, delta=0.001)
self.assertAlmostEqual(phases[1], -1.5, delta=0.001)
@ddt
class TestPhaseEstimation(QiskitAlgorithmsTestCase):
"""Evolution tests."""
# pylint: disable=invalid-name
def one_phase(self, unitary_circuit, state_preparation=None, n_eval_qubits=6,
backend=None):
"""Run phase estimation with operator, eigenvalue pair `unitary_circuit`,
`state_preparation`. Return the bit string with the largest amplitude.
"""
if backend is None:
backend = qiskit.BasicAer.get_backend('qasm_simulator')
qi = qiskit.utils.QuantumInstance(backend=backend, shots=10000)
p_est = PhaseEstimation(num_evaluation_qubits=n_eval_qubits,
quantum_instance=qi)
result = p_est.estimate(unitary=unitary_circuit,
state_preparation=state_preparation)
phase = result.phase
return phase
@data('qasm_simulator', 'statevector_simulator')
def test_qpe_Z0(self, backend_type):
"""eigenproblem Z, |0>"""
backend = qiskit.BasicAer.get_backend(backend_type)
unitary_circuit = Z.to_circuit()
state_preparation = None # prepare |0>
phase = self.one_phase(unitary_circuit, state_preparation, backend=backend)
self.assertEqual(phase, 0.0)
@data('qasm_simulator', 'statevector_simulator')
def test_qpe_Z1(self, backend_type):
"""eigenproblem Z, |1>"""
backend = qiskit.BasicAer.get_backend(backend_type)
unitary_circuit = Z.to_circuit()
state_preparation = X.to_circuit() # prepare |1>
phase = self.one_phase(unitary_circuit, state_preparation, backend=backend)
self.assertEqual(phase, 0.5)
@data('plus', 'minus')
def test_qpe_Xplus(self, state):
"""eigenproblem X, |+>"""
unitary_circuit = X.to_circuit()
if state == 'minus': # prepare |->
state_preparation = X.to_circuit()
state_preparation.h(0)
else: # prepare |+>
state_preparation = H.to_circuit()
phase = self.one_phase(unitary_circuit, state_preparation)
if state == 'minus':
self.assertEqual(phase, 0.5)
else:
self.assertEqual(phase, 0.0)
def phase_estimation(self, unitary_circuit, state_preparation=None, num_evaluation_qubits=6,
backend=None):
"""Run phase estimation with operator, eigenvalue pair `unitary_circuit`,
`state_preparation`. Return all results
"""
if backend is None:
backend = qiskit.BasicAer.get_backend('statevector_simulator')
qi = qiskit.utils.QuantumInstance(backend=backend, shots=10000)
phase_est = PhaseEstimation(num_evaluation_qubits=num_evaluation_qubits,
quantum_instance=qi)
result = phase_est.estimate(unitary=unitary_circuit,
state_preparation=state_preparation)
return result
def test_qpe_Zplus(self):
"""superposition eigenproblem Z, |+>"""
unitary_circuit = Z.to_circuit()
state_preparation = H.to_circuit() # prepare |+>
result = self.phase_estimation(
unitary_circuit, state_preparation,
backend=qiskit.BasicAer.get_backend('statevector_simulator'))
phases = result.filter_phases(1e-15, as_float=True)
with self.subTest('test phases has correct values'):
self.assertEqual(list(phases.keys()), [0.0, 0.5])
with self.subTest('test phases has correct probabilities'):
np.testing.assert_allclose(list(phases.values()), [0.5, 0.5])
with self.subTest('test bitstring representation'):
phases = result.filter_phases(1e-15, as_float=False)
self.assertEqual(list(phases.keys()), ['000000', '100000'])
if __name__ == '__main__':
unittest.main()
| 44.941176 | 97 | 0.644154 |
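Outside the test harness the same estimate takes only a few lines; a minimal sketch built from calls that already appear above (the qubit and shot counts are arbitrary choices).

import qiskit
from qiskit.algorithms.phase_estimators import PhaseEstimation
from qiskit.opflow import X, Z

backend = qiskit.BasicAer.get_backend('qasm_simulator')
qi = qiskit.utils.QuantumInstance(backend=backend, shots=10000)

# Phase of Z acting on |1>: the eigenvalue -1 shows up as phase 0.5.
p_est = PhaseEstimation(num_evaluation_qubits=6, quantum_instance=qi)
result = p_est.estimate(unitary=Z.to_circuit(), state_preparation=X.to_circuit())
print(result.phase)  # expected to be 0.5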
40e440287ec31f09f3ae90a99de3a69ffbdacc67 | 536 | py | Python | sentimentAnalyzer/util/FetchTweets.py | myneuronews/analyzer | 43e9474a45f6bdb861b8a6dbc965c3f9a418f1bf | [
"MIT"
] | 2 | 2020-07-22T15:37:47.000Z | 2021-04-06T03:37:15.000Z | sentimentAnalyzer/util/FetchTweets.py | myneuronews/analyzer | 43e9474a45f6bdb861b8a6dbc965c3f9a418f1bf | [
"MIT"
] | 1 | 2019-05-10T15:35:43.000Z | 2021-12-19T09:19:44.000Z | sentimentAnalyzer/util/FetchTweets.py | Amrindersingh1/Twitter-Sentiment-Analyzer | 43e9474a45f6bdb861b8a6dbc965c3f9a418f1bf | [
"MIT"
] | null | null | null | import tweepy
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
class FetchData():
def getTwitterData(self, tag):
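        # Authenticate against the Twitter API and return up to 100 English
        # tweets matching the tag; on a TweepError the message is printed and
        # None is returned implicitly.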
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
            public_tweets = api.search(q=tag, count=100, lang='en')
return public_tweets
except tweepy.TweepError as e:
print("Error : " + str(e))
| 19.851852 | 76 | 0.613806 |
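A minimal usage sketch for the helper above; it assumes the four credential variables at the top of the module have been filled in with valid Twitter API keys and that the file is importable as FetchTweets.

from FetchTweets import FetchData  # assumed import path

fetcher = FetchData()
tweets = fetcher.getTwitterData('#python')  # up to 100 recent English tweets, or None on error
if tweets is not None:
    for tweet in tweets:
        print(tweet.text)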
cecfb71e9f304578c208fa1decd583c60bc9a23e | 465 | py | Python | security/tests/test_models.py | josylad/RoomScout | a3d067dd67dfdd43702ea2e89064213dbd469157 | [
"MIT"
] | null | null | null | security/tests/test_models.py | josylad/RoomScout | a3d067dd67dfdd43702ea2e89064213dbd469157 | [
"MIT"
] | null | null | null | security/tests/test_models.py | josylad/RoomScout | a3d067dd67dfdd43702ea2e89064213dbd469157 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.test import TestCase, Client
# TODO: Write tests for security models
class SecurityModelTests(TestCase):
def setUp(self):
self.client = Client()
User = get_user_model()
self.user = User.objects.create_user(username='FredFlintstone', email='aaron@xnovax.net', password='babadoo')
self.user2 = User.objects.create_user(username='JackyFlintstone', email='jacky@flintstone.com', password='lovefred')
| 42.272727 | 118 | 0.778495 |
9ff4101ab0c31c9ccfe448b0a7c10624d8b61434 | 10,866 | py | Python | tests/test_template.py | slicelife/shpkpr | 2fd8874f2b7dc44de309fb6466f7320fa8e0b3a5 | [
"MIT"
] | 17 | 2015-11-17T17:12:29.000Z | 2021-12-14T15:30:43.000Z | tests/test_template.py | slicelife/shpkpr | 2fd8874f2b7dc44de309fb6466f7320fa8e0b3a5 | [
"MIT"
] | 86 | 2015-11-18T15:59:52.000Z | 2020-10-01T10:19:36.000Z | tests/test_template.py | slicelife/shpkpr | 2fd8874f2b7dc44de309fb6466f7320fa8e0b3a5 | [
"MIT"
] | 2 | 2017-11-14T14:10:05.000Z | 2020-04-07T19:46:41.000Z | # third-party imports
import pytest
# local imports
from shpkpr.template import InvalidJSONError
from shpkpr.template import MissingTemplateError
from shpkpr.template import UndefinedError
from shpkpr.template import load_values_from_environment
from shpkpr.template import render_json_template
from shpkpr.template_filters import IntegerRequired
from shpkpr.template_filters import IntegerTooLarge
from shpkpr.template_filters import IntegerTooSmall
from shpkpr.template_filters import FloatRequired
from shpkpr.template_filters import FloatTooLarge
from shpkpr.template_filters import FloatTooSmall
def _write_template_to_disk(tmpdir, template_name, template_data):
"""shpkpr loads template files from disk normally. This convenience
function writes a template file to disk and returns a (directory, name)
tuple.
"""
with tmpdir.join(template_name).open("w") as f:
f.write(template_data)
return (tmpdir.strpath, template_name)
def test_load_environment_vars_without_prefix(monkeypatch):
monkeypatch.setenv('BANANA', 'bread')
monkeypatch.setenv('STRAWBERRY', 'cheesecake')
monkeypatch.setenv('APPLE_AND_BLACKCURRANT', 'crumble')
values = load_values_from_environment()
assert 'BANANA' in values
assert values['BANANA'] == 'bread'
assert 'STRAWBERRY' in values
assert values['STRAWBERRY'] == 'cheesecake'
assert 'APPLE_AND_BLACKCURRANT' in values
assert values['APPLE_AND_BLACKCURRANT'] == 'crumble'
def test_load_environment_vars_with_prefix(monkeypatch):
monkeypatch.setenv('BANANA', 'bread')
monkeypatch.setenv('SHPKPR_STRAWBERRY', 'cheesecake')
monkeypatch.setenv('SHPKPR_APPLE_AND_BLACKCURRANT', 'crumble')
monkeypatch.setenv('SHPKPR_SHPKPR_APPLE_AND_BLACKCURRANT', 'crumble')
values = load_values_from_environment("SHPKPR")
assert 'BANANA' not in values
assert 'STRAWBERRY' in values
assert values['STRAWBERRY'] == 'cheesecake'
assert 'APPLE_AND_BLACKCURRANT' in values
assert values['APPLE_AND_BLACKCURRANT'] == 'crumble'
assert 'SHPKPR_APPLE_AND_BLACKCURRANT' in values
assert values['SHPKPR_APPLE_AND_BLACKCURRANT'] == 'crumble'
def test_load_environment_vars_with_prefix_with_trailing_underscore(monkeypatch):
monkeypatch.setenv('BANANA', 'bread')
monkeypatch.setenv('SHPKPR_STRAWBERRY', 'cheesecake')
monkeypatch.setenv('SHPKPR_APPLE_AND_BLACKCURRANT', 'crumble')
monkeypatch.setenv('SHPKPR_SHPKPR_APPLE_AND_BLACKCURRANT', 'crumble')
values = load_values_from_environment("SHPKPR_")
assert 'BANANA' not in values
assert 'STRAWBERRY' in values
assert values['STRAWBERRY'] == 'cheesecake'
assert 'APPLE_AND_BLACKCURRANT' in values
assert values['APPLE_AND_BLACKCURRANT'] == 'crumble'
assert 'SHPKPR_APPLE_AND_BLACKCURRANT' in values
assert values['SHPKPR_APPLE_AND_BLACKCURRANT'] == 'crumble'
def test_render_json_template_valid(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"type_of_muffin": "{{ MUFFIN_TYPE }}"}',
)
rendered_template = render_json_template(template_path, template_name, **{"MUFFIN_TYPE": "banana"})
assert "type_of_muffin" in rendered_template
assert rendered_template["type_of_muffin"] == "banana"
def test_render_json_template_invalid_json_unquoted_string(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"type_of_muffin": {{ MUFFIN_TYPE }}}',
)
with pytest.raises(InvalidJSONError):
render_json_template(template_path, template_name, **{"MUFFIN_TYPE": "banana"})
def test_render_json_template_invalid_json_missing_value(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"type_of_muffin": {{ MUFFIN_TYPE }}}',
)
with pytest.raises(InvalidJSONError):
render_json_template(template_path, template_name, **{"MUFFIN_TYPE": ""})
def test_render_json_template_missing_value_raises(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"type_of_muffin": "{{ MUFFIN_TYPE }}"}',
)
with pytest.raises(UndefinedError):
render_json_template(template_path, template_name, **{})
def test_render_json_template_all_env(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'''
{
"types_of_muffin": {
{% for k, v in _all_env|filter_items("MUFFIN_", True) %}
"{{ k.lower() }}": {{ v }}{% if loop.last == False %},{% endif %}
{% endfor %}
}
}
''',
)
rendered_template = render_json_template(template_path, template_name, **{
"MUFFIN_BLUEBERRY": 4,
"MUFFIN_BANANA": 7,
"MUFFIN_CHOCOLATE": 12,
"DONUT_STRAWBERRY": 9,
})
assert "types_of_muffin" in rendered_template
assert rendered_template["types_of_muffin"]["blueberry"] == 4
assert rendered_template["types_of_muffin"]["banana"] == 7
assert rendered_template["types_of_muffin"]["chocolate"] == 12
def test_render_json_template_require_int(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_int }}}',
)
rendered_template = render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "1"})
assert rendered_template['muffin_count'] == 1
def test_render_json_template_require_int_requires_int(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_int }}}',
)
with pytest.raises(IntegerRequired):
render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "one muffin"})
def test_render_json_template_require_int_min_constraint(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_int(min=50) }}}',
)
rendered_template = render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "60"})
assert rendered_template['muffin_count'] == 60
def test_render_json_template_require_int_min_constraint_raises(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_int(min=50) }}}',
)
with pytest.raises(IntegerTooSmall):
render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "40"})
def test_render_json_template_require_int_max_constraint(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_int(max=50) }}}',
)
rendered_template = render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "-60"})
assert rendered_template['muffin_count'] == -60
def test_render_json_template_require_int_max_constraint_raises(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_int(max=50) }}}',
)
with pytest.raises(IntegerTooLarge):
render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "60"})
def test_render_json_template_require_float(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_float }}}',
)
rendered_template = render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "1.01"})
assert rendered_template['muffin_count'] == 1.01
def test_render_json_template_require_float_requires_float(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_float }}}',
)
with pytest.raises(FloatRequired):
render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "one muffin"})
def test_render_json_template_require_float_min_constraint(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_float(min=50) }}}',
)
rendered_template = render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "60"})
assert rendered_template['muffin_count'] == 60
def test_render_json_template_require_float_min_constraint_raises(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_float(min=50) }}}',
)
with pytest.raises(FloatTooSmall):
render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "40"})
def test_render_json_template_require_float_max_constraint(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_float(max=50) }}}',
)
rendered_template = render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "-60"})
assert rendered_template['muffin_count'] == -60
def test_render_json_template_require_float_max_constraint_raises(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'{"muffin_count": {{ MUFFIN_COUNT|require_float(max=50) }}}',
)
with pytest.raises(FloatTooLarge):
render_json_template(template_path, template_name, **{"MUFFIN_COUNT": "60"})
def test_render_json_template_with_inheritance_valid(tmpdir):
_write_template_to_disk(
tmpdir.mkdir('bases'), 'base.json',
'{"type_of_muffin": "{% block MUFFIN_TYPE_PLACEHOLDER %}{{ MUFFIN_TYPE }}{% endblock %}"}',
)
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'''
{% extends "bases/base.json" %}
{% block MUFFIN_TYPE_PLACEHOLDER %}{{ ALT_MUFFIN_TYPE }}{% endblock %}
''',
)
rendered_template = render_json_template(template_path, template_name, **{
"MUFFIN_TYPE": "banana",
"ALT_MUFFIN_TYPE": "blueberry",
})
assert "type_of_muffin" in rendered_template
assert rendered_template["type_of_muffin"] == "blueberry"
def test_render_json_template_with_inheritance_no_parent(tmpdir):
template_path, template_name = _write_template_to_disk(
tmpdir, 'template.json',
'''
{% extends "bases/base.json" %}
{% block MUFFIN_TYPE_PLACEHOLDER %}{{ ALT_MUFFIN_TYPE }}{% endblock %}
''',
)
with pytest.raises(MissingTemplateError):
render_json_template(template_path, template_name, **{})
| 36.709459 | 103 | 0.716731 |
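The helpers exercised above can also be used directly; a minimal sketch in which the template content and values are made up and a plain temporary directory stands in for the pytest tmpdir fixture.

import os
import tempfile

from shpkpr.template import load_values_from_environment, render_json_template

template_dir = tempfile.mkdtemp()
with open(os.path.join(template_dir, 'app.json'), 'w') as f:
    f.write('{"instances": {{ INSTANCES|require_int(min=1) }}}')

# Values can come from the environment (everything prefixed SHPKPR_) or be supplied directly.
values = load_values_from_environment("SHPKPR")
values.setdefault("INSTANCES", "3")

rendered = render_json_template(template_dir, 'app.json', **values)
print(rendered)  # {'instances': 3}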
36e3cfcde64df8f2cd9fd9f672706ac16b906d47 | 15,043 | py | Python | afk-q-babyai/babyai/utils/wrapper.py | IouJenLiu/AFK | db2b47bb3a5614b61766114b87f143e4a61a4a8d | [
"MIT"
] | 1 | 2022-03-12T03:10:29.000Z | 2022-03-12T03:10:29.000Z | afk-q-babyai/babyai/utils/wrapper.py | IouJenLiu/AFK | db2b47bb3a5614b61766114b87f143e4a61a4a8d | [
"MIT"
] | null | null | null | afk-q-babyai/babyai/utils/wrapper.py | IouJenLiu/AFK | db2b47bb3a5614b61766114b87f143e4a61a4a8d | [
"MIT"
] | null | null | null |
from gym import RewardWrapper
import gym
import numpy as np
from ..info_seeking.knowledge_graph import KG
import random
class TransformReward(gym.Wrapper):
def __init__(self, env):
super(TransformReward, self).__init__(env)
self.count = 0
def step(self, action):
self.count += 1
obs, rewrd, done, info = super().step(action)
if action == 5:
if self.env.room_grid[0][2].objs:
if (self.env.front_pos[0], self.env.front_pos[1]) == self.env.room_grid[0][0].door_pos[0]:
rewrd = -0.05
else:
if (self.env.front_pos[0], self.env.front_pos[1]) == self.env.room_grid[0][2].door_pos[2]:
rewrd = -0.05
return obs, rewrd, done, info
class TargetLocationWrapper(gym.ObservationWrapper):
def __init__(self, env):
super(TargetLocationWrapper, self).__init__(env)
self.count = 0
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.agent_view_size, self.agent_view_size, 4),
dtype='uint8'
)
self.observation_space = gym.spaces.Dict({
'image': self.observation_space
})
def observation(self, observation):
if 'Room2' in self.env.mission:
obs = np.concatenate((observation['image'], np.ones((7, 7, 1))), axis=2)
else:
obs = np.concatenate((observation['image'], np.zeros((7, 7, 1))), axis=2)
observation['image'] = obs
return observation
class DirectionWrapper(gym.ObservationWrapper):
def __init__(self, env):
super(DirectionWrapper, self).__init__(env)
self.count = 0
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.agent_view_size, self.agent_view_size, 4),
dtype='uint8'
)
self.observation_space = gym.spaces.Dict({
'image': self.observation_space
})
def observation(self, observation):
size = observation['image'].shape[0]
obs = np.concatenate((observation['image'], self.env.agent_dir * np.ones((size, size, 1))), axis=2)
observation['image'] = obs
return observation
class XYLocationWrapper(gym.ObservationWrapper):
def __init__(self, env):
super(XYLocationWrapper, self).__init__(env)
self.count = 0
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.agent_view_size, self.agent_view_size, 5),
dtype='uint8'
)
self.observation_space = gym.spaces.Dict({
'image': self.observation_space
})
def observation(self, observation):
obs = np.concatenate((observation['image'], self.env.agent_pos[0] * np.ones((7, 7, 1)), self.env.agent_pos[1] * np.ones((7, 7, 1))), axis=2)
observation['image'] = obs
return observation
import re
class AnsWrapper(gym.ObservationWrapper):
def __init__(self, env):
super(AnsWrapper, self).__init__(env)
self.tokens = ['none',
'blue box', 'green box', 'grey box',
'blue key', 'green key', 'grey key',
]
n_channel = env.observation_space['image'].shape[-1] + len(self.tokens)
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.agent_view_size, self.agent_view_size, n_channel),
dtype='uint8'
)
self.observation_space = gym.spaces.Dict({
'image': self.observation_space
})
def observation(self, observation):
ans = re.findall("([a-z0-9]+)", observation['ans'].lower())
ans_channel = None
ans_channel = np.zeros((7, 7, len(self.tokens)))
for i, token in enumerate(self.tokens):
if token in ans:
ans_channel[:, :, i] = 1
break
if ans_channel is None:
raise ValueError
obs = np.concatenate((observation['image'], ans_channel), axis=2)
observation['image'] = obs
return observation
class InstrWrapper(gym.ObservationWrapper):
def __init__(self, env):
super(InstrWrapper, self).__init__(env)
self.tokens = ['blue ball', 'green ball', 'grey ball', 'mary', 'jack']
n_channel = env.observation_space['image'].shape[-1] + len(self.tokens)
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.agent_view_size, self.agent_view_size, n_channel),
dtype='uint8'
)
self.observation_space = gym.spaces.Dict({
'image': self.observation_space
})
def observation(self, observation):
ans = observation['mission'].lower()
ans_channel = None
for i, token in enumerate(self.tokens):
if token in ans:
ans_channel = np.zeros((7, 7, len(self.tokens)))
ans_channel[:, :, i] = 1
break
if ans_channel is None:
raise ValueError
obs = np.concatenate((observation['image'], ans_channel), axis=2)
observation['image'] = obs
return observation
from collections import defaultdict
import math
class CountRewardWrapper(gym.Wrapper):
def __init__(self, env, alpha=1, count_action=False):
super(CountRewardWrapper, self).__init__(env)
self.memory = defaultdict(int)
self.alpha = alpha
self.count_action = count_action
self.mini_grid_actions_map = {'left': 0, 'right': 1, 'forward': 2, 'pickup': 3, 'drop': 4, 'toggle': 5,
'done': 6}
def step(self, action):
obs, reward, done, info = super().step(action)
tuple_obs = tuple(obs['image'].reshape(1, -1)[0])
self.memory[tuple_obs] += 1
reward += self.alpha / math.sqrt(self.memory[tuple_obs])
return obs, reward, done, info
class KGWrapper(gym.Wrapper):
"""
A wrapper that returns the connected component of the KG in observation.
kg_repr = [one_hot, raw]
one_hot: each sentence is encoded as an onehot channel of the image
raw: return all raw sentences as a list in observation['kg_cc']
"""
def __init__(self, env, penalize_query=False, cc_bonus=0.05, weighted_bonus=False, kg_repr='one_hot', mode='graph', n_gram=2,
distractor_file_path=None, n_distractors=0, node_sample_mode='fixed', args=None):
super(KGWrapper, self).__init__(env)
self.kg_repr = kg_repr
n_channel = env.observation_space['image'].shape[-1]
self.moving_actions = ['left', 'right', 'forward', 'pickup', 'drop', 'toggle', 'done']
self.colors = ['red', 'green', 'blue', 'purple', 'yellow', 'grey']
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.agent_view_size, self.agent_view_size, n_channel),
dtype='uint8'
)
self.observation_space = gym.spaces.Dict({
'image': self.observation_space
})
mode = 'set' if mode == 'no_kg' else mode
self.KG = KG(mode=mode, n_gram=n_gram)
self.cc_bonus = cc_bonus
self.penalize_query = penalize_query
if self.penalize_query:
self.query_penalty = -0.01
self.weighted_bonus = weighted_bonus
if distractor_file_path:
# Generate on the fly
self.distractors = True
else:
self.distractors = False
self.total_frames_per_proc = args.frames // args.procs
self.cur_total_frames = 0
self.decrease_bonus = args.decrease_bonus
def bonus_coef(self):
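        # Exploration-bonus multiplier: 1.0 until 60% of this process's frame
        # budget has elapsed; after that (when decrease_bonus is set) it decays
        # roughly linearly from ~1.05 towards ~0.05 over the remaining frames.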
if not self.decrease_bonus:
return 1
anneal_th = 0.6 * self.total_frames_per_proc
if self.cur_total_frames <= anneal_th:
return 1
else:
return 1.05 - (self.cur_total_frames - anneal_th) / (self.total_frames_per_proc - anneal_th)
def step(self, action):
obs, reward, done, info = super().step(action)
if isinstance(action, list) and len(action) > 1 and action[0] not in self.moving_actions:
for ans in obs['ans'].split(','):
is_CC_increase, overlap = self.KG.update(self.pre_proc_asn(ans))
if is_CC_increase:
if self.weighted_bonus:
reward += self.bonus_coef() * self.cc_bonus * overlap
else:
reward += self.bonus_coef() * self.cc_bonus
if self.penalize_query:
reward += self.query_penalty
obs = self.observation(obs, self.KG.getCC())
self.cur_total_frames += 1
return obs, reward, done, info
def reset(self, **kwargs):
obs = super().reset(**kwargs)
self.KG.reset(self.pre_proc_asn(obs['mission']))
if self.distractors:
new_nodes = self.unwrapped.useful_answers + self.gen_distractors()
random.shuffle(new_nodes)
for new_node in new_nodes:
split_node = new_node.split()
if len(self.unwrapped.useful_answers) > 2:
split_ans = self.unwrapped.useful_answers[2].split()
if len(split_node) == 4 and split_node[0] == split_ans[0] and split_node[1] == split_ans[1]:
continue
self.KG.update(self.pre_proc_asn(new_node))
obs = self.observation(obs, self.KG.getCC())
return obs
def gen_distractors(self):
names = ['tim', 'allen', 'tom', 'jack', 'mary']
objs = ['suitcase', 'toy']
colors = ['purple', 'orange', 'blue', 'green', 'gray', 'grey', 'yellow', 'red', 'white', 'pink']
shapes = ['box', 'ball', 'key']
distractors = []
for name in names:
for obj in objs:
color = random.choice(colors)
shape = random.choice(shapes)
distractors.append('{} {} {} {}'.format(name, obj, color, shape))
places = ['livingroom', 'kitchen', 'restroom']
rooms = ['room0', 'room1', 'room2', 'room3', 'room4', 'room5', 'room6', 'room7', 'room8']
for name in names:
place = random.choice(places)
room = random.choice(rooms)
distractors.append('{} {} {}'.format(name, place, room))
for name in names:
for color in colors:
for shape in objs:
place = random.choice(places)
distractors.append('{} {} {} in {}'.format(name, color, shape, place))
directions = ['east', 'west']
for color in colors:
for room in rooms:
dir = random.choice(directions)
distractors.append('{} {} in {}'.format(color, room, dir))
random.shuffle(distractors)
return distractors
def observation(self, observation, CC):
"""
:param observation: dictionary
:param CC: list of tuples
:return: modified observation
"""
if self.kg_repr == 'one_hot':
ans_channel = np.zeros((7, 7, len(self.tokens)))
for ans in CC:
for i, token in enumerate(self.tokens):
if token == ans:
ans_channel[:, :, i] = 1
break
obs = np.concatenate((observation['image'], ans_channel), axis=2)
observation['image'] = obs
elif self.kg_repr == 'raw':
raw_repr = []
for node in CC:
raw_repr.append(' '.join(node))
observation['kg_cc'] = raw_repr
else:
raise NotImplementedError
return observation
def pre_proc_asn(self, ans):
ans = re.findall("([a-z0-9]+)", ans.lower())
if 'is' in ans:
ans.remove('is')
if 'in' in ans:
ans.remove('in')
return ans
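# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative, not part of the original module): the
# wrappers above are meant to be stacked on a BabyAI/MiniGrid-style env, e.g.
#
#   env = gym.make("BabyAI-GoToLocal-v0")                # hypothetical env id
#   env = DirectionWrapper(env)                          # extra agent-direction plane
#   env = KGWrapper(env, cc_bonus=0.05, kg_repr="raw",   # KG exploration bonus
#                   mode="graph", n_gram=2, args=args)   # args needs .frames/.procs/.decrease_bonus
#   obs = env.reset()                                    # obs["kg_cc"] holds the KG sentences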
class RenderWrapper(gym.Wrapper):
def __init__(self, env):
        super(RenderWrapper, self).__init__(env)
self.eps_steps = 0
self.action = None
def step(self, action):
self.action = action
self.eps_steps += 1
return super().step(action)
def reset(self):
self.eps_steps = 0
self.action = None
return super().reset()
# Size in pixels of a tile in the full-scale human view
TILE_PIXELS = 32
def render(self, mode='human', close=False, highlight=True, tile_size=TILE_PIXELS, KG=None):
"""
Render the whole-grid human view
"""
if close:
if self.window:
self.window.close()
return
if mode == 'human' and not self.window:
import gym_minigrid.window
self.window = gym_minigrid.window.Window('gym_minigrid')
self.window.ax.xaxis.label.set_fontsize(10)
self.window.fig.subplots_adjust(top=1.0, bottom=0.3)
self.window.show(block=False)
# Compute which cells are visible to the agent
_, vis_mask = self.gen_obs_grid()
# Compute the world coordinates of the bottom-left corner
# of the agent's view area
f_vec = self.dir_vec
r_vec = self.right_vec
top_left = self.agent_pos + f_vec * (self.agent_view_size - 1) - r_vec * (self.agent_view_size // 2)
# Mask of which cells to highlight
        highlight_mask = np.zeros(shape=(self.width, self.height), dtype=bool)
# For each cell in the visibility mask
for vis_j in range(0, self.agent_view_size):
for vis_i in range(0, self.agent_view_size):
# If this cell is not visible, don't highlight it
if not vis_mask[vis_i, vis_j]:
continue
# Compute the world coordinates of this cell
abs_i, abs_j = top_left - (f_vec * vis_j) + (r_vec * vis_i)
if abs_i < 0 or abs_i >= self.width:
continue
if abs_j < 0 or abs_j >= self.height:
continue
# Mark this cell to be highlighted
highlight_mask[abs_i, abs_j] = True
# Render the whole grid
img = self.grid.render(
tile_size,
self.agent_pos,
self.agent_dir,
highlight_mask=highlight_mask if highlight else None
)
if mode == 'human':
prev_query = self.prev_query if hasattr(self, 'prev_query') else ""
prev_ans = self.prev_ans if hasattr(self, 'prev_ans') else ""
caption = 'Instr: {} step: {} action: {}\n'.format(self.mission, self.eps_steps, self.action)
in_bos = 'Q: ' + prev_query + ' A: ' + prev_ans + "\n"
self.window.set_caption(caption + in_bos)
self.window.show_img(img)
return img
| 35.902148 | 148 | 0.56385 |
27b6d11a29c6def4e0fed4e69122760a340d7ec9 | 4,799 | py | Python | scripts/python/meta/tasks/proteomics/001_data_preparation.py | AaronBlare/dnam | 4d97c879cb24447eee0852eaf48fc5b3ef8e159b | [
"MIT"
] | null | null | null | scripts/python/meta/tasks/proteomics/001_data_preparation.py | AaronBlare/dnam | 4d97c879cb24447eee0852eaf48fc5b3ef8e159b | [
"MIT"
] | null | null | null | scripts/python/meta/tasks/proteomics/001_data_preparation.py | AaronBlare/dnam | 4d97c879cb24447eee0852eaf48fc5b3ef8e159b | [
"MIT"
] | null | null | null | import pandas as pd
from scripts.python.pheno.datasets.filter import filter_pheno, get_passed_fields
from scripts.python.pheno.datasets.features import get_column_name, get_default_statuses_ids, get_status_dict, get_default_statuses, get_sex_dict
from scripts.python.preprocessing.serialization.routines.pheno_betas_checking import get_pheno_betas_with_common_subjects
from scripts.python.routines.betas import betas_drop_na
import plotly.graph_objects as go
from scripts.python.routines.manifest import get_manifest
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.histogram import add_histogram_trace
from scripts.python.routines.plot.layout import add_layout
import json
from pathlib import Path
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset')
folder_name = f"proteomics"
path_save = f"{path}/meta/tasks/{folder_name}"
Path(f"{path_save}/figs").mkdir(parents=True, exist_ok=True)
tissue_datasets = {
'Brain': ['GSE74193'],
'Liver': ['GSE48325', 'GSE61258', 'GSE61446'],
'Blood': ['GSE87571']
}
target_features = ['Status', 'Age', 'Sex']
for tissue, datasets in tissue_datasets.items():
tmp_path = f"{path_save}/{tissue}"
Path(f"{tmp_path}/figs").mkdir(parents=True, exist_ok=True)
pheno_all = pd.DataFrame(columns=target_features + ['Dataset'])
pheno_all.index.name = 'subject_id'
for d_id, dataset in enumerate(datasets):
platform = datasets_info.loc[dataset, 'platform']
manifest = get_manifest(platform)
statuses = get_default_statuses(dataset)
status_col = get_column_name(dataset, 'Status').replace(' ', '_')
statuses_ids = get_default_statuses_ids(dataset)
status_dict = get_status_dict(dataset)
status_passed_fields = get_passed_fields(status_dict, statuses)
controls_status_vals = [status_dict['Control'][x].column for x in statuses_ids['Control']]
controls_labels = ', '.join([status_dict['Control'][x].label for x in statuses_ids['Control']])
age_col = get_column_name(dataset, 'Age').replace(' ', '_')
sex_col = get_column_name(dataset, 'Sex').replace(' ', '_')
sex_dict = get_sex_dict(dataset)
continuous_vars = {'Age': age_col}
categorical_vars = {
status_col: [x.column for x in status_passed_fields],
sex_col: [sex_dict[x] for x in sex_dict]
}
pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno.pkl")
pheno = filter_pheno(dataset, pheno, continuous_vars, categorical_vars)
betas = pd.read_pickle(f"{path}/{platform}/{dataset}/betas.pkl")
betas = betas_drop_na(betas)
df = pd.merge(pheno, betas, left_index=True, right_index=True)
df = df.loc[df[status_col].isin(controls_status_vals), :]
pheno = df.loc[:, [status_col, sex_col, age_col]]
status_dict_inverse = dict((x.column, x.label) for x in status_passed_fields)
pheno[status_col].replace(status_dict_inverse, inplace=True)
pheno.rename(columns={status_col: 'Status'}, inplace=True)
sex_dict_inverse = {v: k for k, v in sex_dict.items()}
pheno[sex_col].replace(sex_dict_inverse, inplace=True)
pheno.rename(columns={sex_col: 'Sex'}, inplace=True)
pheno.rename(columns={age_col: 'Age'}, inplace=True)
pheno.loc[:, 'Dataset'] = dataset
pheno_all = pheno_all.append(pheno, verify_integrity=True)
cpgs = betas.columns.values
betas = df[cpgs].T
if d_id == 0:
betas_all = betas
else:
betas_all = betas_all.merge(betas, how='inner', left_index=True, right_index=True)
print(f"Number of remaining subjects: {pheno_all.shape[0]}")
betas_all = betas_all.T
betas_all.index.name = "subject_id"
pheno_all, betas_all = get_pheno_betas_with_common_subjects(pheno_all, betas_all)
pheno_all.to_pickle(f"{tmp_path}/pheno.pkl")
pheno_all.to_excel(f"{tmp_path}/pheno.xlsx", index=True)
betas_all.to_pickle(f"{tmp_path}/betas.pkl")
info = {tissue: datasets, "betas.shape": betas_all.shape}
with open(f"{tmp_path}/info.json", 'w', encoding='utf-8') as f:
json.dump(info, f, ensure_ascii=False, indent=4)
pheno_f = pheno_all.loc[pheno_all['Sex'].isin(['F']), :]
pheno_m = pheno_all.loc[pheno_all['Sex'].isin(['M']), :]
fig = go.Figure()
add_histogram_trace(fig, pheno_f['Age'].values, f"Female ({pheno_f.shape[0]})", 5.0)
add_histogram_trace(fig, pheno_m['Age'].values, f"Male ({pheno_m.shape[0]})", 5.0)
add_layout(fig, "Age", "Count", f"{tissue}")
fig.update_layout(colorway=['red', 'blue'], barmode='overlay')
save_figure(fig, f"{tmp_path}/figs/histogram_Age_Sex")
| 44.435185 | 145 | 0.695562 |
7b9fef049c3b11477096b36c50ecd18d053b9054 | 1,487 | py | Python | setup.py | lilwebsite/bigwebsite-public | 4178f3cfb0d5575907fef0916c04c975687a48a5 | [
"MIT"
] | 1 | 2019-05-09T13:23:43.000Z | 2019-05-09T13:23:43.000Z | setup.py | lilwebsite/bigwebsite-public | 4178f3cfb0d5575907fef0916c04c975687a48a5 | [
"MIT"
] | null | null | null | setup.py | lilwebsite/bigwebsite-public | 4178f3cfb0d5575907fef0916c04c975687a48a5 | [
"MIT"
] | null | null | null | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
requires = [
'passlib',
'bcrypt',
'pyramid',
'pyramid_jinja2',
'pyramid_tm',
'SQLAlchemy',
'transaction',
'zope.sqlalchemy',
'zope.interface',
'waitress',
'gevent',
'gunicorn',
'Pillow',
'PyPDF2'
]
dev_requires = [
'pyramid_debugtoolbar',
'pytest',
'WebTest'
]
setup(
name='bigwebsite',
version='1.0',
description='Dylans Website',
long_description='Bigwebsite is a website for Dylan Boroqhuez and his projects. The site is programmed by me (Carl Gessau) and can be managed by either dylan or me thanks to pyramid\'s framework.',
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='Carl Gessau',
author_email='carl@bigwebsite.cool',
url='www.bigwebsite.cool',
keywords='',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
extras_require={
'dev': dev_requires
},
#entry_points="""\
#[paste.app_factory]
#main = bigwebsite:main
#[console_scripts]
#initialize_bigwebsite_db = bigwebsite.scripts.initializedb:main
#"""
entry_points={
'paste.app_factory': [
'main = bigwebsite:main'
],
'console_scripts': [
'initialize_bigwebsite_db = bigwebsite.init_scripts.initializedb:main'
]
}
)
| 22.19403 | 199 | 0.675857 |
3ee7b43ffb373d0c20082d7b251228b6d0e26484 | 2,909 | py | Python | models/object_detection/pytorch/maskrcnn/maskrcnn-benchmark/maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py | Pandinosaurus/models-intelai | 60f5712d79a363bdb7624e3116a66a4f1a7fe208 | [
"Apache-2.0"
] | 357 | 2019-01-23T23:54:30.000Z | 2022-03-31T05:32:25.000Z | models/object_detection/pytorch/maskrcnn/maskrcnn-benchmark/maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py | Pandinosaurus/models-intelai | 60f5712d79a363bdb7624e3116a66a4f1a7fe208 | [
"Apache-2.0"
] | 65 | 2019-02-06T15:35:35.000Z | 2022-03-25T09:56:48.000Z | models/object_detection/pytorch/maskrcnn/maskrcnn-benchmark/maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py | Pandinosaurus/models-intelai | 60f5712d79a363bdb7624e3116a66a4f1a7fe208 | [
"Apache-2.0"
] | 164 | 2019-02-06T15:05:57.000Z | 2022-03-31T11:48:14.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from .roi_box_feature_extractors import make_roi_box_feature_extractor
from .roi_box_predictors import make_roi_box_predictor
from .inference import make_roi_box_post_processor
from .loss import make_roi_box_loss_evaluator
class ROIBoxHead(torch.nn.Module):
"""
Generic Box Head class.
"""
def __init__(self, cfg, in_channels):
super(ROIBoxHead, self).__init__()
self.feature_extractor = make_roi_box_feature_extractor(cfg, in_channels)
self.predictor = make_roi_box_predictor(
cfg, self.feature_extractor.out_channels)
self.post_processor = make_roi_box_post_processor(cfg)
self.loss_evaluator = make_roi_box_loss_evaluator(cfg)
def forward(self, features, proposals, targets=None):
"""
Arguments:
features (list[Tensor]): feature-maps from possibly several levels
proposals (list[BoxList]): proposal boxes
targets (list[BoxList], optional): the ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the subsampled proposals
are returned. During testing, the predicted boxlists are returned
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
if self.training:
# Faster R-CNN subsamples during training the proposals with a fixed
# positive / negative ratio
with torch.no_grad():
proposals = self.loss_evaluator.subsample(proposals, targets)
# extract features that will be fed to the final classifier. The
# feature_extractor generally corresponds to the pooler + heads
x = self.feature_extractor(features, proposals)
# final classifier that converts the features into predictions
class_logits, box_regression = self.predictor(x)
x = x.to(torch.float32)
class_logits = class_logits.to(torch.float32)
box_regression = box_regression.to(torch.float32)
if not self.training:
result = self.post_processor((class_logits, box_regression), proposals)
return x, result, {}
loss_classifier, loss_box_reg = self.loss_evaluator(
[class_logits], [box_regression]
)
return (
x,
proposals,
dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg),
)
def build_roi_box_head(cfg, in_channels):
"""
Constructs a new box head.
By default, uses ROIBoxHead, but if it turns out not to be enough, just register a new class
and make it a parameter in the config
"""
return ROIBoxHead(cfg, in_channels)
| 38.786667 | 96 | 0.673771 |
348be9f78d0fb3e33122f6112552792f6f2adf78 | 1,899 | py | Python | tests/test_init.py | openefsa/asreview | aec14fcad0532a3989befe577ceb369a9dbba243 | [
"Apache-2.0"
] | null | null | null | tests/test_init.py | openefsa/asreview | aec14fcad0532a3989befe577ceb369a9dbba243 | [
"Apache-2.0"
] | 1 | 2020-04-16T09:01:40.000Z | 2020-04-16T09:01:40.000Z | tests/test_init.py | openefsa/asreview | aec14fcad0532a3989befe577ceb369a9dbba243 | [
"Apache-2.0"
] | 1 | 2020-03-04T12:16:53.000Z | 2020-03-04T12:16:53.000Z | from pathlib import Path
import numpy as np
from asreview.review.factory import get_reviewer
from asreview.data import ASReviewData
data_fp = Path("tests", "demo_data", "generic_labels.csv")
def test_init_seed():
base_start_idx = None
n_test = 4
seeds = np.random.randint(0, 2**63, 5)
for _ in range(n_test):
all_start_idx = []
for seed in seeds:
reviewer = get_reviewer(
data_fp, mode="simulate", model="nb", state_file=None,
init_seed=seed, n_prior_excluded=1, n_prior_included=1)
assert len(reviewer.start_idx) == 2
all_start_idx.append(reviewer.start_idx)
if base_start_idx is None:
base_start_idx = all_start_idx
continue
assert np.all(np.array(base_start_idx) == np.array(all_start_idx))
def test_no_seed():
n_test_max = 100
as_data = ASReviewData.from_file(data_fp)
n_priored = np.zeros(len(as_data), dtype=int)
for _ in range(n_test_max):
reviewer = get_reviewer(
data_fp, mode="simulate", model="nb", state_file=None,
init_seed=None, n_prior_excluded=1, n_prior_included=1)
assert len(reviewer.start_idx) == 2
n_priored[reviewer.start_idx] += 1
if np.all(n_priored > 0):
return
raise ValueError(f"Error getting all priors in {n_test_max} iterations.")
def test_model_seed():
n_test = 4
seed = 192874123
last_train_idx = None
for _ in range(n_test):
reviewer = get_reviewer(
data_fp, mode="simulate", model="rf", query_strategy="random",
state_file=None,
init_seed=seed, seed=seed, n_prior_excluded=1, n_prior_included=1)
reviewer.review()
if last_train_idx is None:
last_train_idx = reviewer.train_idx
assert np.all(last_train_idx == reviewer.train_idx)
| 31.65 | 78 | 0.644023 |
244b4be9184e340f7b1a2b9411b2f5eb202b066d | 3,762 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/inspur/sm/plugins/modules/edit_ad_group.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/inspur/sm/plugins/modules/edit_ad_group.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/inspur/sm/plugins/modules/edit_ad_group.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright (C) 2020 Inspur Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: edit_ad_group
version_added: "0.1.0"
author:
- WangBaoshan (@ISIB-group)
short_description: Set active directory group information.
description:
- Set active directory group information on Inspur server.
deprecated:
removed_in: 3.0.0
why: Merge functions into the M(inspur.sm.ad_group) module.
alternative: Use M(inspur.sm.ad_group) instead.
removed_from_collection: inspur.sm
options:
id:
description:
- Group id.
choices: ['1', '2', '3', '4', '5']
type: str
required: true
name:
description:
- Group name.
type: str
domain:
description:
- Group domain.
type: str
pri:
description:
- Group privilege.
choices: ['administrator', 'user', 'operator', 'oem', 'none']
type: str
kvm:
description:
- Kvm privilege.
choices: ['enable', 'disable']
type: str
vm:
description:
- Vmedia privilege.
choices: ['enable', 'disable']
type: str
extends_documentation_fragment:
- inspur.sm.ism
'''
EXAMPLES = '''
- name: Ad group test
hosts: ism
connection: local
gather_facts: no
vars:
ism:
host: "{{ ansible_ssh_host }}"
username: "{{ username }}"
password: "{{ password }}"
tasks:
- name: "Edit active directory group information"
inspur.sm.edit_ad_group:
id: "1"
name: "wbs"
domain: "inspur.com"
pri: "administrator"
kvm: "enable"
vm: "disable"
provider: "{{ ism }}"
'''
RETURN = '''
message:
description: Messages returned after module execution.
returned: always
type: str
state:
description: Status after module execution.
returned: always
type: str
changed:
description: Check to see if a change was made on the device.
returned: always
type: bool
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.inspur.sm.plugins.module_utils.ism import (ism_argument_spec, get_connection)
class AD(object):
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
self.results = dict()
def init_module(self):
"""Init module object"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=False)
def run_command(self):
self.module.params['subcommand'] = 'setadgroup'
self.results = get_connection(self.module)
if self.results['State'] == 'Success':
self.results['changed'] = True
def show_result(self):
"""Show result"""
self.module.exit_json(**self.results)
def work(self):
"""Worker"""
self.run_command()
self.show_result()
def main():
argument_spec = dict(
id=dict(type='str', required=True, choices=['1', '2', '3', '4', '5']),
name=dict(type='str', required=False),
domain=dict(type='str', required=False),
pri=dict(type='str', required=False, choices=['administrator', 'user', 'operator', 'oem', 'none']),
kvm=dict(type='str', required=False, choices=['enable', 'disable']),
vm=dict(type='str', required=False, choices=['enable', 'disable']),
)
argument_spec.update(ism_argument_spec)
ad_obj = AD(argument_spec)
ad_obj.work()
if __name__ == '__main__':
main()
| 25.591837 | 107 | 0.611909 |
cd0379251e4f9d33859ac89dba5b176ee45808ed | 3,845 | py | Python | telegrambot/handlers/dispatcher.py | StudentiUniMi/backend | 7915de730b273ef36f1adca10b1c3cacff820faa | [
"MIT"
] | 5 | 2021-08-09T20:37:28.000Z | 2022-03-08T12:25:49.000Z | telegrambot/handlers/dispatcher.py | StudentiUniMi/backend | 7915de730b273ef36f1adca10b1c3cacff820faa | [
"MIT"
] | 4 | 2021-08-14T12:36:44.000Z | 2021-12-12T01:25:08.000Z | telegrambot/handlers/dispatcher.py | StudentiUniMi/backend | 7915de730b273ef36f1adca10b1c3cacff820faa | [
"MIT"
] | 2 | 2021-08-09T19:57:16.000Z | 2021-08-11T20:19:30.000Z | import logging as logg
from telegram import Update
from telegram.ext import (
MessageHandler,
Filters,
CommandHandler,
ChatMemberHandler,
CallbackQueryHandler,
Updater,
ChatJoinRequestHandler
)
from telegrambot.handlers import messages, members, moderation, errors, memes
LOG = logg.getLogger(__name__)
dispatchers = {}
def setup_dispatcher(dispatcher):
dispatcher.add_error_handler(errors.telegram_error_handler)
# Pre-processing
dispatcher.add_handler(MessageHandler(
filters=Filters.chat_type.groups,
callback=messages.handle_group_messages,
), group=0)
# Groups
dispatcher.add_handler(ChatJoinRequestHandler(
callback=members.handle_join_request,
), group=1)
dispatcher.add_handler(CallbackQueryHandler(
callback=members.handle_join_approval,
pattern="^join_chat="
))
dispatcher.add_handler(ChatMemberHandler(
callback=members.handle_chat_member_updates,
chat_member_types=ChatMemberHandler.ANY_CHAT_MEMBER,
), group=1)
dispatcher.add_handler(MessageHandler(
filters=Filters.status_update,
callback=members.handle_left_chat_member_updates,
), group=1)
dispatcher.add_handler(MessageHandler(
filters=Filters.chat_type.groups,
callback=messages.handle_admin_tagging,
), group=1)
# Admin commands
dispatcher.add_handler(CommandHandler(
command="warn",
callback=moderation.handle_warn_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="kick",
callback=moderation.handle_kick_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="ban",
callback=moderation.handle_ban_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="superban",
callback=moderation.handle_global_ban_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="mute",
callback=moderation.handle_mute_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="free",
callback=moderation.handle_free_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="superfree",
callback=moderation.handle_global_free_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="info",
callback=moderation.handle_info_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="claim",
callback=members.claim_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="creation",
callback=moderation.handle_creation_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="whitelistbot",
callback=moderation.handle_whitelisting_command,
), group=2)
dispatcher.add_handler(CommandHandler(
command="ignore_admin",
callback=moderation.handle_toggle_admin_tagging,
), group=2)
dispatcher.add_handler(CommandHandler(
command="delete",
callback=moderation.handle_delete_command,
), group=2)
# User commands
dispatcher.add_handler(CommandHandler(
command="respects",
callback=memes.init_respects,
), group=3)
dispatcher.add_handler(CallbackQueryHandler(
callback=memes.add_respect,
pattern="^press_f$",
), group=3)
# Tokens that are sent to this function have already been checked against the DB
def dispatch_telegram_update(json_update: dict, token: str) -> None:
if token not in dispatchers.keys():
dispatchers[token] = Updater(token=token).dispatcher
setup_dispatcher(dispatchers[token])
update = Update.de_json(json_update, dispatchers[token].bot)
dispatchers[token].process_update(update)
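# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative): a webhook view would typically decode
# Telegram's JSON payload and pass it here together with the bot token taken
# from the request path, e.g. (hypothetical Django-style view, imports not shown):
#
#   def telegram_webhook(request, token):
#       dispatch_telegram_update(json.loads(request.body), token)
#       return HttpResponse("ok")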
| 31.008065 | 80 | 0.706112 |
e9d63a9caabec7b8383885025a20cef0ffad3c58 | 7,076 | py | Python | python/http_client/v1/polyaxon_sdk/models/v1_event_video.py | polyaxon/polyaxon-client | d3cafc87428f3a55f12aac8ffe93dc0e1776f379 | [
"Apache-2.0"
] | 13 | 2017-11-22T21:45:15.000Z | 2021-03-09T16:35:03.000Z | python/http_client/v1/polyaxon_sdk/models/v1_event_video.py | polyaxon/polyaxon-client | d3cafc87428f3a55f12aac8ffe93dc0e1776f379 | [
"Apache-2.0"
] | 38 | 2017-12-18T15:42:26.000Z | 2020-07-01T18:09:15.000Z | python/http_client/v1/polyaxon_sdk/models/v1_event_video.py | polyaxon/polyaxon-client | d3cafc87428f3a55f12aac8ffe93dc0e1776f379 | [
"Apache-2.0"
] | 20 | 2017-12-11T12:48:53.000Z | 2021-12-03T07:11:43.000Z | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.14.0
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1EventVideo(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'height': 'int',
'width': 'int',
'colorspace': 'int',
'path': 'str',
'content_type': 'str'
}
attribute_map = {
'height': 'height',
'width': 'width',
'colorspace': 'colorspace',
'path': 'path',
'content_type': 'content_type'
}
def __init__(self, height=None, width=None, colorspace=None, path=None, content_type=None, local_vars_configuration=None): # noqa: E501
"""V1EventVideo - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._height = None
self._width = None
self._colorspace = None
self._path = None
self._content_type = None
self.discriminator = None
if height is not None:
self.height = height
if width is not None:
self.width = width
if colorspace is not None:
self.colorspace = colorspace
if path is not None:
self.path = path
if content_type is not None:
self.content_type = content_type
@property
def height(self):
"""Gets the height of this V1EventVideo. # noqa: E501
Height of the video. # noqa: E501
:return: The height of this V1EventVideo. # noqa: E501
:rtype: int
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this V1EventVideo.
Height of the video. # noqa: E501
:param height: The height of this V1EventVideo. # noqa: E501
:type height: int
"""
self._height = height
@property
def width(self):
"""Gets the width of this V1EventVideo. # noqa: E501
Width of the video. # noqa: E501
:return: The width of this V1EventVideo. # noqa: E501
:rtype: int
"""
return self._width
@width.setter
def width(self, width):
"""Sets the width of this V1EventVideo.
Width of the video. # noqa: E501
:param width: The width of this V1EventVideo. # noqa: E501
:type width: int
"""
self._width = width
@property
def colorspace(self):
"""Gets the colorspace of this V1EventVideo. # noqa: E501
:return: The colorspace of this V1EventVideo. # noqa: E501
:rtype: int
"""
return self._colorspace
@colorspace.setter
def colorspace(self, colorspace):
"""Sets the colorspace of this V1EventVideo.
:param colorspace: The colorspace of this V1EventVideo. # noqa: E501
:type colorspace: int
"""
self._colorspace = colorspace
@property
def path(self):
"""Gets the path of this V1EventVideo. # noqa: E501
:return: The path of this V1EventVideo. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this V1EventVideo.
:param path: The path of this V1EventVideo. # noqa: E501
:type path: str
"""
self._path = path
@property
def content_type(self):
"""Gets the content_type of this V1EventVideo. # noqa: E501
:return: The content_type of this V1EventVideo. # noqa: E501
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this V1EventVideo.
:param content_type: The content_type of this V1EventVideo. # noqa: E501
:type content_type: str
"""
self._content_type = content_type
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EventVideo):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EventVideo):
return True
return self.to_dict() != other.to_dict()
| 27.533074 | 140 | 0.588327 |
7560e61c111f934562d8f781ebdf194f7d83637e | 12,912 | py | Python | draw_card/handles/prts_handle.py | NumberSir/nonebot_plugin_gamedraw | bc7a90703ec02d866e587453a92f1109c00bfab6 | [
"MIT"
] | null | null | null | draw_card/handles/prts_handle.py | NumberSir/nonebot_plugin_gamedraw | bc7a90703ec02d866e587453a92f1109c00bfab6 | [
"MIT"
] | null | null | null | draw_card/handles/prts_handle.py | NumberSir/nonebot_plugin_gamedraw | bc7a90703ec02d866e587453a92f1109c00bfab6 | [
"MIT"
] | null | null | null | import random
import re
from datetime import datetime
from typing import List, Optional, Tuple
from urllib.parse import unquote
import dateparser
from PIL import ImageDraw
from lxml import etree
from nonebot.adapters.onebot.v11 import Message, MessageSegment
from nonebot.log import logger
from pydantic import ValidationError
try:
import ujson as json
except ModuleNotFoundError:
import json
from .base_handle import BaseHandle, BaseData, UpChar, UpEvent
from ..config import draw_config
from ..util import remove_prohibited_str, cn2py, load_font
from ..build_image import BuildImage
class Operator(BaseData):
    recruit_only: bool  # only obtainable through open recruitment
    event_only: bool  # only obtainable from events
    # special_only: bool  # promoted / alternate-form operators
class PrtsHandle(BaseHandle[Operator]):
def __init__(self):
super().__init__(game_name="prts", game_name_cn="明日方舟")
self.max_star = 6
self.game_card_color = "#eff2f5"
self.config = draw_config.prts
self.ALL_OPERATOR: List[Operator] = []
self.UP_EVENT: Optional[UpEvent] = None
def get_card(self, add: float) -> Operator:
star = self.get_star(
star_list=[6, 5, 4, 3],
probability_list=[
self.config.PRTS_SIX_P + add,
self.config.PRTS_FIVE_P,
self.config.PRTS_FOUR_P,
self.config.PRTS_THREE_P,
],
)
all_operators = [
x
for x in self.ALL_OPERATOR
if x.star == star and not any([x.limited, x.recruit_only, x.event_only])
]
acquire_operator = None
if self.UP_EVENT:
up_operators = [x for x in self.UP_EVENT.up_char if x.star == star]
# UPs
try:
zooms = [x.zoom for x in up_operators]
zoom_sum = sum(zooms)
if random.random() < zoom_sum:
up_name = random.choices(up_operators, weights=zooms, k=1)[0].name
acquire_operator = [
x for x in self.ALL_OPERATOR if x.name == up_name
][0]
except IndexError:
pass
if not acquire_operator:
acquire_operator = random.choice(all_operators)
return acquire_operator
def get_cards(self, count: int, **kwargs) -> List[Tuple[Operator, int]]:
        card_list = []  # every operator drawn in this batch
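        # Soft pity: each draw after 50 consecutive non-6-star pulls raises the
        # 6-star probability by 0.02; the bonus and the counter reset on a 6-star.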
add = 0.0
count_idx = 0
for i in range(count):
count_idx += 1
card = self.get_card(add)
if card.star == self.max_star:
add = 0.0
count_idx = 0
elif count_idx > 50:
add += 0.02
card_list.append((card, i + 1))
return card_list
def format_pool_info(self) -> str:
info = ""
if self.UP_EVENT:
star6_list = [x.name for x in self.UP_EVENT.up_char if x.star == 6]
star5_list = [x.name for x in self.UP_EVENT.up_char if x.star == 5]
star4_list = [x.name for x in self.UP_EVENT.up_char if x.star == 4]
if star6_list:
info += f"六星UP:{' '.join(star6_list)}\n"
if star5_list:
info += f"五星UP:{' '.join(star5_list)}\n"
if star4_list:
info += f"四星UP:{' '.join(star4_list)}\n"
info = f"当前up池: {self.UP_EVENT.title}\n{info}"
return info.strip()
def draw(self, count: int, **kwargs) -> Message:
index2card = self.get_cards(count)
"""这里cards修复了抽卡图文不符的bug"""
cards = [card[0] for card in index2card]
up_list = [x.name for x in self.UP_EVENT.up_char] if self.UP_EVENT else []
result = self.format_result(index2card, up_list=up_list)
pool_info = self.format_pool_info()
return (
pool_info
+ MessageSegment.image(self.generate_img(cards).pic2bs4())
+ result
)
def generate_card_img(self, card: Operator) -> BuildImage:
sep_w = 5
sep_h = 5
star_h = 15
img_w = 120
img_h = 120
font_h = 20
bg = BuildImage(img_w + sep_w * 2, img_h + font_h + sep_h * 2, color="#EFF2F5")
star_path = str(self.img_path / "star.png")
star = BuildImage(star_h, star_h, background=star_path)
img_path = str(self.img_path / f"{cn2py(card.name)}.png")
img = BuildImage(img_w, img_h, background=img_path)
bg.paste(img, (sep_w, sep_h), alpha=True)
for i in range(card.star):
bg.paste(star, (sep_w + img_w - 5 - star_h * (i + 1), sep_h), alpha=True)
        # draw the operator's name
text = card.name[:7] + "..." if len(card.name) > 8 else card.name
font = load_font(fontsize=16)
text_w, text_h = font.getsize(text)
draw = ImageDraw.Draw(bg.markImg)
draw.text(
(sep_w + (img_w - text_w) / 2, sep_h + img_h + (font_h - text_h) / 2),
text,
font=font,
fill="gray",
)
return bg
def _init_data(self):
self.ALL_OPERATOR = [
Operator(
name=value["名称"],
star=int(value["星级"]),
limited="干员寻访" not in value["获取途径"],
recruit_only=True
if "干员寻访" not in value["获取途径"] and "公开招募" in value["获取途径"]
else False,
event_only=True if "活动获取" in value["获取途径"] else False,
)
for key, value in self.load_data().items()
if "阿米娅" not in key
]
self.load_up_char()
def load_up_char(self):
try:
data = self.load_data(f"draw_card_up/{self.game_name}_up_char.json")
"""这里的 waring 有点模糊,更新游戏信息时没有up池的情况下也会报错,所以细分了一下"""
if not data:
logger.warning(f"当前无UP池或 {self.game_name}_up_char.json 文件不存在")
else:
self.UP_EVENT = UpEvent.parse_obj(data.get("char", {}))
except ValidationError:
logger.warning(f"{self.game_name}_up_char 解析出错")
def dump_up_char(self):
if self.UP_EVENT:
data = {"char": json.loads(self.UP_EVENT.json())}
self.dump_data(data, f"draw_card_up/{self.game_name}_up_char.json")
async def _update_info(self):
"""更新信息"""
info = {}
url = "https://wiki.biligame.com/arknights/干员数据表"
result = await self.get_url(url)
if not result:
logger.warning(f"更新 {self.game_name_cn} 出错")
return
dom = etree.HTML(result, etree.HTMLParser())
char_list = dom.xpath("//table[@id='CardSelectTr']/tbody/tr")
for char in char_list:
try:
avatar = char.xpath("./td[1]/div/div/div/a/img/@srcset")[0]
name = char.xpath("./td[2]/a/text()")[0]
star = char.xpath("./td[5]/text()")[0]
"""这里sources修好了干员获取标签有问题的bug,如三星只能抽到卡缇就是这个原因"""
sources = [_.strip('\n') for _ in char.xpath("./td[8]/text()")]
except IndexError:
continue
member_dict = {
"头像": unquote(str(avatar).split(" ")[-2]),
"名称": remove_prohibited_str(str(name).strip()),
"星级": int(str(star).strip()),
"获取途径": sources,
}
info[member_dict["名称"]] = member_dict
self.dump_data(info)
logger.info(f"{self.game_name_cn} 更新成功")
        # download avatars
for value in info.values():
await self.download_img(value["头像"], value["名称"])
        # download the star icon
await self.download_img(
"https://patchwiki.biligame.com/images/pcr/0/02/s75ys2ecqhu2xbdw1wf1v9ccscnvi5g.png",
"star",
)
await self.update_up_char()
async def update_up_char(self):
"""重载卡池"""
announcement_url = "https://ak.hypergryph.com/news.html"
result = await self.get_url(announcement_url)
if not result:
logger.warning(f"{self.game_name_cn}获取公告出错")
return
dom = etree.HTML(result, etree.HTMLParser())
activity_urls = dom.xpath(
"//ol[@class='articleList' and @data-category-key='ACTIVITY']/li/a/@href"
)
start_time = None
end_time = None
up_chars = []
pool_img = ""
        for activity_url in activity_urls[:10]:  # only the 10 newest announcements, to keep response time down
activity_url = f"https://ak.hypergryph.com{activity_url}"
result = await self.get_url(activity_url)
if not result:
logger.warning(f"{self.game_name_cn}获取公告 {activity_url} 出错")
continue
"""因为鹰角的前端太自由了,这里重写了匹配规则以尽可能避免因为前端乱七八糟而导致的重载失败"""
dom = etree.HTML(result, etree.HTMLParser())
contents = dom.xpath(
"//div[@class='article-content']/p/text() | //div[@class='article-content']/p/span/text() | //div[@class='article-content']/div[@class='media-wrap image-wrap']/img/@src"
)
title = ""
time = ""
chars: List[str] = []
for index, content in enumerate(contents):
if re.search("(.*)(寻访|复刻).*?开启", content):
title = re.split(r"[【】]", content)
title = "".join(title[1:-1]) if "-" in title else title[1]
                    lines = [contents[index-2+_] for _ in range(8)]  # start at -2: the date grabbed by the xpath sometimes appears before the "headhunt opens" sentence
                    lines.append("")  # append an empty string to guard against IndexError
for idx, line in enumerate(lines):
match = re.search(
r"(\d{1,2}月\d{1,2}日.*?-.*?\d{1,2}月\d{1,2}日.*?$)", line
)
if match:
time = match.group(1)
"""因为 <p> 的诡异排版,所以有了下面的一段"""
if ("★★" in line and "%" in line) or ("★★" in line and "%" in lines[idx + 1]):
chars.append(line) if ("★★" in line and "%" in line) else chars.append(line + lines[idx + 1])
if not time:
continue
                    start, end = time.replace("月", "/").replace("日", " ").split("-")[:2]  # "日" becomes a space because it is sometimes not followed by one, which breaks the split
start_time = dateparser.parse(start)
end_time = dateparser.parse(end)
pool_img = contents[index-2]
r"""两类格式:用/分割,用\分割;★+(概率)+名字,★+名字+(概率)"""
for char in chars:
star = char.split("(")[0].count("★")
name = re.split(r"[:(]", char)[1] if "★(" not in char else re.split("):", char)[1] # 有的括号在前面有的在后面
if "\\" in name:
names = name.split("\\")
elif "/" in name:
names = name.split("/")
else:
                            names = [name]  # announcements use either / or \ as the separator
names = [name.replace("[限定]", "").strip() for name in names]
if "权值" in char:
match = re.search(r"(在.*?以.*?(\d+).*?倍权值.*?)", char)
else:
match = re.search(r"(占.*?的.*?(\d+).*?%)", char)
zoom = 1
if match:
zoom = float(match.group(1))
zoom = zoom / 100 if zoom > 10 else zoom
for name in names:
up_chars.append(
UpChar(name=name, star=star, limited=False, zoom=zoom)
)
                    break  # NOTE: breaking here means that if one announcement contains two pools the later one is missed (e.g. the 2021-05-19 targeted headhunt); no good fix yet
if title and start_time and end_time:
if start_time <= datetime.now() <= end_time:
self.UP_EVENT = UpEvent(
title=title,
pool_img=pool_img,
start_time=start_time,
end_time=end_time,
up_char=up_chars,
)
self.dump_up_char()
logger.info(f"成功获取{self.game_name_cn}当前up信息...当前up池: {title}")
break
async def _reload_pool(self) -> Optional[Message]:
await self.update_up_char()
self.load_up_char()
if self.UP_EVENT:
return f"重载成功!\n当前UP池子:{self.UP_EVENT.title}" + MessageSegment.image(
self.UP_EVENT.pool_img
)
| 41.252396 | 186 | 0.498141 |
50e7d3fde8fef22733a8646340ab3a5176c68503 | 7,610 | py | Python | autogluon/utils/tabular/ml/models/knn/knn_utils.py | NunoEdgarGFlowHub/autogluon | 714894698495ef4352706d3c4250823ad4a43ead | [
"Apache-2.0"
] | 1 | 2020-08-20T08:30:15.000Z | 2020-08-20T08:30:15.000Z | autogluon/utils/tabular/ml/models/knn/knn_utils.py | NunoEdgarGFlowHub/autogluon | 714894698495ef4352706d3c4250823ad4a43ead | [
"Apache-2.0"
] | null | null | null | autogluon/utils/tabular/ml/models/knn/knn_utils.py | NunoEdgarGFlowHub/autogluon | 714894698495ef4352706d3c4250823ad4a43ead | [
"Apache-2.0"
] | null | null | null | import numpy as np
from pandas import DataFrame
from scipy.stats import mode
from sklearn.utils.extmath import weighted_mode
from .....try_import import try_import_faiss
import logging
logger = logging.getLogger(__name__)
# Rather than try to import non-public sklearn internals, we implement our own weighting functions here
# These support the same operations as the sklearn functions - at least as far as possible with FAISS
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', 'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter weights"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', 'distance', or a callable function")
class FAISSNeighborsRegressor:
def __init__(self, n_neighbors=5, weights='uniform', n_jobs=-1, index_factory_string="Flat"):
"""
Creates a KNN regressor model based on FAISS. FAISS allows you to compose different
near-neighbor search algorithms from several different preprocessing / search algorithms
This composition is specified by the string that is passed to the FAISS index_factory.
Here are good guidelines for choosing the index string:
https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
The model itself is a clone of the sklearn one
"""
try_import_faiss()
import faiss
self.faiss = faiss
self.index_factory_string = index_factory_string
self.n_neighbors = n_neighbors
self.weights = weights
self.n_jobs = n_jobs
if n_jobs > 0:
# global config, affects all faiss indexes
faiss.omp_set_num_threads(n_jobs)
def fit(self, X_train, y_train):
if isinstance(X_train, DataFrame):
X_train = X_train.to_numpy(dtype=np.float32)
else:
X_train = X_train.astype(np.float32)
if not X_train.flags['C_CONTIGUOUS']:
X_train = np.ascontiguousarray(X_train)
d = X_train.shape[1]
self.index = self.faiss.index_factory(d, self.index_factory_string)
self.y = np.array(y_train)
self.index.train(X_train)
self.index.add(X_train)
return self
def predict(self, X):
X = X.astype(np.float32)
X = np.ascontiguousarray(X)
if X.ndim == 1:
X = X[np.newaxis]
D, I = self.index.search(X, self.n_neighbors)
outputs = np.squeeze(self.y[I])
weights = _get_weights(D, self.weights)
if weights is None:
y_pred = np.mean(outputs, axis=1)
else:
denom = np.sum(weights, axis=1)
if outputs.ndim == 1:
y_pred = np.sum(weights * outputs, axis=1)
y_pred /= denom
else:
y_pred = np.sum(weights * outputs, axis=1)
y_pred /= denom
return y_pred
def __getstate__(self):
state = {}
for k, v in self.__dict__.items():
if (v is not self.index) and (v is not self.faiss):
state[k] = v
else:
state[k] = self.faiss.serialize_index(self.index)
return state
def __setstate__(self, state):
try_import_faiss()
import faiss
self.__dict__.update(state)
self.faiss = faiss
self.index = self.faiss.deserialize_index(self.index)
class FAISSNeighborsClassifier:
def __init__(self, n_neighbors=5, weights='uniform', n_jobs=-1, index_factory_string="Flat"):
"""
Creates a KNN classifier model based on FAISS. FAISS allows you to compose different
near-neighbor search algorithms from several different preprocessing / search algorithms
This composition is specified by the string that is passed to the FAISS index_factory.
Here are good guidelines for choosing the index string:
https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
The model itself is a clone of the sklearn one
"""
try_import_faiss()
import faiss
self.faiss = faiss
self.index_factory_string = index_factory_string
self.n_neighbors = n_neighbors
self.weights = weights
self.classes = []
self.n_jobs = n_jobs
if n_jobs > 0:
# global config, affects all faiss indexes
faiss.omp_set_num_threads(n_jobs)
def fit(self, X_train, y_train):
if isinstance(X_train, DataFrame):
X_train = X_train.to_numpy(dtype=np.float32)
else:
X_train = X_train.astype(np.float32)
if not X_train.flags['C_CONTIGUOUS']:
X_train = np.ascontiguousarray(X_train)
d = X_train.shape[1]
self.index = self.faiss.index_factory(d, self.index_factory_string)
self.labels = np.array(y_train)
self.index.train(X_train)
self.index.add(X_train)
self.classes = np.unique(y_train)
return self
def predict(self, X):
X = X.astype(np.float32)
X = np.ascontiguousarray(X)
if X.ndim == 1:
X = X[np.newaxis]
D, I = self.index.search(X, self.n_neighbors)
outputs = np.squeeze(self.labels[I])
weights = _get_weights(D, self.weights)
if weights is None:
y_pred, _ = mode(outputs, axis=1)
else:
y_pred, _ = weighted_mode(outputs, weights, axis=1)
return y_pred
def predict_proba(self, X):
X = X.astype(np.float32)
X = np.ascontiguousarray(X)
if X.ndim == 1:
X = X[np.newaxis]
D, I = self.index.search(X, self.n_neighbors)
outputs = np.squeeze(self.labels[I])
weights = _get_weights(D, self.weights)
if weights is None:
weights = np.ones_like(I)
probabilities = np.empty((X.shape[0], len(self.classes)), dtype=np.float64)
for k, class_k in enumerate(self.classes):
proba_k = np.sum(np.multiply(outputs == class_k, weights), axis=1)
probabilities[:, k] = proba_k
normalizer = np.sum(probabilities, axis=1)
normalizer[normalizer == 0.0] = 1.0
probabilities /= normalizer[:, np.newaxis]
return probabilities
def __getstate__(self):
state = {}
for k, v in self.__dict__.items():
if (v is not self.index) and (v is not self.faiss):
state[k] = v
else:
state[k] = self.faiss.serialize_index(self.index)
return state
def __setstate__(self, state):
try_import_faiss()
import faiss
self.__dict__.update(state)
self.faiss = faiss
self.index = self.faiss.deserialize_index(self.index)
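# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative, not part of the original module): a
# minimal end-to-end run of the two estimators above on random data.  It
# assumes the optional `faiss` dependency is installed; the data, labels and
# hyper-parameters below are made up for illustration only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.rand(100, 8).astype(np.float32)   # 100 samples, 8 features
    y = rng.randint(0, 3, size=100)           # 3 fake classes
    clf = FAISSNeighborsClassifier(n_neighbors=5, weights="distance",
                                   index_factory_string="Flat")
    clf.fit(X, y)
    print(clf.predict(X[:5]))                 # weighted-mode label of the neighbors
    print(clf.predict_proba(X[:5]))           # per-class neighbor-weight fractions
    reg = FAISSNeighborsRegressor(n_neighbors=5)
    reg.fit(X, y.astype(np.float32))
    print(reg.predict(X[:5]))                 # (weighted) mean of neighbor targets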
| 36.411483 | 107 | 0.620368 |
a4fccbb108b086a5c3150578f474d018c8ea63d8 | 5,129 | py | Python | amazon-ec2/ec2-approved-regions/lambda/index.py | awslabs/aws-lambda-security-controls | c2e64889bc48e68d78664e4741e685c2812f6bbb | [
"MIT-0"
] | 46 | 2018-10-06T20:07:34.000Z | 2021-11-08T10:25:48.000Z | amazon-ec2/ec2-approved-regions/lambda/index.py | awslabs/aws-lambda-security-controls | c2e64889bc48e68d78664e4741e685c2812f6bbb | [
"MIT-0"
] | 1 | 2020-03-05T08:09:51.000Z | 2020-03-05T08:09:51.000Z | amazon-ec2/ec2-approved-regions/lambda/index.py | awslabs/aws-lambda-security-controls | c2e64889bc48e68d78664e4741e685c2812f6bbb | [
"MIT-0"
] | 32 | 2018-10-15T22:47:01.000Z | 2021-11-24T14:10:49.000Z | """
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Security Control: EC2 Must Be In US Regions
Description: Checks for compliance to determine if an EC2 instance is created
in a valid region. If not, will invoke an SNS notification
Runtime: Python 3.6
"""
import logging
import os
import json
import boto3
from botocore.exceptions import ClientError
OUTBOUND_TOPIC_ARN = os.environ["outbound_topic_arn"]
def lambda_handler(event, context):
"""
Main Lambda handler.
Evaluates the control and makes messaging decisions.
"""
# print(json.dumps(event))
setup_logging()
log.info('Got an event!')
    log.info(event)
Regions = os.environ["Regions"].split(",")
# If else statement that determines if stack was created in invalid region or not.
# Also handles logic in the event of failed stack creation in invalid region.
try:
if event["detail"]["awsRegion"] in Regions:
print(
"No violations found for EC2 Instance(s) being brought up in an invalid region.")
elif event["detail"]["awsRegion"] not in Regions:
ec2_invalid_region_instance = []
if event["detail"]["eventName"] == "ModifyInstanceAttribute":
ec2_invalid_region_instance = event["detail"]["requestParameters"]["instanceId"]
elif event["detail"]["eventName"] == "RunInstances":
for instance in event["detail"]["responseElements"]["instancesSet"]["items"]:
ec2_invalid_region_instance.append(instance["instanceId"])
if ec2_invalid_region_instance:
subject = "Violation - EC2 Instance(s) created/modified in invalid region"
message = create_non_compliance_message(
ec2_invalid_region_instance, event, context)
send_violation(OUTBOUND_TOPIC_ARN, message, subject)
except KeyError:
log.info('Region not found in the event.')
        # Since it's not a violation if the region isn't
        # in the event, we return True
return True
def send_violation(OUTBOUND_TOPIC_ARN, message, subject):
"""
Send Violation Function.
Takes in the compiled message and sends to the outbound sns topic
"""
findsnsregion = OUTBOUND_TOPIC_ARN.split(":")
snsregion = findsnsregion[3]
sendclient = boto3.client('sns', region_name=snsregion)
try:
sendclient.publish(
TopicArn=OUTBOUND_TOPIC_ARN,
Message=message,
Subject=subject
)
except ClientError as err:
print(err)
return False
def create_non_compliance_message(ec2_invalid_region_instance, event, context):
"""
Non-Compliance Message.
Function that structures the outgoing SNS notification format
"""
if type(ec2_invalid_region_instance) is list:
ec2_invalid_region_instance = ''.join(ec2_invalid_region_instance)
message = "Violation - EC2 Instance(s) created/modified in invalid region! \n\n"
message += 'EC2 Instance(s): ' + ec2_invalid_region_instance + '\n'
message += 'Account: ' + event["account"] + "\n"
message += "Region: " + event["detail"]["awsRegion"] + "\n"
message += "\n\n"
message += "This notification was generated by the Lambda function " + \
context.invoked_function_arn
return message
def setup_logging():
"""
Logging Function.
Creates a global log object and sets its level.
"""
global log
log = logging.getLogger()
log_levels = {'INFO': 20, 'WARNING': 30, 'ERROR': 40}
if 'logging_level' in os.environ:
log_level = os.environ['logging_level'].upper()
if log_level in log_levels:
log.setLevel(log_levels[log_level])
else:
log.setLevel(log_levels['ERROR'])
log.error("The logging_level environment variable is not set to INFO, WARNING, or \
ERROR. The log level is set to ERROR")
else:
log.setLevel(log_levels['ERROR'])
log.warning('The logging_level environment variable is not set. The log level is set to \
ERROR')
log.info('Logging setup complete - set to log level ' + str(log.getEffectiveLevel()))
| 38.856061 | 97 | 0.6783 |
5939114fd04db953a79fa6f63f2dc246c6172c86 | 1,791 | py | Python | setup.py | hjmjohnson/ITKShape | 87faabfee47b17c6355baa2637616c7c7bacf217 | ["Apache-2.0"] | 3 | 2021-04-18T03:57:44.000Z | 2022-03-28T19:47:06.000Z | setup.py | hjmjohnson/ITKShape | 87faabfee47b17c6355baa2637616c7c7bacf217 | ["Apache-2.0"] | 11 | 2021-03-08T13:24:38.000Z | 2021-03-19T20:20:34.000Z | setup.py | hjmjohnson/ITKShape | 87faabfee47b17c6355baa2637616c7c7bacf217 | ["Apache-2.0"] | 1 | 2021-12-17T19:16:34.000Z | 2021-12-17T19:16:34.000Z |
# -*- coding: utf-8 -*-
from os import sys
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
setup(
name='itk-shape',
version='0.2.1',
author='Insight Software Consortium',
author_email='itk+community@discourse.itk.org',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/slicersalt/ITKShape',
description=r'A C++ implementation of Procrustes alignment for 3D meshes.',
long_description='ITK external module for libraries originally developed in SPHARM-PDM 3D Slicer extension (https://github.com/NIRALUser/SPHARM-PDM).',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit',
url=r'https://itk.org/',
install_requires=[
r'itk>=5.2.0.post2'
]
)
| 36.55102 | 155 | 0.634841 |
d2d5859a664b19dc7783dd5fa31a21d116e7480e | 310 | py | Python | p1.py | eztwokey/laba6 | 06a9db939d592b257175dd693d204f1dda972d14 | ["MIT"] | null | null | null | p1.py | eztwokey/laba6 | 06a9db939d592b257175dd693d204f1dda972d14 | ["MIT"] | null | null | null | p1.py | eztwokey/laba6 | 06a9db939d592b257175dd693d204f1dda972d14 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
A = list(map(int, input().split()))
if len(A) != 10:
print("Неверный размер списка", file=sys.stderr)
exit(1)
s = 0
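    # Sum only the elements whose absolute value is less than 5.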
for item in A:
if abs(item) < 5:
s += item
    print(s)
| 20.666667 | 56 | 0.509677 |
782f774a25562daf707ae90b9a7ee4cbe1c43381 | 8,742 | py | Python | python/cuxfilter/charts/core/non_aggregate/core_stacked_line.py | jacobtomlinson/cuxfilter | 0b88a6b609d993b8d11629763c35dfb2b2581927 | ["Apache-2.0"] | null | null | null | python/cuxfilter/charts/core/non_aggregate/core_stacked_line.py | jacobtomlinson/cuxfilter | 0b88a6b609d993b8d11629763c35dfb2b2581927 | ["Apache-2.0"] | null | null | null | python/cuxfilter/charts/core/non_aggregate/core_stacked_line.py | jacobtomlinson/cuxfilter | 0b88a6b609d993b8d11629763c35dfb2b2581927 | ["Apache-2.0"] | null | null | null |
from typing import Tuple
from ..core_chart import BaseChart
from ....layouts import chart_view
class BaseStackedLine(BaseChart):
"""
    No datatiles support in non_data_tiles plot charts.
    If the dataset size is greater than a few thousand points,
    scatter geos can crash the browser tab, and are only recommended
    with the cudatashader plugin, in which case an image is
    rendered instead of points on the canvas.
"""
chart_type = "stacked_lines"
reset_event = None
x_range: Tuple = None
y_range: Tuple = None
use_data_tiles = False
y: list = []
colors: list = []
def __init__(
self,
x,
y=[],
data_points=100,
add_interaction=True,
colors=[],
step_size=None,
step_size_type=int,
width=800,
height=400,
**library_specific_params,
):
"""
Description:
-------------------------------------------
Input:
x
y
data_points
add_interaction
aggregate_fn
step_size
step_size_type
x_label_map
y_label_map
width
height
**library_specific_params
-------------------------------------------
        Output:
"""
self.x = x
if type(y) != list:
raise TypeError("y must be a list of column names")
if len(y) == 0:
raise ValueError("y must not be empty")
self.y = y
self.data_points = data_points
self.add_interaction = add_interaction
self.stride = step_size
if type(colors) != list:
raise TypeError("colors must be a list of colors")
self.colors = colors
self.stride_type = step_size_type
self.library_specific_params = library_specific_params
self.width = width
self.height = height
def initiate_chart(self, dashboard_cls):
"""
Description:
-------------------------------------------
Input:
data: cudf DataFrame
-------------------------------------------
"""
if self.x_range is None:
self.x_range = (
dashboard_cls._data[self.x].min(),
dashboard_cls._data[self.x].max(),
)
if self.y_range is None:
# cudf_df[['a','b','c']].min().min() gives min value
# between all values in columns a,b and c
self.y_range = (
dashboard_cls._data[self.y].min().min(),
dashboard_cls._data[self.y].max().max(),
)
self.calculate_source(dashboard_cls._data)
self.generate_chart()
self.add_events(dashboard_cls)
def view(self):
return chart_view(self.chart, width=self.width)
def calculate_source(self, data):
"""
Description:
-------------------------------------------
Input:
data = cudf.DataFrame
-------------------------------------------
        Output:
"""
self.format_source_data(data)
def get_selection_geometry_callback(self, dashboard_cls):
"""
        Description: generate callback for choropleth selection event
-------------------------------------------
Input:
-------------------------------------------
        Output:
"""
def selection_callback(xmin, xmax, ymin, ymax):
if dashboard_cls._active_view != self.name:
# reset previous active view and
# set current chart as active view
dashboard_cls._reset_current_view(new_active_view=self)
self.source = dashboard_cls._data
self.x_range = (xmin, xmax)
self.y_range = (ymin, ymax)
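            # Only the x-range goes into the query string; the y-range is stored
            # on the chart so the selected bounds are remembered.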
query = str(xmin) + "<=" + self.x + " <= " + str(xmax)
dashboard_cls._query_str_dict[self.name] = query
temp_data = dashboard_cls._query(
dashboard_cls._query_str_dict[self.name]
)
# reload all charts with new queried data (cudf.DataFrame only)
dashboard_cls._reload_charts(data=temp_data, ignore_cols=[])
# self.reload_chart(temp_data, False)
del temp_data
return selection_callback
def compute_query_dict(self, query_str_dict):
"""
Description:
-------------------------------------------
Input:
query_dict = reference to dashboard.__cls__.query_dict
-------------------------------------------
        Output:
"""
if self.x_range is not None and self.y_range is not None:
query_str_dict[self.name] = (
str(self.x_range[0])
+ "<="
+ self.x
+ " <= "
+ str(self.x_range[1])
)
def add_events(self, dashboard_cls):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
        Output:
"""
if self.add_interaction:
self.add_selection_geometry_event(
self.get_selection_geometry_callback(dashboard_cls)
)
if self.reset_event is not None:
self.add_reset_event(dashboard_cls)
def add_reset_event(self, dashboard_cls):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
        Output:
"""
def reset_callback(event):
if dashboard_cls._active_view != self.name:
# reset previous active view and
# set current chart as active view
dashboard_cls._reset_current_view(new_active_view=self)
self.source = dashboard_cls._data
self.x_range = None
self.y_range = None
dashboard_cls._reload_charts()
# add callback to reset chart button
self.add_event(self.reset_event, reset_callback)
def query_chart_by_range(
self, active_chart: BaseChart, query_tuple, datatile=None
):
"""
Description:
-------------------------------------------
Input:
1. active_chart: chart object of active_chart
2. query_tuple: (min_val, max_val) of the query [type: tuple]
3. datatile: None in case of Gpu Geo Scatter charts
-------------------------------------------
        Output:
"""
min_val, max_val = query_tuple
self.reload_chart(
self.source.query(
str(min_val) + "<=" + active_chart.x + "<=" + str(max_val)
),
False,
)
def query_chart_by_indices(
self, active_chart: BaseChart, old_indices, new_indices, datatile=None
):
"""
Description:
-------------------------------------------
Input:
1. active_chart: chart object of active_chart
2. query_tuple: (min_val, max_val) of the query [type: tuple]
3. datatile: None in case of Gpu Geo Scatter charts
-------------------------------------------
        Output:
"""
if "" in new_indices:
new_indices.remove("")
if len(new_indices) == 0:
# case: all selected indices were reset
# reset the chart
self.reload_chart(self.source, False)
elif len(new_indices) == 1:
# just a single index
self.reload_chart(
self.source.query(
active_chart.x + "==" + str(float(new_indices[0]))
),
False,
)
else:
new_indices_str = ",".join(map(str, new_indices))
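            # Multiple categories selected: filter the x column with an
            # "in (...)" expression built from the selected indices.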
self.reload_chart(
self.source.query(
active_chart.x + " in (" + new_indices_str + ")"
),
False,
)
def add_selection_geometry_event(self, callback):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
        Output:
"""
# ('function to be overridden by library specific extensions')
def reset_chart_geometry_ranges(self):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
        Output:
"""
# ('function to be overridden by library specific extensions')
| 28.756579 | 78 | 0.469229 |
905a51ab6d5cd1dba178657af16b4bbef74f0399 | 52,445 | py | Python | venv/Lib/site-packages/matplotlib/__init__.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | ["MIT"] | null | null | null | venv/Lib/site-packages/matplotlib/__init__.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | ["MIT"] | null | null | null | venv/Lib/site-packages/matplotlib/__init__.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | ["MIT"] | null | null | null |
"""
An object-oriented plotting library.
A procedural interface is provided by the companion pyplot module,
which may be imported directly, e.g.::
import matplotlib.pyplot as plt
or using ipython::
ipython
at your terminal, followed by::
In [1]: %matplotlib
In [2]: import matplotlib.pyplot as plt
at the ipython shell prompt.
For the most part, direct use of the object-oriented library is encouraged when
programming; pyplot is primarily for working interactively. The exceptions are
the pyplot functions `.pyplot.figure`, `.pyplot.subplot`, `.pyplot.subplots`,
and `.pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
The `~.axes.Axes` class. Most pyplot functions are wrappers for
`~.axes.Axes` methods. The axes module is the highest level of OO
access to the library.
:mod:`matplotlib.figure`
The `.Figure` class.
:mod:`matplotlib.artist`
The `.Artist` base class for all classes that draw things.
:mod:`matplotlib.lines`
The `.Line2D` class for drawing lines and markers.
:mod:`matplotlib.patches`
Classes for drawing polygons.
:mod:`matplotlib.text`
The `.Text` and `.Annotation` classes.
:mod:`matplotlib.image`
The `.AxesImage` and `.FigureImage` classes.
:mod:`matplotlib.collections`
Classes for efficient drawing of groups of lines or polygons.
:mod:`matplotlib.colors`
Color specifications and making colormaps.
:mod:`matplotlib.cm`
Colormaps, and the `.ScalarMappable` mixin class for providing color
mapping functionality to other classes.
:mod:`matplotlib.ticker`
Calculation of tick mark locations and formatting of tick labels.
:mod:`matplotlib.backends`
A subpackage with modules for various GUI libraries and output formats.
The base matplotlib namespace includes:
`~matplotlib.rcParams`
Default configuration settings; their defaults may be overridden using
a :file:`matplotlibrc` file.
`~matplotlib.use`
Setting the Matplotlib backend. This should be called before any
figure is created, because it is not possible to switch between
different GUI backends after that.
Matplotlib was initially written by John D. Hunter (1968-2012) and is now
developed and maintained by a host of others.
Occasionally the internal documentation (python docstrings) will refer
to MATLAB®, a registered trademark of The MathWorks, Inc.
"""
import atexit
from collections import namedtuple
from collections.abc import MutableMapping
import contextlib
import functools
import importlib
import inspect
from inspect import Parameter
import locale
import logging
import os
from pathlib import Path
import pprint
import re
import shutil
import subprocess
import sys
import tempfile
import warnings
import numpy
from packaging.version import parse as parse_version
# cbook must import matplotlib only within function
# definitions, so it is safe to import from it here.
from . import _api, _version, cbook, docstring, rcsetup
from matplotlib.cbook import MatplotlibDeprecationWarning, sanitize_sequence
from matplotlib.cbook import mplDeprecation # deprecated
from matplotlib.rcsetup import validate_backend, cycler
_log = logging.getLogger(__name__)
__bibtex__ = r"""@Article{Hunter:2007,
Author = {Hunter, J. D.},
Title = {Matplotlib: A 2D graphics environment},
Journal = {Computing in Science \& Engineering},
Volume = {9},
Number = {3},
Pages = {90--95},
abstract = {Matplotlib is a 2D graphics package used for Python
for application development, interactive scripting, and
publication-quality image generation across user
interfaces and operating systems.},
publisher = {IEEE COMPUTER SOC},
year = 2007
}"""
# modelled after sys.version_info
_VersionInfo = namedtuple('_VersionInfo',
'major, minor, micro, releaselevel, serial')
def _parse_to_version_info(version_str):
"""
Parse a version string to a namedtuple analogous to sys.version_info.
See:
https://packaging.pypa.io/en/latest/version.html#packaging.version.parse
https://docs.python.org/3/library/sys.html#sys.version_info
"""
v = parse_version(version_str)
if v.pre is None and v.post is None and v.dev is None:
return _VersionInfo(v.major, v.minor, v.micro, 'final', 0)
elif v.dev is not None:
return _VersionInfo(v.major, v.minor, v.micro, 'alpha', v.dev)
elif v.pre is not None:
releaselevel = {
'a': 'alpha',
'b': 'beta',
'rc': 'candidate'}.get(v.pre[0], 'alpha')
return _VersionInfo(v.major, v.minor, v.micro, releaselevel, v.pre[1])
else:
# fallback for v.post: guess-next-dev scheme from setuptools_scm
return _VersionInfo(v.major, v.minor, v.micro + 1, 'alpha', v.post)
def _get_version():
"""Return the version string used for __version__."""
# Only shell out to a git subprocess if really needed, and not on a
# shallow clone, such as those used by CI, as the latter would trigger
# a warning from setuptools_scm.
root = Path(__file__).resolve().parents[2]
if (root / ".git").exists() and not (root / ".git/shallow").exists():
import setuptools_scm
return setuptools_scm.get_version(
root=root,
version_scheme="release-branch-semver",
local_scheme="node-and-date",
fallback_version=_version.version,
)
else: # Get the version from the _version.py setuptools_scm file.
return _version.version
@_api.caching_module_getattr
class __getattr__:
__version__ = property(lambda self: _get_version())
__version_info__ = property(
lambda self: _parse_to_version_info(self.__version__))
# module-level deprecations
URL_REGEX = _api.deprecated("3.5", obj_type="")(property(
lambda self: re.compile(r'^http://|^https://|^ftp://|^file:')))
def _check_versions():
# Quickfix to ensure Microsoft Visual C++ redistributable
# DLLs are loaded before importing kiwisolver
from . import ft2font
for modname, minver in [
("cycler", "0.10"),
("dateutil", "2.7"),
("kiwisolver", "1.0.1"),
("numpy", "1.17"),
("pyparsing", "2.2.1"),
]:
module = importlib.import_module(modname)
if parse_version(module.__version__) < parse_version(minver):
raise ImportError(f"Matplotlib requires {modname}>={minver}; "
f"you have {module.__version__}")
_check_versions()
# The decorator ensures this always returns the same handler (and it is only
# attached once).
@functools.lru_cache()
def _ensure_handler():
"""
The first time this function is called, attach a `StreamHandler` using the
same format as `logging.basicConfig` to the Matplotlib root logger.
Return this handler every time this function is called.
"""
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
_log.addHandler(handler)
return handler
def set_loglevel(level):
"""
Set Matplotlib's root logger and root logger handler level, creating
the handler if it does not exist yet.
Typically, one should call ``set_loglevel("info")`` or
``set_loglevel("debug")`` to get additional debugging information.
Parameters
----------
level : {"notset", "debug", "info", "warning", "error", "critical"}
The log level of the handler.
Notes
-----
The first time this function is called, an additional handler is attached
to Matplotlib's root handler; this handler is reused every time and this
function simply manipulates the logger and handler's level.
"""
_log.setLevel(level.upper())
_ensure_handler().setLevel(level.upper())
def _logged_cached(fmt, func=None):
"""
Decorator that logs a function's return value, and memoizes that value.
After ::
@_logged_cached(fmt)
def func(): ...
the first call to *func* will log its return value at the DEBUG level using
%-format string *fmt*, and memoize it; later calls to *func* will directly
return that value.
"""
if func is None: # Return the actual decorator.
return functools.partial(_logged_cached, fmt)
called = False
ret = None
@functools.wraps(func)
def wrapper(**kwargs):
nonlocal called, ret
if not called:
ret = func(**kwargs)
called = True
_log.debug(fmt, ret)
return ret
return wrapper
_ExecInfo = namedtuple("_ExecInfo", "executable version")
class ExecutableNotFoundError(FileNotFoundError):
"""
Error raised when an executable that Matplotlib optionally
depends on can't be found.
"""
pass
@functools.lru_cache()
def _get_executable_info(name):
"""
Get the version of some executable that Matplotlib optionally depends on.
.. warning::
The list of executables that this function supports is set according to
Matplotlib's internal needs, and may change without notice.
Parameters
----------
name : str
The executable to query. The following values are currently supported:
"dvipng", "gs", "inkscape", "magick", "pdftops". This list is subject
to change without notice.
Returns
-------
tuple
A namedtuple with fields ``executable`` (`str`) and ``version``
(`packaging.Version`, or ``None`` if the version cannot be determined).
Raises
------
ExecutableNotFoundError
If the executable is not found or older than the oldest version
supported by Matplotlib.
ValueError
If the executable is not one that we know how to query.
"""
def impl(args, regex, min_ver=None, ignore_exit_code=False):
# Execute the subprocess specified by args; capture stdout and stderr.
# Search for a regex match in the output; if the match succeeds, the
# first group of the match is the version.
# Return an _ExecInfo if the executable exists, and has a version of
# at least min_ver (if set); else, raise ExecutableNotFoundError.
try:
output = subprocess.check_output(
args, stderr=subprocess.STDOUT,
universal_newlines=True, errors="replace")
except subprocess.CalledProcessError as _cpe:
if ignore_exit_code:
output = _cpe.output
else:
raise ExecutableNotFoundError(str(_cpe)) from _cpe
except OSError as _ose:
raise ExecutableNotFoundError(str(_ose)) from _ose
match = re.search(regex, output)
if match:
version = parse_version(match.group(1))
if min_ver is not None and version < parse_version(min_ver):
raise ExecutableNotFoundError(
f"You have {args[0]} version {version} but the minimum "
f"version supported by Matplotlib is {min_ver}")
return _ExecInfo(args[0], version)
else:
raise ExecutableNotFoundError(
f"Failed to determine the version of {args[0]} from "
f"{' '.join(args)}, which output {output}")
if name == "dvipng":
return impl(["dvipng", "-version"], "(?m)^dvipng(?: .*)? (.+)", "1.6")
elif name == "gs":
execs = (["gswin32c", "gswin64c", "mgs", "gs"] # "mgs" for miktex.
if sys.platform == "win32" else
["gs"])
for e in execs:
try:
return impl([e, "--version"], "(.*)", "9")
except ExecutableNotFoundError:
pass
message = "Failed to find a Ghostscript installation"
raise ExecutableNotFoundError(message)
elif name == "inkscape":
try:
# Try headless option first (needed for Inkscape version < 1.0):
return impl(["inkscape", "--without-gui", "-V"],
"Inkscape ([^ ]*)")
except ExecutableNotFoundError:
pass # Suppress exception chaining.
# If --without-gui is not accepted, we may be using Inkscape >= 1.0 so
# try without it:
return impl(["inkscape", "-V"], "Inkscape ([^ ]*)")
elif name == "magick":
if sys.platform == "win32":
# Check the registry to avoid confusing ImageMagick's convert with
# Windows's builtin convert.exe.
import winreg
binpath = ""
for flag in [0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY]:
try:
with winreg.OpenKeyEx(
winreg.HKEY_LOCAL_MACHINE,
r"Software\Imagemagick\Current",
0, winreg.KEY_QUERY_VALUE | flag) as hkey:
binpath = winreg.QueryValueEx(hkey, "BinPath")[0]
except OSError:
pass
path = None
if binpath:
for name in ["convert.exe", "magick.exe"]:
candidate = Path(binpath, name)
if candidate.exists():
path = str(candidate)
break
if path is None:
raise ExecutableNotFoundError(
"Failed to find an ImageMagick installation")
else:
path = "convert"
info = impl([path, "--version"], r"^Version: ImageMagick (\S*)")
if info.version == parse_version("7.0.10-34"):
# https://github.com/ImageMagick/ImageMagick/issues/2720
raise ExecutableNotFoundError(
f"You have ImageMagick {info.version}, which is unsupported")
return info
elif name == "pdftops":
info = impl(["pdftops", "-v"], "^pdftops version (.*)",
ignore_exit_code=True)
if info and not (
3 <= info.version.major or
# poppler version numbers.
parse_version("0.9") <= info.version < parse_version("1.0")):
raise ExecutableNotFoundError(
f"You have pdftops version {info.version} but the minimum "
f"version supported by Matplotlib is 3.0")
return info
else:
raise ValueError("Unknown executable: {!r}".format(name))
def checkdep_usetex(s):
if not s:
return False
if not shutil.which("tex"):
_log.warning("usetex mode requires TeX.")
return False
try:
_get_executable_info("dvipng")
except ExecutableNotFoundError:
_log.warning("usetex mode requires dvipng.")
return False
try:
_get_executable_info("gs")
except ExecutableNotFoundError:
_log.warning("usetex mode requires ghostscript.")
return False
return True
def _get_xdg_config_dir():
"""
Return the XDG configuration directory, according to the XDG base
directory spec:
https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
"""
return os.environ.get('XDG_CONFIG_HOME') or str(Path.home() / ".config")
def _get_xdg_cache_dir():
"""
Return the XDG cache directory, according to the XDG base directory spec:
https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
"""
return os.environ.get('XDG_CACHE_HOME') or str(Path.home() / ".cache")
def _get_config_or_cache_dir(xdg_base_getter):
configdir = os.environ.get('MPLCONFIGDIR')
if configdir:
configdir = Path(configdir).resolve()
elif sys.platform.startswith(('linux', 'freebsd')):
# Only call _xdg_base_getter here so that MPLCONFIGDIR is tried first,
# as _xdg_base_getter can throw.
configdir = Path(xdg_base_getter(), "matplotlib")
else:
configdir = Path.home() / ".matplotlib"
try:
configdir.mkdir(parents=True, exist_ok=True)
except OSError:
pass
else:
if os.access(str(configdir), os.W_OK) and configdir.is_dir():
return str(configdir)
# If the config or cache directory cannot be created or is not a writable
# directory, create a temporary one.
tmpdir = os.environ["MPLCONFIGDIR"] = \
tempfile.mkdtemp(prefix="matplotlib-")
atexit.register(shutil.rmtree, tmpdir)
_log.warning(
"Matplotlib created a temporary config/cache directory at %s because "
"the default path (%s) is not a writable directory; it is highly "
"recommended to set the MPLCONFIGDIR environment variable to a "
"writable directory, in particular to speed up the import of "
"Matplotlib and to better support multiprocessing.",
tmpdir, configdir)
return tmpdir
@_logged_cached('CONFIGDIR=%s')
def get_configdir():
"""
Return the string path of the configuration directory.
The directory is chosen as follows:
1. If the MPLCONFIGDIR environment variable is supplied, choose that.
2. On Linux, follow the XDG specification and look first in
``$XDG_CONFIG_HOME``, if defined, or ``$HOME/.config``. On other
platforms, choose ``$HOME/.matplotlib``.
3. If the chosen directory exists and is writable, use that as the
configuration directory.
4. Else, create a temporary directory, and use it as the configuration
directory.
"""
return _get_config_or_cache_dir(_get_xdg_config_dir)
@_logged_cached('CACHEDIR=%s')
def get_cachedir():
"""
Return the string path of the cache directory.
The procedure used to find the directory is the same as for
_get_config_dir, except using ``$XDG_CACHE_HOME``/``$HOME/.cache`` instead.
"""
return _get_config_or_cache_dir(_get_xdg_cache_dir)
@_logged_cached('matplotlib data path: %s')
def get_data_path():
"""Return the path to Matplotlib data."""
return str(Path(__file__).with_name("mpl-data"))
def matplotlib_fname():
"""
Get the location of the config file.
The file location is determined in the following order
- ``$PWD/matplotlibrc``
- ``$MATPLOTLIBRC`` if it is not a directory
- ``$MATPLOTLIBRC/matplotlibrc``
- ``$MPLCONFIGDIR/matplotlibrc``
- On Linux,
- ``$XDG_CONFIG_HOME/matplotlib/matplotlibrc`` (if ``$XDG_CONFIG_HOME``
is defined)
- or ``$HOME/.config/matplotlib/matplotlibrc`` (if ``$XDG_CONFIG_HOME``
is not defined)
- On other platforms,
- ``$HOME/.matplotlib/matplotlibrc`` if ``$HOME`` is defined
- Lastly, it looks in ``$MATPLOTLIBDATA/matplotlibrc``, which should always
exist.
"""
def gen_candidates():
# rely on down-stream code to make absolute. This protects us
# from having to directly get the current working directory
# which can fail if the user has ended up with a cwd that is
# non-existent.
yield 'matplotlibrc'
try:
matplotlibrc = os.environ['MATPLOTLIBRC']
except KeyError:
pass
else:
yield matplotlibrc
yield os.path.join(matplotlibrc, 'matplotlibrc')
yield os.path.join(get_configdir(), 'matplotlibrc')
yield os.path.join(get_data_path(), 'matplotlibrc')
for fname in gen_candidates():
if os.path.exists(fname) and not os.path.isdir(fname):
return fname
raise RuntimeError("Could not find matplotlibrc file; your Matplotlib "
"install is broken")
# rcParams deprecated and automatically mapped to another key.
# Values are tuples of (version, new_name, f_old2new, f_new2old).
_deprecated_map = {}
# rcParams deprecated; some can manually be mapped to another key.
# Values are tuples of (version, new_name_or_None).
_deprecated_ignore_map = {
'mpl_toolkits.legacy_colorbar': ('3.4', None),
}
# rcParams deprecated; can use None to suppress warnings; remain actually
# listed in the rcParams (not included in _all_deprecated).
# Values are tuples of (version,)
_deprecated_remain_as_none = {
'animation.avconv_path': ('3.3',),
'animation.avconv_args': ('3.3',),
'animation.html_args': ('3.3',),
}
_all_deprecated = {*_deprecated_map, *_deprecated_ignore_map}
@docstring.Substitution(
"\n".join(map("- {}".format, sorted(rcsetup._validators, key=str.lower)))
)
class RcParams(MutableMapping, dict):
"""
A dictionary object including validation.
Validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`.
The list of rcParams is:
%s
See Also
--------
:ref:`customizing-with-matplotlibrc-files`
"""
validate = rcsetup._validators
# validate values on the way in
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
def __setitem__(self, key, val):
try:
if key in _deprecated_map:
version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
_api.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
key = alt_key
val = alt_val(val)
elif key in _deprecated_remain_as_none and val is not None:
version, = _deprecated_remain_as_none[key]
_api.warn_deprecated(version, name=key, obj_type="rcparam")
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
_api.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
return
elif key == 'backend':
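                # Receiving the auto-backend sentinel when a backend is already
                # recorded is a no-op, so a resolved backend is never clobbered.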
if val is rcsetup._auto_backend_sentinel:
if 'backend' in self:
return
try:
cval = self.validate[key](val)
except ValueError as ve:
raise ValueError(f"Key {key}: {ve}") from None
dict.__setitem__(self, key, cval)
except KeyError as err:
raise KeyError(
f"{key} is not a valid rc parameter (see rcParams.keys() for "
f"a list of valid parameters)") from err
def __getitem__(self, key):
if key in _deprecated_map:
version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
_api.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
return inverse_alt(dict.__getitem__(self, alt_key))
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
_api.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
return dict.__getitem__(self, alt_key) if alt_key else None
# In theory, this should only ever be used after the global rcParams
# has been set up, but better be safe e.g. in presence of breakpoints.
elif key == "backend" and self is globals().get("rcParams"):
val = dict.__getitem__(self, key)
if val is rcsetup._auto_backend_sentinel:
from matplotlib import pyplot as plt
plt.switch_backend(rcsetup._auto_backend_sentinel)
return dict.__getitem__(self, key)
def __repr__(self):
class_name = self.__class__.__name__
indent = len(class_name) + 1
with _api.suppress_matplotlib_deprecation_warning():
repr_split = pprint.pformat(dict(self), indent=1,
width=80 - indent).split('\n')
repr_indented = ('\n' + ' ' * indent).join(repr_split)
return '{}({})'.format(class_name, repr_indented)
def __str__(self):
return '\n'.join(map('{0[0]}: {0[1]}'.format, sorted(self.items())))
def __iter__(self):
"""Yield sorted list of keys."""
with _api.suppress_matplotlib_deprecation_warning():
yield from sorted(dict.__iter__(self))
def __len__(self):
return dict.__len__(self)
def find_all(self, pattern):
"""
Return the subset of this RcParams dictionary whose keys match,
using :func:`re.search`, the given ``pattern``.
.. note::
Changes to the returned dictionary are *not* propagated to
the parent RcParams dictionary.
"""
pattern_re = re.compile(pattern)
return RcParams((key, value)
for key, value in self.items()
if pattern_re.search(key))
def copy(self):
return {k: dict.__getitem__(self, k) for k in self}
def rc_params(fail_on_error=False):
"""Construct a `RcParams` instance from the default Matplotlib rc file."""
return rc_params_from_file(matplotlib_fname(), fail_on_error)
@_api.deprecated("3.5")
def is_url(filename):
"""Return whether *filename* is an http, https, ftp, or file URL path."""
return __getattr__("URL_REGEX").match(filename) is not None
@functools.lru_cache()
def _get_ssl_context():
try:
import certifi
except ImportError:
_log.debug("Could not import certifi.")
return None
import ssl
return ssl.create_default_context(cafile=certifi.where())
@contextlib.contextmanager
def _open_file_or_url(fname):
if (isinstance(fname, str)
and fname.startswith(('http://', 'https://', 'ftp://', 'file:'))):
import urllib.request
ssl_ctx = _get_ssl_context()
if ssl_ctx is None:
_log.debug(
"Could not get certifi ssl context, https may not work."
)
with urllib.request.urlopen(fname, context=ssl_ctx) as f:
yield (line.decode('utf-8') for line in f)
else:
fname = os.path.expanduser(fname)
encoding = locale.getpreferredencoding(do_setlocale=False)
if encoding is None:
encoding = "utf-8"
with open(fname, encoding=encoding) as f:
yield f
def _rc_params_in_file(fname, transform=lambda x: x, fail_on_error=False):
"""
Construct a `RcParams` instance from file *fname*.
Unlike `rc_params_from_file`, the configuration class only contains the
parameters specified in the file (i.e. default values are not filled in).
Parameters
----------
fname : path-like
The loaded file.
transform : callable, default: the identity function
A function called on each individual line of the file to transform it,
before further parsing.
fail_on_error : bool, default: False
Whether invalid entries should result in an exception or a warning.
"""
import matplotlib as mpl
rc_temp = {}
with _open_file_or_url(fname) as fd:
try:
for line_no, line in enumerate(fd, 1):
line = transform(line)
strippedline = line.split('#', 1)[0].strip()
if not strippedline:
continue
tup = strippedline.split(':', 1)
if len(tup) != 2:
_log.warning('Missing colon in file %r, line %d (%r)',
fname, line_no, line.rstrip('\n'))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
_log.warning('Duplicate key in file %r, line %d (%r)',
fname, line_no, line.rstrip('\n'))
rc_temp[key] = (val, line, line_no)
except UnicodeDecodeError:
_log.warning('Cannot decode configuration file %s with encoding '
'%s, check LANG and LC_* variables.',
fname,
locale.getpreferredencoding(do_setlocale=False)
or 'utf-8 (default)')
raise
config = RcParams()
for key, (val, line, line_no) in rc_temp.items():
if key in rcsetup._validators:
if fail_on_error:
config[key] = val # try to convert to proper type or raise
else:
try:
config[key] = val # try to convert to proper type or skip
except Exception as msg:
_log.warning('Bad value in file %r, line %d (%r): %s',
fname, line_no, line.rstrip('\n'), msg)
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
_api.warn_deprecated(
version, name=key, alternative=alt_key, obj_type='rcparam',
addendum="Please update your matplotlibrc.")
else:
# __version__ must be looked up as an attribute to trigger the
# module-level __getattr__.
version = ('master' if '.post' in mpl.__version__
else f'v{mpl.__version__}')
_log.warning("""
Bad key %(key)s in file %(fname)s, line %(line_no)s (%(line)r)
You probably need to get an updated matplotlibrc file from
https://github.com/matplotlib/matplotlib/blob/%(version)s/matplotlibrc.template
or from the matplotlib source distribution""",
dict(key=key, fname=fname, line_no=line_no,
line=line.rstrip('\n'), version=version))
return config
def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):
"""
Construct a `RcParams` from file *fname*.
Parameters
----------
fname : str or path-like
A file with Matplotlib rc settings.
fail_on_error : bool
If True, raise an error when the parser fails to convert a parameter.
use_default_template : bool
If True, initialize with default parameters before updating with those
in the given file. If False, the configuration class only contains the
parameters specified in the file. (Useful for updating dicts.)
"""
config_from_file = _rc_params_in_file(fname, fail_on_error=fail_on_error)
if not use_default_template:
return config_from_file
with _api.suppress_matplotlib_deprecation_warning():
config = RcParams({**rcParamsDefault, **config_from_file})
if "".join(config['text.latex.preamble']):
_log.info("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
""", '\n'.join(config['text.latex.preamble']))
_log.debug('loaded rc file %s', fname)
return config
# When constructing the global instances, we need to perform certain updates
# by explicitly calling the superclass (dict.update, dict.items) to avoid
# triggering resolution of _auto_backend_sentinel.
rcParamsDefault = _rc_params_in_file(
cbook._get_data_path("matplotlibrc"),
# Strip leading comment.
transform=lambda line: line[1:] if line.startswith("#") else line,
fail_on_error=True)
dict.update(rcParamsDefault, rcsetup._hardcoded_defaults)
# Normally, the default matplotlibrc file contains *no* entry for backend (the
# corresponding line starts with ##, not #; we fill on _auto_backend_sentinel
# in that case. However, packagers can set a different default backend
# (resulting in a normal `#backend: foo` line) in which case we should *not*
# fill in _auto_backend_sentinel.
dict.setdefault(rcParamsDefault, "backend", rcsetup._auto_backend_sentinel)
rcParams = RcParams() # The global instance.
dict.update(rcParams, dict.items(rcParamsDefault))
dict.update(rcParams, _rc_params_in_file(matplotlib_fname()))
with _api.suppress_matplotlib_deprecation_warning():
rcParamsOrig = RcParams(rcParams.copy())
# This also checks that all rcParams are indeed listed in the template.
# Assigning to rcsetup.defaultParams is left only for backcompat.
defaultParams = rcsetup.defaultParams = {
# We want to resolve deprecated rcParams, but not backend...
key: [(rcsetup._auto_backend_sentinel if key == "backend" else
rcParamsDefault[key]),
validator]
for key, validator in rcsetup._validators.items()}
if rcParams['axes.formatter.use_locale']:
locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
"""
Set the current `.rcParams`. *group* is the grouping for the rc, e.g.,
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, e.g., (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, e.g.,::
rc('lines', linewidth=2, color='r')
sets the current `.rcParams` and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above call as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. e.g., you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations. Use
``matplotlib.style.use('default')`` or :func:`~matplotlib.rcdefaults` to
restore the default `.rcParams` after changes.
Notes
-----
Similar functionality is available by using the normal dict interface, i.e.
``rcParams.update({"lines.linewidth": 2, ...})`` (but ``rcParams.update``
does not support abbreviations or grouping).
"""
aliases = {
'lw': 'linewidth',
'ls': 'linestyle',
'c': 'color',
'fc': 'facecolor',
'ec': 'edgecolor',
'mew': 'markeredgewidth',
'aa': 'antialiased',
}
if isinstance(group, str):
group = (group,)
for g in group:
for k, v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
try:
rcParams[key] = v
except KeyError as err:
raise KeyError(('Unrecognized key "%s" for group "%s" and '
'name "%s"') % (key, g, name)) from err
def rcdefaults():
"""
Restore the `.rcParams` from Matplotlib's internal default style.
Style-blacklisted `.rcParams` (defined in
`matplotlib.style.core.STYLE_BLACKLIST`) are not updated.
See Also
--------
matplotlib.rc_file_defaults
Restore the `.rcParams` from the rc file originally loaded by
Matplotlib.
matplotlib.style.use
Use a specific style file. Call ``style.use('default')`` to restore
the default style.
"""
# Deprecation warnings were already handled when creating rcParamsDefault,
# no need to reemit them here.
with _api.suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rcParams.clear()
rcParams.update({k: v for k, v in rcParamsDefault.items()
if k not in STYLE_BLACKLIST})
def rc_file_defaults():
"""
Restore the `.rcParams` from the original rc file loaded by Matplotlib.
Style-blacklisted `.rcParams` (defined in
`matplotlib.style.core.STYLE_BLACKLIST`) are not updated.
"""
# Deprecation warnings were already handled when creating rcParamsOrig, no
# need to reemit them here.
with _api.suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig
if k not in STYLE_BLACKLIST})
def rc_file(fname, *, use_default_template=True):
"""
Update `.rcParams` from file.
Style-blacklisted `.rcParams` (defined in
`matplotlib.style.core.STYLE_BLACKLIST`) are not updated.
Parameters
----------
fname : str or path-like
A file with Matplotlib rc settings.
use_default_template : bool
If True, initialize with default parameters before updating with those
in the given file. If False, the current configuration persists
and only the parameters specified in the file are updated.
"""
# Deprecation warnings were already handled in rc_params_from_file, no need
# to reemit them here.
with _api.suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rc_from_file = rc_params_from_file(
fname, use_default_template=use_default_template)
rcParams.update({k: rc_from_file[k] for k in rc_from_file
if k not in STYLE_BLACKLIST})
@contextlib.contextmanager
def rc_context(rc=None, fname=None):
"""
Return a context manager for temporarily changing rcParams.
Parameters
----------
rc : dict
The rcParams to temporarily set.
fname : str or path-like
A file with Matplotlib rc settings. If both *fname* and *rc* are given,
settings from *rc* take precedence.
See Also
--------
:ref:`customizing-with-matplotlibrc-files`
Examples
--------
Passing explicit values via a dict::
with mpl.rc_context({'interactive': False}):
fig, ax = plt.subplots()
ax.plot(range(3), range(3))
fig.savefig('example.png')
plt.close(fig)
Loading settings from a file::
with mpl.rc_context(fname='print.rc'):
plt.plot(x, y) # uses 'print.rc'
"""
orig = rcParams.copy()
try:
if fname:
rc_file(fname)
if rc:
rcParams.update(rc)
yield
finally:
dict.update(rcParams, orig) # Revert to the original rcs.
def use(backend, *, force=True):
"""
Select the backend used for rendering and GUI integration.
Parameters
----------
backend : str
The backend to switch to. This can either be one of the standard
backend names, which are case-insensitive:
- interactive backends:
GTK3Agg, GTK3Cairo, GTK4Agg, GTK4Cairo, MacOSX, nbAgg, QtAgg,
QtCairo, TkAgg, TkCairo, WebAgg, WX, WXAgg, WXCairo, Qt5Agg, Qt5Cairo
- non-interactive backends:
agg, cairo, pdf, pgf, ps, svg, template
or a string of the form: ``module://my.module.name``.
Switching to an interactive backend is not possible if an unrelated
event loop has already been started (e.g., switching to GTK3Agg if a
TkAgg window has already been opened). Switching to a non-interactive
backend is always possible.
force : bool, default: True
If True (the default), raise an `ImportError` if the backend cannot be
set up (either because it fails to import, or because an incompatible
GUI interactive framework is already running); if False, silently
ignore the failure.
See Also
--------
:ref:`backends`
matplotlib.get_backend
"""
name = validate_backend(backend)
# we need to use the base-class method here to avoid (prematurely)
# resolving the "auto" backend setting
if dict.__getitem__(rcParams, 'backend') == name:
# Nothing to do if the requested backend is already set
pass
else:
# if pyplot is not already imported, do not import it. Doing
# so may trigger a `plt.switch_backend` to the _default_ backend
# before we get a chance to change to the one the user just requested
plt = sys.modules.get('matplotlib.pyplot')
# if pyplot is imported, then try to change backends
if plt is not None:
try:
# we need this import check here to re-raise if the
# user does not have the libraries to support their
# chosen backend installed.
plt.switch_backend(name)
except ImportError:
if force:
raise
# if we have not imported pyplot, then we can set the rcParam
# value which will be respected when the user finally imports
# pyplot
else:
rcParams['backend'] = backend
# if the user has asked for a given backend, do not helpfully
# fallback
rcParams['backend_fallback'] = False
if os.environ.get('MPLBACKEND'):
rcParams['backend'] = os.environ.get('MPLBACKEND')
def get_backend():
"""
Return the name of the current backend.
See Also
--------
matplotlib.use
"""
return rcParams['backend']
def interactive(b):
"""
Set whether to redraw after every plotting command (e.g. `.pyplot.xlabel`).
"""
rcParams['interactive'] = b
def is_interactive():
"""
Return whether to redraw after every plotting command.
.. note::
This function is only intended for use in backends. End users should
use `.pyplot.isinteractive` instead.
"""
return rcParams['interactive']
default_test_modules = [
'matplotlib.tests',
'mpl_toolkits.tests',
]
def _init_tests():
# The version of FreeType to install locally for running the
# tests. This must match the value in `setupext.py`
LOCAL_FREETYPE_VERSION = '2.6.1'
from matplotlib import ft2font
if (ft2font.__freetype_version__ != LOCAL_FREETYPE_VERSION or
ft2font.__freetype_build_type__ != 'local'):
_log.warning(
f"Matplotlib is not built with the correct FreeType version to "
f"run tests. Rebuild without setting system_freetype=1 in "
f"mplsetup.cfg. Expect many image comparison failures below. "
f"Expected freetype version {LOCAL_FREETYPE_VERSION}. "
f"Found freetype version {ft2font.__freetype_version__}. "
"Freetype build type is {}local".format(
"" if ft2font.__freetype_build_type__ == 'local' else "not "))
@_api.deprecated("3.5", alternative='pytest')
def test(verbosity=None, coverage=False, **kwargs):
"""Run the matplotlib test suite."""
try:
import pytest
except ImportError:
print("matplotlib.test requires pytest to run.")
return -1
if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'tests')):
print("Matplotlib test data is not installed")
return -1
old_backend = get_backend()
old_recursionlimit = sys.getrecursionlimit()
try:
use('agg')
args = kwargs.pop('argv', [])
provide_default_modules = True
use_pyargs = True
for arg in args:
if any(arg.startswith(module_path)
for module_path in default_test_modules):
provide_default_modules = False
break
if os.path.exists(arg):
provide_default_modules = False
use_pyargs = False
break
if use_pyargs:
args += ['--pyargs']
if provide_default_modules:
args += default_test_modules
if coverage:
args += ['--cov']
if verbosity:
args += ['-' + 'v' * verbosity]
retcode = pytest.main(args, **kwargs)
finally:
if old_backend.lower() != 'agg':
use(old_backend)
return retcode
test.__test__ = False # pytest: this function is not a test
def _replacer(data, value):
"""
Either returns ``data[value]`` or passes ``data`` back, converts either to
a sequence.
"""
try:
# if key isn't a string don't bother
if isinstance(value, str):
# try to use __getitem__
value = data[value]
except Exception:
# key does not exist, silently fall back to key
pass
return sanitize_sequence(value)
def _label_from_arg(y, default_name):
try:
return y.name
except AttributeError:
if isinstance(default_name, str):
return default_name
return None
def _add_data_doc(docstring, replace_names):
"""
Add documentation for a *data* field to the given docstring.
Parameters
----------
docstring : str
The input docstring.
replace_names : list of str or None
The list of parameter names which arguments should be replaced by
``data[name]`` (if ``data[name]`` does not throw an exception). If
None, replacement is attempted for all arguments.
Returns
-------
str
The augmented docstring.
"""
if (docstring is None
or replace_names is not None and len(replace_names) == 0):
return docstring
docstring = inspect.cleandoc(docstring)
data_doc = ("""\
If given, all parameters also accept a string ``s``, which is
interpreted as ``data[s]`` (unless this raises an exception)."""
if replace_names is None else f"""\
If given, the following parameters also accept a string ``s``, which is
interpreted as ``data[s]`` (unless this raises an exception):
{', '.join(map('*{}*'.format, replace_names))}""")
# using string replacement instead of formatting has the advantages
# 1) simpler indent handling
# 2) prevent problems with formatting characters '{', '%' in the docstring
if _log.level <= logging.DEBUG:
# test_data_parameter_replacement() tests against these log messages
# make sure to keep message and test in sync
if "data : indexable object, optional" not in docstring:
_log.debug("data parameter docstring error: no data parameter")
if 'DATA_PARAMETER_PLACEHOLDER' not in docstring:
_log.debug("data parameter docstring error: missing placeholder")
return docstring.replace(' DATA_PARAMETER_PLACEHOLDER', data_doc)
def _preprocess_data(func=None, *, replace_names=None, label_namer=None):
"""
A decorator to add a 'data' kwarg to a function.
When applied::
@_preprocess_data()
def func(ax, *args, **kwargs): ...
the signature is modified to ``decorated(ax, *args, data=None, **kwargs)``
with the following behavior:
- if called with ``data=None``, forward the other arguments to ``func``;
- otherwise, *data* must be a mapping; for any argument passed in as a
string ``name``, replace the argument by ``data[name]`` (if this does not
throw an exception), then forward the arguments to ``func``.
In either case, any argument that is a `MappingView` is also converted to a
list.
Parameters
----------
replace_names : list of str or None, default: None
The list of parameter names for which lookup into *data* should be
attempted. If None, replacement is attempted for all arguments.
label_namer : str, default: None
If set e.g. to "namer" (which must be a kwarg in the function's
signature -- not as ``**kwargs``), if the *namer* argument passed in is
a (string) key of *data* and no *label* kwarg is passed, then use the
(string) value of the *namer* as *label*. ::
@_preprocess_data(label_namer="foo")
def func(foo, label=None): ...
func("key", data={"key": value})
# is equivalent to
func.__wrapped__(value, label="key")
"""
if func is None: # Return the actual decorator.
return functools.partial(
_preprocess_data,
replace_names=replace_names, label_namer=label_namer)
sig = inspect.signature(func)
varargs_name = None
varkwargs_name = None
arg_names = []
params = list(sig.parameters.values())
for p in params:
if p.kind is Parameter.VAR_POSITIONAL:
varargs_name = p.name
elif p.kind is Parameter.VAR_KEYWORD:
varkwargs_name = p.name
else:
arg_names.append(p.name)
data_param = Parameter("data", Parameter.KEYWORD_ONLY, default=None)
if varkwargs_name:
params.insert(-1, data_param)
else:
params.append(data_param)
new_sig = sig.replace(parameters=params)
arg_names = arg_names[1:] # remove the first "ax" / self arg
assert {*arg_names}.issuperset(replace_names or []) or varkwargs_name, (
"Matplotlib internal error: invalid replace_names ({!r}) for {!r}"
.format(replace_names, func.__name__))
assert label_namer is None or label_namer in arg_names, (
"Matplotlib internal error: invalid label_namer ({!r}) for {!r}"
.format(label_namer, func.__name__))
@functools.wraps(func)
def inner(ax, *args, data=None, **kwargs):
if data is None:
return func(ax, *map(sanitize_sequence, args), **kwargs)
bound = new_sig.bind(ax, *args, **kwargs)
auto_label = (bound.arguments.get(label_namer)
or bound.kwargs.get(label_namer))
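        # Substitute data[...] lookups into the bound arguments, handling
        # **kwargs, *args, and named parameters separately.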
for k, v in bound.arguments.items():
if k == varkwargs_name:
for k1, v1 in v.items():
if replace_names is None or k1 in replace_names:
v[k1] = _replacer(data, v1)
elif k == varargs_name:
if replace_names is None:
bound.arguments[k] = tuple(_replacer(data, v1) for v1 in v)
else:
if replace_names is None or k in replace_names:
bound.arguments[k] = _replacer(data, v)
new_args = bound.args
new_kwargs = bound.kwargs
args_and_kwargs = {**bound.arguments, **bound.kwargs}
if label_namer and "label" not in args_and_kwargs:
new_kwargs["label"] = _label_from_arg(
args_and_kwargs.get(label_namer), auto_label)
return func(*new_args, **new_kwargs)
inner.__doc__ = _add_data_doc(inner.__doc__, replace_names)
inner.__signature__ = new_sig
return inner
_log.debug('interactive is %s', is_interactive())
_log.debug('platform is %s', sys.platform)
_log.debug('loaded modules: %s', list(sys.modules))
# workaround: we must defer colormaps import to after loading rcParams, because
# colormap creation depends on rcParams
from matplotlib.cm import _colormaps as colormaps
| 36.094288 | 81 | 0.608256 |
f52aaf184804047ec2df5b7e73354adcabe7f807 | 44,921 | py | Python | crossenv/__init__.py | xhochy/crossenv | 56ddd69bf0c81d0c494bc1ecc502eec784e0e50f | [
"MIT"
] | null | null | null | crossenv/__init__.py | xhochy/crossenv | 56ddd69bf0c81d0c494bc1ecc502eec784e0e50f | [
"MIT"
] | null | null | null | crossenv/__init__.py | xhochy/crossenv | 56ddd69bf0c81d0c494bc1ecc502eec784e0e50f | [
"MIT"
] | null | null | null | import venv
import os
import sysconfig
import glob
import sys
import shutil
from textwrap import dedent
import subprocess
import logging
import importlib
import types
from configparser import ConfigParser
import random
import shlex
import platform
import pprint
import re
from .utils import F
from . import utils
__version__ = '1.1.4'
logger = logging.getLogger(__name__)
class CrossEnvBuilder(venv.EnvBuilder):
"""
A class to build a cross-compiling virtual environment useful for
cross compiling wheels or developing firmware images.
Here the `host` is the device on which the final code will run, such
as an embedded system of some sort. `build` is the machine doing the
compiling, usually a desktop or server. Usually the `host` Python
executables won't run on the `build` machine.
When we refer to `build-python`, we mean the current interpreter. (It is
    *always* the current interpreter.) When we refer to `host-python`, we mean
the interpreter that will run on the host. When we refer to `cross-python`,
we mean an interpreter that runs on `build` but reports system information
as if it were running on `host`. In other words, `cross-python` does the
cross compiling, and is what this class will create for us.
You must have the toolchain used to compile the host Python binary
available when using this virtual environment. The virtual environment
will pick the correct compiler based on info recorded when the host
Python binary was compiled.
:param host_python: The path to the host Python binary. This may be in
a build directory (i.e., after `make`), or in an
install directory (after `make install`). It
*must* be the exact same version as build-python.
:param extra_env_vars: When cross-python starts, this is an iterable of
(name, op, value) tuples. op may be one of '=' to
indicate that the variable will be set
unconditionally, or '?=' to indicate that the
variable will be set only if not already set by the
environment.
:param build_system_site_packages:
Whether or not build-python's virtual environment
will have access to the system site packages.
cross-python never has access, for obvious reasons.
:param clear: Whether to delete the contents of the environment
directories if they already exist, before
environment creation. May be a false value, or one
of 'default', 'cross', 'build', or 'both'.
'default' means to clear cross only when
cross_prefix is None.
:param cross_prefix: Explicitly set the location of the cross-python
virtual environment.
:param with_cross_pip: If True, ensure pip is installed in the
cross-python virtual environment.
:param with_build_pip: If True, ensure pip is installed in the
build-python virtual environment.
:param host_sysroot: If given, the cross-compiler toolchain's sysroot.
If not given, an attempt will be made to guess.
These will be added (redundantly) to the default
search paths to help trick some packages.
:param host_cc: If given, override CC and related variables with
this value.
:param host_cxx: If given, override CXX and related variables with
this value.
:param host_ar: If given, override AR and related variables with
this value.
:param host_relativize: If True, convert absolute paths in CC, CXX, and
related variables to use the base name. Tools must
be in $PATH for this to work.
:param host_config_vars: Extra config_vars (build_time_vars) to override,
such as CC, CCSHARED, etc.
:param host_sysconfigdata_file: Explicitly set the sysconfigdata file path.
If not given, all sysconfigdata files will
be searched and will error if there are
multiple files that have different values.
:param manylinux_tags: Manylinux tags that are acceptable when downloading
from PyPI.
:param host_machine: Host machine override seen by cross-python at
runtime. Default is guessed from host-python.
"""
def __init__(self, *,
host_python,
extra_env_vars=(),
build_system_site_packages=False,
clear=False,
cross_prefix=None,
with_cross_pip=False,
with_build_pip=False,
host_sysroot=None,
host_cc=None,
host_cxx=None,
host_ar=None,
host_relativize=False,
host_config_vars=(),
host_sysconfigdata_file=None,
manylinux_tags=(),
host_machine=None):
self.host_sysroot = host_sysroot
self.host_cc = None
self.host_cxx = None
self.host_ar = None
if host_cc:
self.host_cc = shlex.split(host_cc)
if host_cxx:
self.host_cxx = shlex.split(host_cxx)
if host_ar:
self.host_ar = shlex.split(host_ar)
self.host_relativize = host_relativize
self.host_config_vars = host_config_vars
self.host_sysconfigdata_file = host_sysconfigdata_file
self.build_system_site_packages = build_system_site_packages
self.extra_env_vars = extra_env_vars
self.clear_build = clear in ('default', 'build', 'both')
if with_cross_pip and not with_build_pip:
raise ValueError("Cannot have cross-pip without build-pip")
self.with_cross_pip = with_cross_pip
self.with_build_pip = with_build_pip
if cross_prefix:
self.cross_prefix = os.path.abspath(cross_prefix)
self.clear_cross = clear in ('cross', 'both')
else:
self.cross_prefix = None
self.clear_cross = clear in ('default', 'cross', 'both')
self.manylinux_tags = manylinux_tags
self.host_machine = host_machine
self.find_host_python(host_python)
self.find_compiler_info()
self.get_uname_info()
self.expand_manylinux_tags()
super().__init__(
system_site_packages=False,
clear=False,
symlinks=True,
upgrade=False,
with_pip=False)
def find_installed_host_home(self):
# Assume host_project_base == {prefix}/bin and that this Python
# mirrors the host Python's install paths.
        # One caveat: on native host Python (for testing) this might be a
# virtualenv.
home = os.path.dirname(self.host_project_base)
pyvenv = os.path.join(home, 'pyvenv.cfg')
if os.path.exists(pyvenv):
with open(pyvenv) as fp:
for line in fp:
key, _, val = line.partition('=')
key = key.strip()
val = val.strip()
if key == 'home':
return os.path.dirname(val)
return home
def find_sysconfig_data(self, paths):
maybe = []
for path in paths:
pattern = os.path.join(path, '_sysconfigdata*.py*')
maybe.extend(glob.glob(pattern))
sysconfig_paths = set()
for filename in maybe:
if (os.path.isfile(filename) and
os.path.splitext(filename)[1] in ('.py', '.pyc')):
sysconfig_paths.add(filename)
# Multiples can happen, but so long as they all have the same
# info we should be okay. Seen in buildroot
# When choosing the correct one, prefer, in order:
# 1) The .py file
# 2) The .pyc file
# 3) Any .opt-*.pyc files
# so sort by the length of the longest extension
sysconfig_paths = sorted(sysconfig_paths,
key=lambda x: len(x.split('.',1)[1]))
if self.host_sysconfigdata_file is not None:
sysconfig_paths = [self.host_sysconfigdata_file]
self.host_sysconfigdata = None
for path in sysconfig_paths:
basename = os.path.basename(path)
name, _ = os.path.splitext(basename)
spec = importlib.util.spec_from_file_location(name, path)
syscfg = importlib.util.module_from_spec(spec)
spec.loader.exec_module(syscfg)
if self.host_sysconfigdata is None:
self.host_sysconfigdata = syscfg
self.host_sysconfigdata_file = path
self.host_sysconfigdata_name = name
elif (self.host_sysconfigdata.build_time_vars !=
syscfg.build_time_vars):
logger.error("Conflicting build info in %s and %s",
self.host_sysconfigdata_file, path)
raise ValueError("Malformed Python installation!")
if not self.host_sysconfigdata:
logger.error("Cannot find _sysconfigdata*.py. Looked in %s",
', '.join(paths))
raise FileNotFoundError("No _sysconfigdata*.py found in host lib")
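    # Example of the preference order described above: if both a
    # _sysconfigdata*.py and a _sysconfigdata*.pyc (or .opt-*.pyc) variant are
    # found, the .py file sorts first and is the one that gets imported.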
def find_host_python(self, host):
"""
Find Python paths and other info based on a path.
:param host: Path to the host Python executable.
"""
build_version = sysconfig.get_config_var('VERSION')
host = os.path.abspath(host)
if not os.path.exists(host):
raise FileNotFoundError("%s does not exist" % host)
elif not os.path.isfile(host):
raise ValueError("Expected a path to a Python executable. "
"Got %s" % host)
else:
self.host_project_base = os.path.dirname(host)
if sysconfig._is_python_source_dir(self.host_project_base):
self.host_makefile = os.path.join(self.host_project_base, 'Makefile')
pybuilddir = os.path.join(self.host_project_base, 'pybuilddir.txt')
try:
with open(pybuilddir, 'r') as fp:
build_dir = fp.read().strip()
except IOError:
                raise IOError(
                    "Cannot read %s: Build the host Python first!" % pybuilddir) from None
self.host_home = self.host_project_base
sysconfig_paths = [os.path.join(self.host_project_base, build_dir)]
else:
self.host_home = self.find_installed_host_home()
python_ver = 'python' + sysconfig.get_config_var('py_version_short')
libdir = os.path.join(self.host_home, 'lib', python_ver)
sysconfig_paths = [
libdir,
# Ubuntu puts it in libdir/plat-<arch>
os.path.join(libdir, '*'),
# Below might be a version mismatch, but try to use it
#os.path.join(self.host_home, 'lib', 'python*'),
#os.path.join(self.host_home, 'lib', 'python*', '*'),
]
makefile = glob.glob(os.path.join(libdir, '*', 'Makefile'))
if not makefile:
self.host_makefile = '' # fail later
else:
self.host_makefile = makefile[0]
# We need paths to sysconfig data, and we need to import it to ask
# a few questions.
self.find_sysconfig_data(sysconfig_paths)
# If the user wants to override host_cc, that takes precedence.
host_cc = self.host_sysconfigdata.build_time_vars['CC']
self.real_host_cc = shlex.split(host_cc)
if not self.host_cc:
self.host_cc = self.real_host_cc
if self.host_relativize:
self.host_cc[0] = os.path.basename(self.host_cc[0])
# CC could be compound command, like 'gcc --sysroot=...' (Issue #5)
# but that can cause issues (#7) so let the user know.
if len(self.host_cc) > 1:
logger.warning("CC is a compound command (%s)", self.host_cc)
logger.warning("This can cause issues for modules that don't "
"expect it.")
logger.warning("Consider setting CC='%s' and CFLAGS='%s'",
self.host_cc[0], ' '.join(self.host_cc[1:]))
host_cxx = self.host_sysconfigdata.build_time_vars['CXX']
self.real_host_cxx = shlex.split(host_cxx)
if not self.host_cxx:
self.host_cxx = self.real_host_cxx
if self.host_relativize:
self.host_cxx[0] = os.path.basename(self.host_cxx[0])
if len(self.host_cxx) > 1:
logger.warning("CXX is a compound command (%s)", self.host_cxx)
logger.warning("This can cause issues for modules that don't "
"expect it.")
logger.warning("Consider setting CXX='%s' and CXXFLAGS='%s'",
self.host_cxx[0], ' '.join(self.host_cxx[1:]))
host_ar = self.host_sysconfigdata.build_time_vars['AR']
self.real_host_ar = shlex.split(host_ar)
if not self.host_ar:
self.host_ar = self.real_host_ar
if self.host_relativize:
self.host_ar[0] = os.path.basename(self.host_ar[0])
self.host_version = self.host_sysconfigdata.build_time_vars['VERSION']
self.host_gnu_type = self.host_sysconfigdata.build_time_vars['HOST_GNU_TYPE']
self.host_platform = None
# Ask the makefile a few questions too
if os.path.exists(self.host_makefile):
with open(self.host_makefile, 'r') as fp:
lines = list(fp.readlines())
for line in lines:
line = line.strip()
if line.startswith('_PYTHON_HOST_PLATFORM='):
host_platform = line.split('=',1)[-1].strip()
if host_platform:
self.host_platform = host_platform
break
if self.host_platform is None:
# It was probably natively compiled, but not necessarily for this
# architecture. Guess from HOST_GNU_TYPE.
host = self.host_gnu_type.split('-')
if len(host) == 4: # i.e., aarch64-unknown-linux-gnu
self.host_platform = '-'.join([host[2], host[0]])
elif len(host) == 3: # i.e., aarch64-linux-gnu, unlikely.
self.host_platform = '-'.join([host[1], host[0]])
else:
logger.warning("Cannot determine platform. Using build.")
self.host_platform = sysconfig.get_platform()
self.macosx_deployment_target = ''
for line in lines:
line = line.strip()
if line.startswith('MACOSX_DEPLOYMENT_TARGET='):
self.macosx_deployment_target = line.split('=',1)[-1]
break
# Sanity checks
if self.host_version != build_version:
raise ValueError("Version mismatch: host=%s, build=%s" % (
self.host_version, build_version))
def find_compiler_info(self):
"""
Query the compiler for extra info useful for cross-compiling,
and also check that it exists.
"""
def run_compiler(arg):
cmdline = self.host_cc + [arg]
return subprocess.run(cmdline,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if not shutil.which(self.host_cc[0]):
raise RuntimeError(
"Cannot find cross-compiler (%r)! Extension modules won't "
"build! Use --cc to correct." % ' '.join(self.host_cc))
# Check that it runs, but failing this is a warning. Some compilers,
# like QNX's qcc, do not have useful arguments we can pass to get a
# successful return value.
res = run_compiler('--version')
if res.returncode != 0:
logger.warning(
"Test run of %r exited with code %d: %s" % (
self.host_cc, res.returncode, res.stderr))
# If it doesn't have --version, it certainly won't have
# -print-sysroot or -dumpmachine
return
# TODO: Clang doesn't have this option
if self.host_sysroot is None:
res = run_compiler('-print-sysroot')
if res.returncode == 0:
self.host_sysroot = res.stdout.strip()
# Sanity check that this is the right compiler. (See #24, #27.)
res = run_compiler('-dumpmachine')
found_triple = res.stdout.strip()
if res.returncode == 0 and found_triple:
expected = self.host_sysconfigdata.build_time_vars['HOST_GNU_TYPE']
if not self._compare_triples(found_triple, expected):
logger.warning("The cross-compiler (%r) does not appear to be "
"for the correct architecture (got %s, expected "
"%s). Use --cc to correct, if necessary.",
' '.join(self.host_cc),
found_triple,
expected)
def _compare_triples(self, x, y):
# They are in the form cpu-vendor-kernel-system or cpu-kernel-system.
# So we'll get something like: x86_64-linux-gnu or x86_64-pc-linux-gnu.
# We won't overcomplicate this, since it's just to generate a warning.
#
# We return True if we can't make sense of anything and wish to skip
# the warning.
parts_x = x.split('-')
if len(parts_x) == 4:
del parts_x[1]
elif len(parts_x) != 3:
return True # Some other form? Bail out.
parts_y = y.split('-')
if len(parts_y) == 4:
del parts_y[1]
elif len(parts_y) != 3:
return True # Some other form? Bail out.
return parts_x == parts_y
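    # Example of the comparison above: 'x86_64-pc-linux-gnu' and
    # 'x86_64-linux-gnu' both reduce to ['x86_64', 'linux', 'gnu'] and are
    # treated as the same target, while 'aarch64-linux-gnu' would not match.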
def create(self, env_dir):
"""
Create a cross virtual environment in a directory
:param env_dir: The target directory to create an environment in.
"""
env_dir = os.path.abspath(env_dir)
context = self.ensure_directories(env_dir)
self.make_build_python(context)
self.make_cross_python(context)
self.post_setup(context)
def ensure_directories(self, env_dir):
"""
Create the directories for the environment.
Returns a context object which holds paths in the environment,
for use by subsequent logic.
"""
# Directory structure:
#
# ENV_DIR/
# cross/ cross-python venv
# build/ build-python venv
# lib/ libs for setting up cross-python
# bin/ holds activate scripts.
if os.path.exists(env_dir) and (self.clear_cross or self.clear_build):
subdirs = os.listdir(env_dir)
for sub in subdirs:
if sub in ('cross', 'build'):
continue
utils.remove_path(os.path.join(env_dir, sub))
context = super().ensure_directories(env_dir)
context.lib_path = os.path.join(env_dir, 'lib')
context.exposed_libs = os.path.join(context.lib_path, 'exposed.txt')
utils.mkdir_if_needed(context.lib_path)
return context
def get_uname_info(self):
"""
What should uname() return?
"""
# host_platform is _probably_ something like linux-x86_64, but it can
# vary.
host_info = self.host_platform.split('-')
if not host_info:
self.host_sysname = sys.platform
elif len(host_info) >= 1:
self.host_sysname = host_info[0]
if self.host_machine is None:
if len(host_info) > 1 and host_info[-1] == "powerpc64le":
# Test that this is still a special case when we can.
# On uname.machine=ppc64le, _PYTHON_HOST_PLATFORM is linux-powerpc64le
self.host_machine = "ppc64le"
else:
self.host_machine = self.host_gnu_type.split('-')[0]
self.host_release = ''
if self.macosx_deployment_target:
try:
major, minor = self.macosx_deployment_target.split(".")
major, minor = int(major), int(minor)
except ValueError:
raise ValueError("Unexpected value %s for MACOSX_DEPLOYMENT_TARGET" %
self.macosx_deployment_target)
if major == 10:
self.host_release = "%s.0.0" % (minor + 4)
elif major == 11:
self.host_release = "%s.0.0" % (minor + 20)
else:
raise ValueError("Unexpected major version %s for MACOSX_DEPLOYMENT_TARGET" %
major)
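    # Worked example of the mapping above:
    #   MACOSX_DEPLOYMENT_TARGET=10.9 -> host_release '13.0.0' (9 + 4)
    #   MACOSX_DEPLOYMENT_TARGET=11.0 -> host_release '20.0.0' (0 + 20)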
def expand_manylinux_tags(self):
"""
Convert legacy manylinux tags to PEP600, because pip only looks for one
or the other
"""
manylinux_tags = set(self.manylinux_tags)
extra_tags = set()
effective_glibc = None
# we'll be very strict here: don't assume that manylinux2014 implies
# manylinux1 and so on.
if 'manylinux1' in manylinux_tags:
extra_tags.add('manylinux_2_5')
effective_glibc = (2, 5)
if 'manylinux2010' in manylinux_tags:
extra_tags.add('manylinux_2_12')
effective_glibc = (2, 12)
if 'manylinux2014' in manylinux_tags:
extra_tags.add('manylinux_2_17')
effective_glibc = (2, 17)
if 'manylinux_2_5' in manylinux_tags:
extra_tags.add('manylinux1')
if 'manylinux_2_12' in manylinux_tags:
extra_tags.add('manylinux2010')
if 'manylinux_2_17' in manylinux_tags:
extra_tags.add('manylinux2014')
manylinux_tags.update(extra_tags)
self.manylinux_tags = manylinux_tags
for tag in manylinux_tags:
            # I know *I* mistype it a lot.
            if not re.search(r'manylinux', tag):
                logger.warning("Tag %r does not contain 'manylinux'", tag)
m = re.match(r'manylinux_(\d+)_(\d+)', tag)
if not m:
continue
glibc = (int(m.group(1)), int(m.group(2)))
if effective_glibc is None or glibc > effective_glibc:
effective_glibc = glibc
self.effective_glibc = effective_glibc
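    # Worked example of the expansion above: requesting 'manylinux2014' also
    # advertises the PEP 600 spelling 'manylinux_2_17' and sets
    # effective_glibc to (2, 17); requesting 'manylinux_2_12' adds the legacy
    # alias 'manylinux2010'.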
def make_build_python(self, context):
"""
Assemble the build-python virtual environment
"""
context.build_env_dir = os.path.join(context.env_dir, 'build')
logger.info("Creating build-python environment")
env = venv.EnvBuilder(
system_site_packages=self.build_system_site_packages,
clear=self.clear_build,
with_pip=self.with_build_pip,
symlinks=True)
env.create(context.build_env_dir)
context.build_bin_path = os.path.join(context.build_env_dir, 'bin')
context.build_env_exe = os.path.join(
context.build_bin_path, context.python_exe)
# What is build-python's sys.path?
out = subprocess.check_output(
[context.build_env_exe,
'-c',
r"import sys; print('\n'.join(sys.path))"],
universal_newlines=True).splitlines()
context.build_sys_path = []
for line in out:
line = line.strip()
if line:
context.build_sys_path.append(line)
if self.with_build_pip:
# Make sure we install the same version of pip and setuptools to
# prevent errors (#1).
reqs = subprocess.check_output([context.build_env_exe, '-m', 'pip',
'--disable-pip-version-check',
'freeze',
'--all'],
universal_newlines=True)
all_reqs = reqs.split()
context.build_pip_reqs = []
for req in all_reqs:
package = req.split('==')[0]
if package == 'pip':
context.build_pip_version = req
context.build_pip_reqs.append(req)
elif package == 'setuptools':
context.build_pip_reqs.append(req)
# Many distributions use a patched, 'unbundled' version of pip,
# where the vendored packages aren't stored within pip itself, but
# elsewhere on the system. This breaks cross-pip, which won't be
# able to find them after the modifications we made. Fix this by
# downloading a stock version of pip (Issue #6).
if self._build_pip_is_unbundled(context):
logger.info("Redownloading stock pip")
subprocess.check_output([context.build_env_exe, '-m', 'pip',
'--disable-pip-version-check',
'install',
'--ignore-installed',
context.build_pip_version])
def _build_pip_is_unbundled(self, context):
pyver = 'python' + sysconfig.get_config_var('py_version_short')
bundled_module = os.path.join(context.build_env_dir,
'lib',
pyver,
'site-packages',
'pip',
'_vendor',
'six.py')
return not os.path.exists(bundled_module)
def make_cross_python(self, context):
"""
Assemble the cross-python virtual environment
"""
logger.info("Creating cross-python environment")
if self.cross_prefix:
context.cross_env_dir = self.cross_prefix
else:
context.cross_env_dir = os.path.join(context.env_dir, 'cross')
env = venv.EnvBuilder(
system_site_packages=False,
clear=self.clear_cross,
symlinks=True,
upgrade=False,
with_pip=False)
env.create(context.cross_env_dir)
context.cross_bin_path = os.path.join(context.cross_env_dir, 'bin')
context.cross_lib_path = os.path.join(context.cross_env_dir, 'lib')
context.cross_env_exe = os.path.join(
context.cross_bin_path, context.python_exe)
context.cross_cfg_path = os.path.join(context.cross_env_dir, 'pyvenv.cfg')
context.cross_activate = os.path.join(context.cross_bin_path, 'activate')
pyver = 'python' + sysconfig.get_config_var('py_version_short')
context.cross_site_lib_path = os.path.join(context.cross_lib_path,
pyver, 'site-packages')
# Remove binaries. We'll run from elsewhere
for exe in os.listdir(context.cross_bin_path):
if not exe.startswith('activate'):
utils.remove_path(os.path.join(context.cross_bin_path, exe))
# Alter pyvenv.cfg
with utils.overwrite_file(context.cross_cfg_path) as out:
with open(context.cross_cfg_path) as inp:
for line in inp:
if line.split()[0:2] == ['home', '=']:
line = 'home = %s\n' % self.host_project_base
out.write(line)
# make a script that sets the environment variables and calls Python.
# Don't do this in bin/activate, because it's a pain to set/unset
# properly (and for csh, fish as well).
# Note that env_exe hasn't actually been created yet.
# If this venv is generated from a cross-python still in its
# build directory, rather than installed, then our modifications
# prevent build-python from finding its pure-Python libs, which
# will cause a crash on startup. Add them back to PYTHONPATH.
# Also: 'stdlib' might not be accurate if build-python is in a build
# directory.
stdlib = os.path.abspath(os.path.dirname(os.__file__))
context.sentinel = random.randint(0,0xffffffff)
extra_envs = list(self.extra_env_vars)
# Add sysroot to various environment variables. This doesn't help
# compiling, but some packages try to do manual checks for existence
# of headers and libraries. This will help them find things.
if self.host_sysroot:
if os.path.isdir(os.path.join(self.host_sysroot, 'usr')):
libs = os.path.join(self.host_sysroot, 'usr', 'lib*')
inc = os.path.join(self.host_sysroot, 'usr', 'include')
elif os.path.isdir(os.path.join(self.host_sysroot, 'lib')):
libs = os.path.join(self.host_sysroot, 'lib*')
inc = os.path.join(self.host_sysroot, 'include')
else:
libs = ''
inc = ''
libs = glob.glob(libs)
if not libs:
logger.warning("No libs in sysroot. Does it exist?")
else:
libs = os.pathsep.join(libs)
extra_envs.insert(0, ('LIBRARY_PATH', ':=', libs))
if not os.path.isdir(inc):
logger.warning("No include/ in sysroot. Does it exist?")
else:
extra_envs.insert(0, ('CPATH', ':=', inc))
# Put a few things in locals to make templating marginally less gross
macosx_deployment_target = self.macosx_deployment_target
host_sysconfigdata = self.host_sysconfigdata
host_build_time_vars = self.host_sysconfigdata.build_time_vars
sysconfig_name = self.host_sysconfigdata_name
# Install patches to environment
self.copy_and_patch_sysconfigdata(context)
tmpl = utils.TemplateContext()
tmpl.update(locals())
utils.install_script('pywrapper.py.tmpl', context.cross_env_exe, tmpl)
# Everything in lib_path follows the same pattern
site_scripts = [
'site.py',
'sys-patch.py',
'os-patch.py',
'importlib-machinery-patch.py',
'platform-patch.py',
'sysconfig-patch.py',
'distutils-sysconfig-patch.py',
'pkg_resources-patch.py',
]
for script in site_scripts:
src = script + '.tmpl'
dst = os.path.join(context.lib_path, script)
utils.install_script(src, dst, tmpl)
utils.install_script('_manylinux.py.tmpl',
os.path.join(context.cross_site_lib_path, '_manylinux.py'),
tmpl)
# Symlink alternate names to our wrapper
for exe in ('python', 'python3'):
exe = os.path.join(context.cross_bin_path, exe)
if not os.path.exists(exe):
utils.symlink(context.python_exe, exe)
# cross-python is ready. We will use build-pip to install cross-pip
# because 'python -m ensurepip' is likely to get confused and think
# that there's nothing to do.
if self.with_cross_pip:
logger.info("Installing cross-pip")
            # Make sure we install the same versions of pip and setuptools
            # that build-python uses, to prevent errors (#1).
logger.debug("Installing: %s", context.build_pip_reqs)
subprocess.check_output([context.cross_env_exe, '-m', 'pip',
'--disable-pip-version-check',
'install',
'--ignore-installed',
'--prefix='+context.cross_env_dir] + context.build_pip_reqs)
def copy_and_patch_sysconfigdata(self, context):
"""
Put sysconfigdata file in the crossenv/lib directory. We will
transform CC, CXX, and related variables as requested.
"""
sysconfig_name = os.path.basename(self.host_sysconfigdata_file)
# we always write a .py, but we might be reading from a .pyc
# (i.e., from buildroot).
sysconfig_name = self.host_sysconfigdata_name + '.py'
context.cross_sysconfig = os.path.join(context.lib_path, sysconfig_name)
# Patch all instances of CC, etc. We'll do a global search and
# replace
host_cc = self.real_host_cc[0]
host_cxx = self.real_host_cxx[0]
host_ar = self.real_host_ar[0]
repl_cc = self.host_cc[0]
repl_cxx = self.host_cxx[0]
repl_ar = self.host_ar[0]
find_cc = re.compile(r'(?:^|(?<=\s))%s(?=\s|$)' % re.escape(host_cc))
find_cxx = re.compile(r'(?:^|(?<=\s))%s(?=\s|$)' % re.escape(host_cxx))
find_ar = re.compile(r'(?:^|(?<=\s))%s(?=\s|$)' % re.escape(host_ar))
cross_sysconfig_data = {}
for key, value in self.host_sysconfigdata.__dict__.items():
if key.startswith('__'):
continue # misc module stuff like __name__, __builtins__
cross_sysconfig_data[key] = value
build_time_vars = {}
for key, value in cross_sysconfig_data['build_time_vars'].items():
if isinstance(value, str):
value = find_ar.sub(repl_ar, value)
value = find_cxx.sub(repl_cxx, value)
value = find_cc.sub(repl_cc, value)
build_time_vars[key] = value
# Handle the case where host-python was natively compiled on another
# architecture. This is only needed because someone (me) thought it was
# a good idea to compare BUILD_GNU_TYPE to HOST_GNU_TYPE to detect
# cross compiling.
build_time_vars['BUILD_GNU_TYPE'] = \
sysconfig.get_config_var('BUILD_GNU_TYPE')
# Overrides from --config_var options
for key, value in self.host_config_vars.items():
build_time_vars[key] = value
cross_sysconfig_data['build_time_vars'] = build_time_vars
with open(context.cross_sysconfig, 'w') as fp:
fp.write("# generated from %s\n" % self.host_sysconfigdata_file)
for key, value in cross_sysconfig_data.items():
fp.write("%s = " % key)
pprint.pprint(value, stream=fp, compact=True)
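    # Example of the substitution above (paths are illustrative only): with a
    # recorded CC of '/usr/bin/arm-linux-gnueabihf-gcc' and --cc
    # 'arm-linux-gnueabihf-gcc', a value such as
    #   'CC': '/usr/bin/arm-linux-gnueabihf-gcc -pthread'
    # is rewritten to
    #   'CC': 'arm-linux-gnueabihf-gcc -pthread'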
def post_setup(self, context):
"""
Extra processing. Put scripts/binaries in the right place.
"""
tmpl = utils.TemplateContext()
tmpl.update(locals())
utils.install_script('cross-expose.py.tmpl',
os.path.join(context.bin_path, 'cross-expose'),
tmpl)
# Don't trust these to be symlinks. A symlink to Python will mess up
# the virtualenv.
# Add cross-python alias to the path. This is just for
# convenience and clarity.
for exe in os.listdir(context.cross_bin_path):
target = os.path.join(context.cross_bin_path, exe)
if not os.path.isfile(target) or not os.access(target, os.X_OK):
continue
dest = os.path.join(context.bin_path, 'cross-' + exe)
utils.make_launcher(target, dest)
# Add build-python and build-pip to the path.
for exe in os.listdir(context.build_bin_path):
target = os.path.join(context.build_bin_path, exe)
if not os.path.isfile(target) or not os.access(target, os.X_OK):
continue
dest = os.path.join(context.bin_path, 'build-' + exe)
utils.make_launcher(target, dest)
logger.info("Finishing up...")
activate = os.path.join(context.bin_path, 'activate')
with open(activate, 'w') as fp:
fp.write(dedent(F('''\
. %(context.cross_activate)s
export PATH=%(context.bin_path)s:$PATH
''', locals())))
def parse_env_vars(env_vars):
"""Convert string descriptions of environment variable assignment into
something that CrossEnvBuilder understands.
:param env_vars: An iterable of strings in the form 'FOO=BAR' or
'FOO?=BAR'
:returns: A list of (name, op, value)
"""
parsed = []
for spec in env_vars:
spec = spec.lstrip()
assign = '='
try:
name, value = spec.split('=',1)
        except ValueError:
raise ValueError("Invalid variable %r. Must be in the form "
"NAME=VALUE" % spec)
if name[-1:] in '?+:':
assign = name[-1] + '='
name = name[:-1]
if not name.isidentifier():
raise ValueError("Invalid variable name %r" % name)
parsed.append((name, assign, value))
return parsed
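# Illustrative sketch of the accepted syntax (not part of the original module):
def _example_parse_env_vars():
    parsed = parse_env_vars(['CC=gcc', 'CFLAGS?=-O2', 'PATH:=/opt/tools/bin'])
    # -> [('CC', '=', 'gcc'), ('CFLAGS', '?=', '-O2'), ('PATH', ':=', '/opt/tools/bin')]
    return parsed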
def parse_config_vars(config_vars):
"""Convert string descriptions of config variable assignment into
something that CrossEnvBuilder understands.
:param config_vars: An iterable of strings in the form 'FOO=BAR'
:returns: A dictionary of name:value pairs.
"""
result = {}
for val in config_vars:
try:
name, value = val.split('=', 1)
except ValueError:
raise ValueError("--config-var must be of the form FOO=BAR")
result[name] = value
return result
def main():
import argparse
parser = argparse.ArgumentParser(description="""
Create virtual Python environments for cross compiling
""")
parser.add_argument('--cross-prefix', action='store',
help="""Specify the directory where cross-python files will be stored.
By default, this is within <ENV_DIR>/cross. You can override
this to have host packages installed in an existing sysroot,
for example. Watch out though: this will write to bin.""")
parser.add_argument('--system-site-packages', action='store_true',
help="""Give the *build* python environment access to the system
site-packages dir.""")
parser.add_argument('--clear', action='store_const', const='default',
help="""Delete the contents of the environment directory if it already
exists. This clears build-python, but cross-python will be
cleared only if --cross-prefix was not set. See also
--clear-both, --clear-cross, and --clear-build.""")
parser.add_argument('--clear-cross', action='store_const', const='cross',
dest='clear',
help="""This clears cross-python only. See also --clear, --clear-both,
and --clear-build.""")
parser.add_argument('--clear-build', action='store_const', const='build',
dest='clear',
help="""This clears build-python only. See also --clear, --clear-both,
and --clear-cross.""")
parser.add_argument('--clear-both', action='store_const', const='both',
dest='clear',
help="""This clears both cross-python and build-python. See also
--clear, --clear-both, and --clear-cross.""")
parser.add_argument('--without-pip', action='store_true',
help="""Skips installing or upgrading pip in both the build and cross
virtual environments. (Pip is bootstrapped by default.)""")
parser.add_argument('--without-cross-pip', action='store_true',
help="""Skips installing or upgrading pip in the cross virtual
environment. Note that you cannot have cross-pip without
build-pip.""")
parser.add_argument('--relative-toolchain', action='store_true',
help="""If the C/C++ compiler, etc. are stored with absolute paths,
make them relative. Useful for when host-python was build with
absolute paths in, e.g., a Docker image. The tools must be
in $PATH for this to work.""")
parser.add_argument('--cc', action='store',
help="""Override the C compiler from what host-python was built
with.""")
parser.add_argument('--cxx', action='store',
help="""Override the C++ compiler from what host-python was built
with.""")
parser.add_argument('--ar', action='store',
help="""Override ar (static archive) from what host-python was built
with.""")
parser.add_argument('--config-var', action='append', default=[],
help="""Override a specific config-time variable for host-python, such
as CC, CCSHARED, etc. Usage: --config-var=FOO=BAR. All values
are strings.""")
parser.add_argument('--env', action='append', default=[],
help="""An environment variable that will be added to the environment
just before executing the python build executable. May be given
multiple times. May be one of the following forms:
'FOO=BAR' to unconditionally set the value.
'FOO+=BAR' to append a value.
'FOO?=BAR' to set a value only if not already set
'FOO:=BAR' to append to a PATH-like variable, with colons
between each element.""")
parser.add_argument('--sysroot', action='store',
help="""Explicitly set the sysroot for the cross-complier toolchain.
If not given, an attempt will be made to guess. This is used
to trick some packages into finding required headers and is
optional.""")
parser.add_argument('--sysconfigdata-file', action='store',
help="""Explicitly set the sysconfigdata file path.
If not given, all sysconfigdata files will be searched and
will error if there are multiple files that have different
values. This option is a workaround for specifically
conda python where multiple sysconfigdata files exist.""")
parser.add_argument('--manylinux', action='append', default=[],
help="""Declare compatibility with the given manylinux platform tag to
enable pre-compiled wheels. This argument may be given multiple
times.""")
parser.add_argument('--machine', action='store',
help="""Override the value of os.uname().machine if cross-python is
unable to guess correctly.""")
parser.add_argument('-v', '--verbose', action='count', default=0,
help="""Verbose mode. May be specified multiple times to increase
verbosity.""")
parser.add_argument('--version', action='version',
version='crossenv %s' % __version__)
parser.add_argument('HOST_PYTHON',
help="""The host Python to use. This should be the path to the Python
executable, which may be in the source directory or an installed
directory structure.""")
parser.add_argument('ENV_DIR', nargs='+',
help="""A directory to create the environment in.""")
args = parser.parse_args()
if args.verbose == 1:
level = logging.INFO
elif args.verbose > 1:
level = logging.DEBUG
else:
level = logging.WARNING
logging.basicConfig(level=level, format='%(levelname)s: %(message)s')
try:
if args.without_pip:
args.without_cross_pip = True
env = parse_env_vars(args.env)
config_vars = parse_config_vars(args.config_var)
builder = CrossEnvBuilder(host_python=args.HOST_PYTHON,
cross_prefix=args.cross_prefix,
build_system_site_packages=args.system_site_packages,
clear=args.clear,
extra_env_vars=env,
with_cross_pip=not args.without_cross_pip,
with_build_pip=not args.without_pip,
host_sysroot=args.sysroot,
host_cc=args.cc,
host_cxx=args.cxx,
host_ar=args.ar,
host_relativize=args.relative_toolchain,
host_config_vars = config_vars,
host_sysconfigdata_file=args.sysconfigdata_file,
manylinux_tags=args.manylinux,
host_machine=args.machine,
)
for env_dir in args.ENV_DIR:
builder.create(env_dir)
except Exception as e:
logger.error('%s', e)
logger.debug('Traceback:', exc_info=True)
sys.exit(1)
83431e6fd30e1f50a5c51e21bd56b46e1fad95cb | 10747 | py | Python | doc/sphinxext/autosummary/generate.py | thorstenkranz/eegpy | ["BSD-2-Clause-FreeBSD"]
# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary.generate
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Usable as a library or script to generate automatic RST source files for
items referred to in autosummary:: directives.
Each generated RST file contains a single auto*:: directive which
extracts the docstring of the referred item.
Example Makefile rule::
generate:
sphinx-autogen source/*.rst source/generated
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import pydoc
import optparse
from jinja2 import FileSystemLoader, TemplateNotFound
from jinja2.sandbox import SandboxedEnvironment
from sphinx.ext.autosummary import import_by_name, get_documenter
from sphinx.jinja2glue import BuiltinTemplateLoader
from sphinx.util.osutil import ensuredir
def main(argv=sys.argv):
usage = """%prog [OPTIONS] SOURCEFILE ..."""
p = optparse.OptionParser(usage.strip())
p.add_option("-o", "--output-dir", action="store", type="string",
dest="output_dir", default=None,
help="Directory to place all output in")
p.add_option("-s", "--suffix", action="store", type="string",
dest="suffix", default="rst",
help="Default suffix for files (default: %default)")
p.add_option("-t", "--templates", action="store", type="string",
dest="templates", default=None,
help="Custom template directory (default: %default)")
options, args = p.parse_args(argv[1:])
if len(args) < 1:
p.error('no input files given')
generate_autosummary_docs(args, options.output_dir,
"." + options.suffix,
template_dir=options.templates)
def _simple_info(msg):
print msg
def _simple_warn(msg):
print >> sys.stderr, 'WARNING: ' + msg
# -- Generating output ---------------------------------------------------------
def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
warn=_simple_warn, info=_simple_info,
base_path=None, builder=None, template_dir=None):
showed_sources = list(sorted(sources))
if len(showed_sources) > 20:
showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:]
info('[autosummary] generating autosummary for: %s' %
', '.join(showed_sources))
if output_dir:
info('[autosummary] writing to %s' % output_dir)
if base_path is not None:
sources = [os.path.join(base_path, filename) for filename in sources]
# create our own templating environment
template_dirs = [os.path.join(os.path.dirname(__file__), 'templates')]
if builder is not None:
# allow the user to override the templates
template_loader = BuiltinTemplateLoader()
template_loader.init(builder, dirs=template_dirs)
else:
if template_dir:
template_dirs.insert(0, template_dir)
template_loader = FileSystemLoader(template_dirs)
template_env = SandboxedEnvironment(loader=template_loader)
# read
items = find_autosummary_in_files(sources)
# remove possible duplicates
items = dict([(item, True) for item in items]).keys()
# keep track of new files
new_files = []
# write
for name, path, template_name in sorted(items):
if path is None:
# The corresponding autosummary:: directive did not have
# a :toctree: option
continue
path = output_dir or os.path.abspath(path)
ensuredir(path)
try:
obj, name = import_by_name(name)
except ImportError, e:
warn('[autosummary] failed to import %r: %s' % (name, e))
continue
fn = os.path.join(path, name + suffix)
# skip it if it exists
if os.path.isfile(fn):
continue
new_files.append(fn)
f = open(fn, 'w')
try:
doc = get_documenter(obj)
if template_name is not None:
template = template_env.get_template(template_name)
else:
try:
template = template_env.get_template('autosummary/%s.rst'
% doc.objtype)
except TemplateNotFound:
template = template_env.get_template('autosummary/base.rst')
def get_members(obj, typ, include_public=[]):
items = []
for name in dir(obj):
try:
if get_documenter(getattr(obj, name)).objtype == typ:
items.append(name)
except AttributeError:
warn("[autosummary] problem accessing attribute "
"'%s' in '%s'." % (name, obj))
public = [x for x in items
if x in include_public or not x.startswith('_')]
return public, items
ns = {}
if doc.objtype == 'module':
ns['members'] = dir(obj)
ns['functions'], ns['all_functions'] = \
get_members(obj, 'function')
ns['classes'], ns['all_classes'] = \
get_members(obj, 'class')
ns['exceptions'], ns['all_exceptions'] = \
get_members(obj, 'exception')
elif doc.objtype == 'class':
ns['members'] = dir(obj)
ns['methods'], ns['all_methods'] = \
get_members(obj, 'method', ['__init__'])
ns['attributes'], ns['all_attributes'] = \
get_members(obj, 'attribute')
parts = name.split('.')
if doc.objtype in ('method', 'attribute'):
mod_name = '.'.join(parts[:-2])
cls_name = parts[-2]
obj_name = '.'.join(parts[-2:])
ns['class'] = cls_name
else:
mod_name, obj_name = '.'.join(parts[:-1]), parts[-1]
ns['fullname'] = name
ns['module'] = mod_name
ns['objname'] = obj_name
ns['name'] = parts[-1]
ns['objtype'] = doc.objtype
ns['underline'] = len(name) * '='
rendered = template.render(**ns)
f.write(rendered)
finally:
f.close()
# descend recursively to new files
if new_files:
generate_autosummary_docs(new_files, output_dir=output_dir,
suffix=suffix, warn=warn, info=info,
base_path=base_path, builder=builder,
template_dir=template_dir)
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
"""
Find out what items are documented in source/*.rst.
See `find_autosummary_in_lines`.
"""
documented = []
for filename in filenames:
f = open(filename, 'r')
lines = f.read().splitlines()
documented.extend(find_autosummary_in_lines(lines, filename=filename))
f.close()
return documented
def find_autosummary_in_docstring(name, module=None, filename=None):
"""
Find out what items are documented in the given object's docstring.
See `find_autosummary_in_lines`.
"""
try:
obj, real_name = import_by_name(name)
lines = pydoc.getdoc(obj).splitlines()
return find_autosummary_in_lines(lines, module=name, filename=filename)
except AttributeError:
pass
except ImportError, e:
print "Failed to import '%s': %s" % (name, e)
return []
def find_autosummary_in_lines(lines, module=None, filename=None):
"""
Find out what items appear in autosummary:: directives in the given lines.
Returns a list of (name, toctree, template) where *name* is a name
of an object and *toctree* the :toctree: path of the corresponding
autosummary directive (relative to the root of the file name), and
*template* the value of the :template: option. *toctree* and
*template* ``None`` if the directive does not have the
corresponding options set.
"""
autosummary_re = re.compile(r'^\s*\.\.\s+autosummary::\s*')
automodule_re = re.compile(
r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$')
module_re = re.compile(
r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
autosummary_item_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
documented = []
toctree = None
template = None
current_module = module
in_autosummary = False
for line in lines:
if in_autosummary:
m = toctree_arg_re.match(line)
if m:
toctree = m.group(1)
if filename:
toctree = os.path.join(os.path.dirname(filename),
toctree)
continue
m = template_arg_re.match(line)
if m:
template = m.group(1).strip()
continue
if line.strip().startswith(':'):
continue # skip options
m = autosummary_item_re.match(line)
if m:
name = m.group(1).strip()
if name.startswith('~'):
name = name[1:]
if current_module and \
not name.startswith(current_module + '.'):
name = "%s.%s" % (current_module, name)
documented.append((name, toctree, template))
continue
if not line.strip():
continue
in_autosummary = False
m = autosummary_re.match(line)
if m:
in_autosummary = True
toctree = None
template = None
continue
m = automodule_re.search(line)
if m:
current_module = m.group(1).strip()
# recurse into the automodule docstring
documented.extend(find_autosummary_in_docstring(
current_module, filename=filename))
continue
m = module_re.match(line)
if m:
current_module = m.group(2)
continue
return documented
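# Illustrative sketch (not part of the original module): a toctree-style
# autosummary block yields (name, toctree, template) tuples.
def _example_find_autosummary_in_lines():
    lines = [
        ".. autosummary::",
        "   :toctree: generated/",
        "",
        "   numpy.mean",
    ]
    return find_autosummary_in_lines(lines)
    # -> [("numpy.mean", "generated/", None)]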
if __name__ == '__main__':
main()
654c44fb6dfeb35bcdf8b2fca71b6ae82c08c48c | 11068 | py | Python | testing/test_browser.py | digitronik/widgetastic.core | ["Apache-2.0"]
# -*- coding: utf-8 -*-
import tempfile
from datetime import datetime
from pathlib import Path
import pytest
from widgetastic.browser import BrowserParentWrapper
from widgetastic.browser import WebElement
from widgetastic.exceptions import LocatorNotImplemented
from widgetastic.exceptions import NoSuchElementException
from widgetastic.widget import Text
from widgetastic.widget import View
@pytest.fixture()
def current_and_new_handle(request, browser, testing_page_url):
"""fixture return current and newly open window handle"""
handle = browser.new_window(url=testing_page_url)
@request.addfinalizer
def _close_window():
if handle in browser.window_handles:
browser.close_window(handle)
return browser.current_window_handle, handle
@pytest.fixture()
def invoke_alert(browser):
"""fixture to invoke sample alert."""
alert_btn = browser.element("#alert_button")
alert_btn.click()
yield
if browser.alert_present:
alert = browser.get_alert()
alert.dismiss()
def test_is_displayed(browser):
assert browser.is_displayed("#hello")
def test_is_displayed_negative(browser):
assert not browser.is_displayed("#invisible")
def test_elements_bad_locator(browser):
with pytest.raises(LocatorNotImplemented):
browser.element(1)
def test_elements_string_locator_xpath(browser):
assert len(browser.elements("//h1")) == 1
def test_elements_string_locator_css(browser):
    # TODO: Why doesn't this work properly?
# assert len(browser.elements('h1')) == 1
assert len(browser.elements("#hello")) == 1
assert len(browser.elements("h1#hello")) == 1
assert len(browser.elements("h1#hello.foo")) == 1
assert len(browser.elements("h1#hello.foo.bar")) == 1
assert len(browser.elements("h1.foo.bar")) == 1
assert len(browser.elements(".foo.bar")) == 1
def test_elements_dict(browser):
assert len(browser.elements({"xpath": "//h1"})) == 1
def test_elements_webelement(browser):
element = browser.element("#hello")
assert browser.elements(element)[0] is element
def test_elements_locatable_locator(browser):
class Object(object):
def __locator__(self):
return "#hello"
assert len(browser.elements(Object())) == 1
def test_elements_with_parent(browser):
parent = browser.elements("#random_visibility")[0]
assert len(browser.elements("./p", parent=parent, check_visibility=False)) == 5
def test_elements_check_visibility(browser):
assert len(browser.elements('//div[@id="random_visibility"]/p', check_visibility=True)) == 3
assert len(browser.elements('//div[@id="random_visibility"]/p', check_visibility=False)) == 5
def test_wait_for_element_visible(browser):
# Click on the button
browser.click("#invisible_appear_button")
try:
assert isinstance(browser.wait_for_element("#invisible_appear_p", visible=True), WebElement)
except NoSuchElementException:
pytest.fail("NoSuchElementException raised when webelement expected")
@pytest.mark.parametrize("exception", [True, False], ids=["with_exception", "without_exception"])
def test_wait_for_element_exception_control(browser, exception):
# Click on the button, element will not appear
browser.click("#invisible_appear_button")
wait_for_args = dict(
locator="#invisible_appear_p", visible=True, timeout=1.5, exception=exception
)
if exception:
with pytest.raises(NoSuchElementException):
browser.wait_for_element(**wait_for_args)
else:
assert browser.wait_for_element(**wait_for_args) is None
def test_element_only_invisible(browser):
browser.element("#hello", check_visibility=False)
def test_element_only_visible(browser):
browser.element("#invisible", check_visibility=False)
def test_element_visible_after_invisible_and_classes_and_execute_script(browser):
assert "invisible" in browser.classes(
'//div[@id="visible_invisible"]/p', check_visibility=False
)
def test_element_nonexisting(browser):
with pytest.raises(NoSuchElementException):
browser.element("#badger", check_visibility=False)
def test_move_to_element_option(browser):
assert browser.move_to_element("#myoption").tag_name == "option"
def test_click(browser):
assert len(browser.classes("#a_button")) == 0
browser.click("#a_button")
assert "clicked" in browser.classes("#a_button")
def test_raw_click(browser):
assert len(browser.classes("#a_button")) == 0
browser.raw_click("#a_button")
assert "clicked" in browser.classes("#a_button")
def test_tag(browser):
assert browser.tag("#hello") == "h1"
def test_text_visible(browser):
assert browser.text("#hello") == "Hello"
def test_text_invisible(browser):
assert browser.text("#invisible") == "This is invisible"
def test_get_attribute(browser):
assert browser.get_attribute("id", "//h1") == "hello"
def test_set_attribute(browser):
browser.set_attribute("foo", "bar", "//h1")
assert browser.get_attribute("foo", "//h1") == "bar"
def test_simple_input_send_keys_clear(browser):
browser.send_keys("test!", "#input")
assert browser.get_attribute("value", "#input") == "test!"
browser.clear("#input")
assert browser.get_attribute("value", "#input") == ""
def test_copy_paste(browser):
t = "copy and paste text"
browser.send_keys(t, "#input")
assert browser.get_attribute("value", "#input") == t
browser.copy("#input")
browser.paste("#input_paste")
assert browser.get_attribute("value", "#input_paste") == t
def test_nested_views_parent_injection(browser):
class MyView(View):
ROOT = "#proper"
class c1(View): # noqa
ROOT = ".c1"
w = Text(".lookmeup")
class c2(View): # noqa
ROOT = ".c2"
w = Text(".lookmeup")
class c3(View): # noqa
ROOT = ".c3"
w = Text(".lookmeup")
class without(View): # noqa
# This one receives the parent browser wrapper
class nested(View): # noqa
# and it should work in multiple levels
pass
view = MyView(browser)
assert isinstance(view.browser, BrowserParentWrapper)
assert len(view.c1.browser.elements(".lookmeup")) == 1
assert view.c1.w.text == "C1"
assert view.c1.browser.text(".lookmeup") == "C1"
assert len(view.c2.browser.elements(".lookmeup")) == 1
assert view.c2.w.text == "C2"
assert view.c2.browser.text(".lookmeup") == "C2"
assert len(view.c3.browser.elements(".lookmeup")) == 1
assert view.c3.w.text == "C3"
assert view.c3.browser.text(".lookmeup") == "C3"
assert len(view.browser.elements(".lookmeup")) == 3
assert view.c3.browser.text(".lookmeup") == "C3"
assert view.c1.locatable_parent is view
assert view.c1.w.locatable_parent is view.c1
assert view.without.nested.locatable_parent is view
def test_element_force_visibility_check_by_locator(browser):
class MyLocator(object):
CHECK_VISIBILITY = True # Always check visibility no matter what
def __locator__(self):
return "#invisible"
loc = MyLocator()
with pytest.raises(NoSuchElementException):
browser.element(loc)
with pytest.raises(NoSuchElementException):
browser.element(loc, check_visibility=False)
loc.CHECK_VISIBILITY = False # Never check visibility no matter what
browser.element(loc)
browser.element(loc, check_visibility=True)
def test_size(browser):
width, height = browser.size_of("#exact_dimensions")
assert width == 42
assert height == 69
def test_title(browser):
"""Test title of current window"""
assert browser.title == "Test page"
def test_current_window_handle(browser):
"""Test current window handle property"""
assert browser.current_window_handle
@pytest.mark.parametrize("focus", [False, True], ids=["no_focus", "focus"])
def test_new_window(request, browser, focus, testing_page_url):
"""Test open new window with and without focus"""
# main window handle
main_handle = browser.current_window_handle
# open new window focus/no-focus
handle = browser.new_window(url=testing_page_url, focus=focus)
@request.addfinalizer
def _close_window():
browser.close_window(handle)
assert handle
if focus:
assert handle == browser.current_window_handle
@request.addfinalizer
def _back_to_main():
browser.switch_to_window(main_handle)
else:
assert handle != browser.current_window_handle
def test_window_handles(browser, current_and_new_handle):
"""Test window handles property"""
assert len(browser.window_handles) == 2
assert set(browser.window_handles) == set(current_and_new_handle)
def test_close_window(browser, current_and_new_handle):
"""Test close window"""
main_handle, new_handle = current_and_new_handle
assert new_handle in browser.window_handles
browser.close_window(new_handle)
assert new_handle not in browser.window_handles
def test_switch_to_window(browser, current_and_new_handle):
"""Test switch to other window"""
main_handle, new_handle = current_and_new_handle
# switch to new window
browser.switch_to_window(new_handle)
assert new_handle == browser.current_window_handle
browser.switch_to_window(main_handle)
assert main_handle == browser.current_window_handle
def test_alert(browser):
"""Test alert_present, get_alert object"""
assert not browser.alert_present
alert_btn = browser.element("#alert_button")
alert_btn.click()
assert browser.alert_present
alert = browser.get_alert()
assert alert.text == "Please enter widget name:"
alert.dismiss()
assert not browser.alert_present
def test_dismiss_any_alerts(browser, invoke_alert):
"""Test dismiss_any_alerts"""
assert browser.alert_present
browser.dismiss_any_alerts()
assert not browser.alert_present
@pytest.mark.parametrize(
"cancel_text",
[(True, "User dismissed alert."), (False, "User accepted alert:")],
ids=["dismiss", "accept"],
)
@pytest.mark.parametrize("prompt", [None, "Input"], ids=["without_prompt", "with_prompt"])
def test_handle_alert(browser, cancel_text, prompt, invoke_alert):
"""Test handle_alert method with cancel and prompt"""
cancel, alert_out_text = cancel_text
assert browser.alert_present
assert browser.handle_alert(cancel=cancel, prompt=prompt)
if not cancel:
alert_out_text = alert_out_text + ("Input" if prompt else "TextBox")
assert browser.text("#alert_out") == alert_out_text
assert not browser.alert_present
def test_save_screenshot(browser):
"""Test browser save screenshot method."""
tmp_dir = tempfile._get_default_tempdir()
filename = Path(tmp_dir) / f"{datetime.now()}.png"
assert not filename.exists()
browser.save_screenshot(filename=filename.as_posix())
assert filename.exists()
30a1e0aa5664658562f4e7a10de0514f14875f83 | 4346 | py | Python | serialization/annotate.py | BookLaugh/serialization | ["MIT"]
# F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
from __future__ import absolute_import
from future.utils import PY3
from six import with_metaclass
from . import reflect
_CLASS_ANNOTATIONS_ATTR = "_class_annotations"
_ATTRIBUTE_INJECTIONS_ATTR = "_attribute_injections"
_ANNOTATIONS_PROCESSED = "_annotations_processed"
class AnnotationError(Exception):
pass
class MetaAnnotable(type):
def __init__(cls, name, bases, dct):
klasses = list(reversed(cls.mro()))
# Class Initialization
method = getattr(cls, "__class__init__", None)
if method is not None:
method(name, bases, dct)
# Attribute Injection
for k in klasses:
injections = k.__dict__.get(_ATTRIBUTE_INJECTIONS_ATTR, None)
if injections is not None:
for attr, value in injections:
setattr(k, attr, value)
del injections[:]
pending_annotations = list()
# Class Annotations
for k in klasses:
if k.__dict__.get(_ANNOTATIONS_PROCESSED, False):
continue
is_annotable = issubclass(type(k), MetaAnnotable)
annotations = k.__dict__.get(_CLASS_ANNOTATIONS_ATTR, list())
if annotations or pending_annotations:
if is_annotable:
to_process = pending_annotations + annotations
for name, methodName, args, kwargs in to_process:
method = getattr(cls, methodName, None)
if method is None:
raise AnnotationError(
"Bad annotation %s set on class "
"%s, method %s not found"
% (name, k, methodName))
method(*args, **kwargs)
pending_annotations = list()
setattr(k, _ANNOTATIONS_PROCESSED, True)
else:
pending_annotations.extend(annotations)
super(MetaAnnotable, cls).__init__(name, bases, dct)
class Annotable(with_metaclass(MetaAnnotable, object)):
__slots__ = () # To support sub-classes without __dict__
def injectClassCallback(annotationName, depth, methodName, *args, **kwargs):
"""
Inject an annotation for a class method to be called
after class initialization without dealing with metaclass.
    The depth parameter specifies the stack depth from the class definition.
"""
locals = reflect.class_locals(depth, annotationName)
annotations = locals.get(_CLASS_ANNOTATIONS_ATTR, None)
if annotations is None:
annotations = list()
locals[_CLASS_ANNOTATIONS_ATTR] = annotations
annotation = (annotationName, methodName, args, kwargs)
annotations.append(annotation)
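# Illustrative sketch of the intended pattern (hypothetical names; the exact
# ``depth`` value depends on how reflect.class_locals walks the stack):
#
#   def register(value):
#       injectClassCallback("register", 3, "_register", value)
#
#   class Example(Annotable):
#       _values = []
#
#       @classmethod
#       def _register(cls, value):
#           cls._values.append(value)
#
#       register(42)   # queued here, applied by MetaAnnotable after creation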
def injectAttribute(annotationName, depth, attr, value):
"""
    Inject an attribute in a class from its class frame.
    Use in class annotation to create methods/properties dynamically
    at class creation time without dealing with metaclass.
    The depth parameter specifies the stack depth from the class definition.
"""
locals = reflect.class_locals(depth, annotationName)
injections = locals.get(_ATTRIBUTE_INJECTIONS_ATTR, None)
if injections is None:
injections = list()
locals[_ATTRIBUTE_INJECTIONS_ATTR] = injections
injections.append((attr, value))
0c0d63be32a4257fa129d779096909a016b30efb | 3471 | py | Python | rgb_to_pointcloud.py | wassimea/rgbd_pointcloud | ["MIT"]
from utils_camera import *
import os
import math
import cv2
import numpy as np
import pyrealsense2 as rs
def colorize_depthmap(img_depth):
## colorize depth map for easy visualization
img_depth_normalized = cv2.normalize(img_depth.astype(np.float32), None, 0.0, 1.0, cv2.NORM_MINMAX) # convert to normalized floating point
img_depth_grayscale = img_depth_normalized * 255 # now to grayscale
img_depth_clr = cv2.applyColorMap(img_depth_grayscale.astype(np.uint8), cv2.COLORMAP_JET) # apply the color mapping
return img_depth_clr
def image_fusion(camera_params, depthData, clrImg=None):
"""
Given a depth image and its corresponding color image, return a colored point cloud as a vector of (x, y, z, r, g, b).
    Assumes the color image is aligned with the depth image (same resolution).
The output format is a PLY (required to view it in color in MeshLab).
"""
numberOfVertices = depthData.size
h, w = depthData.shape
# generate point cloud via numpy array functions
coords = np.indices((h, w))
# geometry
xcoords = (((coords[1] - camera_params.cx)/camera_params.fx)*depthData).flatten()
ycoords = (((coords[0] - camera_params.cy)/camera_params.fy)*depthData).flatten()
zcoords = depthData.flatten()
    # color: assumes clrImg is in OpenCV's BGR channel order
    # (as produced by get_color_depth_frames below)
    chan_red = clrImg[..., 2].flatten()
    chan_green = clrImg[..., 1].flatten()
    chan_blue = clrImg[..., 0].flatten()
    ptcloud = np.dstack((xcoords, ycoords, zcoords, chan_red, chan_green, chan_blue))[0]
return ptcloud, numberOfVertices
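# Worked example of the back-projection above (illustrative only; the
# intrinsics are made-up numbers, not RealSense calibration values):
# with fx = fy = 600, cx = 320, cy = 240 and a depth of 1000 mm at pixel
# (row=240, col=620), the point becomes
#   x = (620 - 320) / 600 * 1000 = 500 mm
#   y = (240 - 240) / 600 * 1000 = 0 mm
#   z = 1000 mm
# which output_pointcloud() below scales by 0.001 to (0.5, 0.0, 1.0) metres.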
def output_pointcloud(nVertices, ptcloud, strOutputPath):
"""
Given a point cloud produced from image_fusion, output it to a PLY file.
"""
# open the file and write out the standard ply header
outputFile = open(strOutputPath + ".ply", "w")
outputFile.write("ply\n")
outputFile.write("format ascii 1.0\n")
outputFile.write("comment generated via python script Process3DImage\n")
outputFile.write("element vertex %d\n" %(nVertices))
outputFile.write("property float x\n")
outputFile.write("property float y\n")
outputFile.write("property float z\n")
outputFile.write("property uchar red\n")
outputFile.write("property uchar green\n")
outputFile.write("property uchar blue\n")
outputFile.write("element face 0\n")
outputFile.write("property list uchar int vertex_indices\n")
outputFile.write("end_header\n")
# output the actual points
for pt in ptcloud:
dx, dy, dz = pt[0:3]
dx *= 0.001
dy *= 0.001
dz *= 0.001
r, g, b = pt[3:]
outputFile.write("%10.6f %10.6f %10.6f %d %d %d\n" %(dx, dy, dz, r, g, b))
outputFile.close()
def get_color_depth_frames():
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, width=640, height=480)
config.enable_stream(rs.stream.depth, width=640, height=480)
pipeline.start(config)
for i in range(100):
frames = pipeline.wait_for_frames()
color = frames.first(rs.stream.color)
depth = frames.first(rs.stream.depth)
color = np.asanyarray(color.get_data())
color = color[...,::-1]
depth = np.asanyarray(depth.get_data())
depth_vis = colorize_depthmap(depth)
cv2.imshow("color", color)
cv2.imshow("depth_vis", depth_vis)
cv2.waitKey(1)
return color, depth
if __name__ == "__main__":
out = "/home/wassimea/Desktop/cloud"
img_color, img_depth = get_color_depth_frames()
params = GetCameraParameters("RealSenseD435", 1.0)
ptcloud, nVertices = image_fusion(params, img_depth, img_color)
output_pointcloud(nVertices, ptcloud, out)
| 28.68595 | 142 | 0.723423 |
081f99291a27e826b09f5bfc6df65577ffb91666 | 8,034 | py | Python | scan/test/fetch/link_finders/test_data/test_find_implicit_links.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | scan/test/fetch/link_finders/test_data/test_find_implicit_links.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | scan/test/fetch/link_finders/test_data/test_find_implicit_links.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | ###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
ENV = 'env1'
CLIQUE_CONSTRAINTS = [
{
'focal_point_type': 'instance',
'constraints': ['network']
},
{
'focal_point_type': 'dummy1',
'constraints': []
},
{
'focal_point_type': 'dummy2',
'constraints': ['network', 'dummy_constraint']
},
{
'focal_point_type': 'dummy3',
'constraints': ['dummy_constraint2']
}
]
CONSTRAINTS = ['network', 'dummy_constraint', 'dummy_constraint2']
LINK_ATTRIBUTES_NONE = {}
LINK_ATTRIBUTES_NONE_2 = {}
LINK_ATTRIBUTES_EMPTY = {'attributes': []}
LINK_ATTR_V1 = {'attributes': {'network': 'v1'}}
LINK_ATTR_V1_2 = {'attributes': {'network': 'v1'}}
LINK_ATTR_V2 = {'attributes': {'network': 'v2'}}
LINK_ATTR_V1_AND_A2V2 = {'attributes': {'network': 'v1', 'attr2': 'v2'}}
LINK_TYPE_1 = {
'link_type': 'instance-vnic',
'source_id': 'instance1',
'target_id': 'vnic1'
}
LINK_TYPE_1_REVERSED = {
'link_type': 'instance-vnic',
'source_id': 'vnic1',
'target_id': 'instance1'
}
LINK_TYPE_1_2 = {
'link_type': 'instance-vnic',
'source_id': 'instance1',
'target_id': 'vnic2'
}
LINK_TYPE_2 = {
'link_type': 'vnic-vconnector',
'source_id': 'vnic1',
'target_id': 'vconnector1'
}
LINK_TYPE_3 = {
'implicit': True,
'link_type': 'instance-vconnector',
'source_id': 'instance1',
'target_id': 'vconnector1'
}
LINK_TYPE_4_NET1 = {
'environment': ENV,
'implicit': True,
'link_type': 'instance-host_pnic',
'source': 'instance1_dbid',
'source_id': 'instance1',
'target': 'host_pnic1_dbid',
'target_id': 'host_pnic1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_TYPE_5_NET2 = {
'environment': ENV,
'link_type': 'host_pnic-switch',
'source_id': 'host_pnic1',
'target': 'switch1_dbid',
'target_id': 'switch1',
'host': 'host2',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID2'}
}
LINK_TYPE_6_NET1 = {
'environment': ENV,
'link_type': 'host_pnic-switch',
'source': 'host_pnic1_dbid',
'source_id': 'host_pnic1',
'target': 'switch2_dbid',
'target_id': 'switch2',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_TYPE_7_NET1 = {
'environment': ENV,
'implicit': True,
'link_type': 'instance-switch',
'source': 'instance1_dbid',
'source_id': 'instance1',
'target': 'switch2_dbid',
'target_id': 'switch2',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_FULL_A2B_EXPLICIT = {
'environment': ENV,
'link_type': 'instance-vnic',
'source': 'instance1_dbid',
'source_id': 'instance1',
'target': 'vnic1_dbid',
'target_id': 'vnic1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_FULL_B2C_EXPLICIT = {
'environment': ENV,
'link_type': 'vnic-vconnector',
'source': 'vnic1_dbid',
'source_id': 'vnic1',
'target': 'vconnector1_dbid',
'target_id': 'vconnector1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_FULL_C2D_EXPLICIT = {
'environment': ENV,
'link_type': 'vconnector-vedge',
'source': 'vconnector1_dbid',
'source_id': 'vconnector1',
'target': 'vedge1_dbid',
'target_id': 'vedge1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_FULL_D2E_EXPLICIT = {
'environment': ENV,
'link_type': 'vedge-otep',
'source': 'vedge1_dbid',
'source_id': 'vedge1',
'target': 'otep1_dbid',
'target_id': 'otep1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_FULL_C2E_EXPLICIT = {
'environment': ENV,
'link_type': 'vconnector-otep',
'source': 'vconnector1_dbid',
'source_id': 'vconnector1',
'target': 'otep1_dbid',
'target_id': 'otep1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_FULL_A2C = {
'environment': ENV,
'implicit': True,
'link_type': 'instance-vconnector',
'source': 'instance1_dbid',
'source_id': 'instance1',
'target': 'vconnector1_dbid',
'target_id': 'vconnector1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_FULL_B2D = {
'environment': ENV,
'implicit': True,
'link_type': 'vnic-vedge',
'source': 'vnic1_dbid',
'source_id': 'vnic1',
'target': 'vedge1_dbid',
'target_id': 'vedge1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_FULL_A2D = {
'environment': ENV,
'implicit': True,
'link_type': 'instance-vedge',
'source': 'instance1_dbid',
'source_id': 'instance1',
'target': 'vedge1_dbid',
'target_id': 'vedge1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_FULL_B2E = {
'environment': ENV,
'implicit': True,
'link_type': 'vnic-otep',
'source': 'vnic1_dbid',
'source_id': 'vnic1',
'target': 'otep1_dbid',
'target_id': 'otep1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
LINK_FULL_A2E = {
'environment': ENV,
'implicit': True,
'link_type': 'instance-otep',
'source': 'instance1_dbid',
'source_id': 'instance1',
'target': 'otep1_dbid',
'target_id': 'otep1',
'host': 'host1',
'link_name': '',
'state': 'up',
'source_label': '',
'target_label': '',
'link_weight': 0,
'attributes': {'network': 'netID1'}
}
BASE_LINKS = [
{'pass': 0, 'link': LINK_FULL_A2B_EXPLICIT},
{'pass': 0, 'link': LINK_FULL_B2C_EXPLICIT},
{'pass': 0, 'link': LINK_FULL_C2D_EXPLICIT},
{'pass': 0, 'link': LINK_FULL_D2E_EXPLICIT},
# this one tests that existing explicit links are not overwritten if
# they are also achievable implicitly
{'pass': 0, 'link': LINK_FULL_C2E_EXPLICIT},
]
IMPLICIT_LINKS = [
{'pass': 1, 'link': LINK_FULL_A2C},
{'pass': 1, 'link': LINK_FULL_B2D},
{'pass': 1, 'link': LINK_FULL_B2E},
{'pass': 2, 'link': LINK_FULL_A2D},
{'pass': 2, 'link': LINK_FULL_A2E},
]
| 26.959732 | 79 | 0.55539 |
966747ef62dd478919880e67dc5e76896574a6e9 | 4,893 | py | Python | noxfile.py | supriome/furo | 2be6a9b7843fadb32a34605ec2337074eb623fc1 | [
"MIT"
] | null | null | null | noxfile.py | supriome/furo | 2be6a9b7843fadb32a34605ec2337074eb623fc1 | [
"MIT"
] | null | null | null | noxfile.py | supriome/furo | 2be6a9b7843fadb32a34605ec2337074eb623fc1 | [
"MIT"
] | null | null | null | """Development automation
"""
import datetime
import glob
import os
import tempfile
import nox
PACKAGE_NAME = "iluvatar"
nox.options.sessions = ["lint", "test"]
#
# Helpers
#
def _install_this_project_with_flit(session, *, extras=None, editable=False):
session.install("flit")
args = []
if extras:
args.append("--extras")
args.append(",".join(extras))
if editable:
args.append("--pth-file" if os.name == "nt" else "--symlink")
session.run("flit", "install", "--deps=production", *args, silent=True)
#
# Development Sessions
#
@nox.session(name="docs-live", reuse_venv=True)
def docs_live(session):
if session.posargs:
docs_dir = session.posargs[0]
additional_dependencies = session.posargs[1:]
else:
docs_dir = "docs/"
additional_dependencies = ()
build_command = "./node_modules/.bin/gulp build"
_install_this_project_with_flit(session, extras=["doc"], editable=True)
session.install("sphinx-autobuild", *additional_dependencies)
with tempfile.TemporaryDirectory() as destination:
session.run(
"sphinx-autobuild",
# for sphinx-autobuild
"--port=0",
"--watch=src/",
f"--pre-build={build_command}",
r"--re-ignore=src/.*/theme/iluvatar/static/.*\.(css|js)", # ignore the generated files
"--open-browser",
# for sphinx
"-b=dirhtml",
"-a",
docs_dir,
destination,
)
@nox.session(reuse_venv=True)
def docs(session):
# Generate relevant files prior to installation
session.run("gulp", "build", external=True)
_install_this_project_with_flit(session, extras=["doc"], editable=False)
# Generate documentation into `build/docs`
session.run("sphinx-build", "-b", "dirhtml", "-v", "docs/", "build/docs")
@nox.session(reuse_venv=True)
def lint(session):
session.install("pre-commit")
args = list(session.posargs)
args.append("--all-files")
if "CI" in os.environ:
args.append("--show-diff-on-failure")
session.run("pre-commit", "run", *args)
@nox.session
def test(session):
_install_this_project_with_flit(session, extras=["test"])
args = session.posargs or ["-n", "auto", "--cov", PACKAGE_NAME]
session.run("pytest", *args)
def get_release_versions(version_file):
marker = "__version__ = "
with open(version_file) as f:
for line in f:
if line.startswith(marker):
version = line[len(marker) + 1 : -2]
current_number = int(version.split(".dev")[-1])
break
else:
raise RuntimeError("Could not find current version.")
today = datetime.date.today()
release_version = today.strftime(f"%Y.%m.%d.beta{current_number}")
next_version = today.strftime(f"%Y.%m.%d.dev{current_number+1}")
return release_version, next_version
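# Example (illustrative): for a version file containing
#     __version__ = "2021.8.1.dev3"
# and a "today" of 2021-09-05, get_release_versions() returns
# ("2021.09.05.beta3", "2021.09.05.dev4").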
@nox.session
def release(session):
version_file = f"src/{PACKAGE_NAME}/__init__.py"
allowed_upstreams = [
f"git@github.com:supriome/{PACKAGE_NAME.replace('_', '-')}.git"
]
release_version, next_version = get_release_versions(version_file)
session.install("flit", "twine", "release-helper")
# Sanity Checks
session.run("release-helper", "version-check-validity", release_version)
session.run("release-helper", "version-check-validity", next_version)
session.run("release-helper", "directory-check-empty", "dist")
session.run("release-helper", "git-check-branch", "main")
session.run("release-helper", "git-check-clean")
session.run("release-helper", "git-check-tag", release_version, "--does-not-exist")
session.run("release-helper", "git-check-remote", "origin", *allowed_upstreams)
# Prepare release commit
session.run("release-helper", "version-bump", version_file, release_version)
session.run("git", "add", version_file, external=True)
session.run(
"git", "commit", "-m", f"Prepare release: {release_version}", external=True
)
# Build the package
session.run("gulp", "build", external=True)
session.run("flit", "build")
session.run("twine", "check", *glob.glob("dist/*"))
# Tag the commit
session.run(
# fmt: off
"git", "tag", release_version, "-m", f"Release {release_version}", "-s",
external=True,
# fmt: on
)
# Prepare back-to-development commit
session.run("release-helper", "version-bump", version_file, next_version)
session.run("git", "add", version_file, external=True)
session.run("git", "commit", "-m", "Back to development", external=True)
# Push the commits and tag.
session.run("git", "push", "origin", "main", release_version, external=True)
# Upload the distributions.
session.run("twine", "upload", *glob.glob("dist/*"))
| 29.835366 | 99 | 0.633967 |
9374e1f74d269d4a23b59718ced33eaba52ad14c | 3,919 | py | Python | envs/doom/action_space.py | Zhehui-Huang/scalable_agent | 505909ad9f2d3e9bce8bb9201e05e780002428df | [
"Apache-2.0"
] | null | null | null | envs/doom/action_space.py | Zhehui-Huang/scalable_agent | 505909ad9f2d3e9bce8bb9201e05e780002428df | [
"Apache-2.0"
] | null | null | null | envs/doom/action_space.py | Zhehui-Huang/scalable_agent | 505909ad9f2d3e9bce8bb9201e05e780002428df | [
"Apache-2.0"
] | null | null | null | import gym
from gym.spaces import Discrete, Box
from algorithms.spaces.discretized import Discretized
def key_to_action_basic(key):
from pynput.keyboard import Key
table = {Key.left: 0, Key.right: 1, Key.up: 2, Key.down: 3}
return table.get(key, None)
def doom_action_space_basic():
"""
TURN_LEFT
TURN_RIGHT
MOVE_FORWARD
MOVE_BACKWARD
"""
space = gym.spaces.Tuple((
Discrete(3), # noop, turn left, turn right
Discrete(3), # noop, forward, backward
))
space.key_to_action = key_to_action_basic
return space
def doom_action_space():
"""
Standard action space for full-featured Doom environments (e.g. deathmatch).
TODO: crouch?
TODO: strafe?
This should precisely correspond to the available_buttons configuration in the .cfg file.
This function assumes:
MOVE_FORWARD
MOVE_BACKWARD
MOVE_RIGHT
MOVE_LEFT
SELECT_NEXT_WEAPON
SELECT_PREV_WEAPON
ATTACK
SPEED
TURN_LEFT_RIGHT_DELTA
"""
return gym.spaces.Tuple((
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(3), # noop, prev_weapon, next_weapon
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
Box(-1.0, 1.0, (1,)),
))
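# Example (illustrative): a sample from this space such as
#   (1, 0, 2, 1, 0, array([0.25], dtype=float32))
# reads as: move forward, no strafing, select next weapon, attack, no sprint,
# plus a small positive turning delta (how the delta in [-1, 1] is scaled to
# an actual turn is left to the wrapper/environment that consumes the action).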
def doom_action_space_discretized():
return gym.spaces.Tuple((
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(3), # noop, prev_weapon, next_weapon
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
Discretized(11, min_action=-10.0, max_action=10.0), # turning using discretized continuous control
))
def doom_action_space_discretized_no_weap():
return gym.spaces.Tuple((
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
Discretized(11, min_action=-10.0, max_action=10.0), # turning using discretized continuous control
))
def doom_action_space_continuous_no_weap():
return gym.spaces.Tuple((
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
Box(-1.0, 1.0, (1,)),
))
def doom_action_space_discrete():
return gym.spaces.Tuple((
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(3), # noop, turn right, turn left
Discrete(3), # noop, prev_weapon, next_weapon
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
))
def doom_action_space_discrete_no_weap():
return gym.spaces.Tuple((
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(3), # noop, turn right, turn left
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
))
def doom_action_space_full_discretized(with_use=False):
"""
MOVE_FORWARD
MOVE_BACKWARD
MOVE_RIGHT
MOVE_LEFT
SELECT_WEAPON1
SELECT_WEAPON2
SELECT_WEAPON3
SELECT_WEAPON4
SELECT_WEAPON5
SELECT_WEAPON6
SELECT_WEAPON7
ATTACK
SPEED
TURN_LEFT_RIGHT_DELTA
"""
spaces = [
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(8), # noop, select weapons 1-7
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
]
if with_use:
spaces.append(Discrete(2)) # noop, use
spaces.append(Discretized(21, min_action=-12.5, max_action=12.5)) # turning using discretized continuous control
return gym.spaces.Tuple(spaces)
| 28.194245 | 117 | 0.61776 |
8e83e4b04d07b9d3c21c925fd61db823f0c08191 | 2,151 | py | Python | gallery/models.py | mary-wan/PhotoBay | 1fbfb88e168d40ed5d8e901d0a766041b7a72ae3 | [
"Unlicense"
] | 1 | 2022-01-17T13:27:02.000Z | 2022-01-17T13:27:02.000Z | gallery/models.py | mary-wan/PhotoBay | 1fbfb88e168d40ed5d8e901d0a766041b7a72ae3 | [
"Unlicense"
] | null | null | null | gallery/models.py | mary-wan/PhotoBay | 1fbfb88e168d40ed5d8e901d0a766041b7a72ae3 | [
"Unlicense"
] | null | null | null | from django.db import models
class Location(models.Model):
name = models.CharField(max_length=100)
def save_location(self):
self.save()
def delete_location(self):
self.delete()
@classmethod
def all_locations(cls):
locations = Location.objects.all()
return locations
@classmethod
def update_location(cls, id, name):
cls.objects.filter(id=id).update(name=name)
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=100)
def save_category(self):
self.save()
def delete_category(self):
self.delete()
@classmethod
def update_category(cls, id, name):
cls.objects.filter(id=id).update(name=name)
def __str__(self):
return self.name
class Image(models.Model):
image = models.ImageField(upload_to='images/')
description = models.TextField()
name = models.CharField(max_length=200)
upload_date = models.DateTimeField(auto_now_add=True)
category = models.ForeignKey(Category,on_delete=models.CASCADE)
location = models.ForeignKey(Location,on_delete=models.CASCADE)
def save_image(self):
self.save()
def delete_image(self):
self.delete()
@classmethod
    def update_image(cls, id, image, description, name, category, location):
        cls.objects.filter(id=id).update(image=image, description=description, name=name, category=category, location=location)
# @classmethod
# def update_image(cls, id ,image):
# cls.objects.filter(id = id).update(image=image)
@classmethod
def get_image_by_id(cls,id):
        image = cls.objects.filter(id=id).first()
return image
@classmethod
def search_image(cls, search_category):
images = cls.objects.filter(category__name__icontains=search_category)
return images
@classmethod
def filter_by_location(cls,search_location):
location = cls.objects.filter(location__name=search_location).all()
return location
| 26.555556 | 125 | 0.651325 |
f1bd98d3171a4b6f2d0bb79e9d9c09e5df10a18c | 5,190 | py | Python | thermo/__init__.py | tedhyu/thermo | 1966c7cba5a603984b49f22c97ff00a144d90812 | [
"MIT"
] | 1 | 2021-03-05T23:39:47.000Z | 2021-03-05T23:39:47.000Z | thermo/__init__.py | tedhyu/thermo | 1966c7cba5a603984b49f22c97ff00a144d90812 | [
"MIT"
] | 1 | 2021-12-17T21:28:17.000Z | 2021-12-17T21:28:17.000Z | thermo/__init__.py | tedhyu/thermo | 1966c7cba5a603984b49f22c97ff00a144d90812 | [
"MIT"
] | 1 | 2022-01-18T16:14:59.000Z | 2022-01-18T16:14:59.000Z | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from . import acentric
from . import activity
from . import chemical
from . import combustion
from . import critical
from . import coolprop
from . import dipole
from . import dippr
from . import datasheet
from . import electrochem
from . import elements
from . import environment
from . import eos
from . import eos_mix
from . import heat_capacity
from . import identifiers
from . import joback
from . import law
from . import lennard_jones
from . import miscdata
from . import mixture
from . import permittivity
from . import phase_change
from . import property_package
from . import reaction
from . import refractivity
from . import safety
from . import solubility
from . import stream
from . import interface
from . import thermal_conductivity
from . import triple
from . import unifac
from . import utils
from . import vapor_pressure
from . import virial
from . import viscosity
from . import volume
from .acentric import *
from .activity import *
from .chemical import *
from .combustion import *
from .critical import *
from .coolprop import *
from .dipole import *
from .dippr import *
from .datasheet import *
from .electrochem import *
from .elements import *
from .environment import *
from .eos import *
from .eos_mix import *
from .heat_capacity import *
from .joback import *
from .identifiers import *
from .law import *
from .lennard_jones import *
from .miscdata import *
from .mixture import *
from .permittivity import *
from .phase_change import *
from .property_package import *
from .reaction import *
from .refractivity import *
from .safety import *
from .solubility import *
from .stream import *
from .interface import *
from .thermal_conductivity import *
from .triple import *
from .unifac import *
from .utils import *
from .vapor_pressure import *
from .virial import *
from .viscosity import *
from .volume import *
__all__ = ['activity', 'chemical', 'combustion', 'critical',
'dipole', 'electrochem', 'elements', 'environment', 'eos', 'eos_mix',
'heat_capacity', 'identifiers', 'joback', 'law', 'lennard_jones',
'miscdata',
'permittivity', 'phase_change', 'property_package', 'reaction',
'refractivity', 'safety', 'solubility', 'interface',
'thermal_conductivity', 'triple', 'utils',
'vapor_pressure', 'virial', 'viscosity', 'volume', 'acentric', 'coolprop',
'datasheet', 'dippr', 'unifac', 'stream', 'mixture']
__all__.extend(acentric.__all__)
__all__.extend(activity.__all__)
__all__.extend(chemical.__all__)
__all__.extend(combustion.__all__)
__all__.extend(critical.__all__)
__all__.extend(coolprop.__all__)
__all__.extend(dipole.__all__)
__all__.extend(dippr.__all__)
__all__.extend(datasheet.__all__)
__all__.extend(electrochem.__all__)
__all__.extend(elements.__all__)
__all__.extend(environment.__all__)
__all__.extend(eos.__all__)
__all__.extend(eos_mix.__all__)
__all__.extend(heat_capacity.__all__)
__all__.extend(identifiers.__all__)
__all__.extend(joback.__all__)
__all__.extend(law.__all__)
__all__.extend(lennard_jones.__all__)
__all__.extend(miscdata.__all__)
__all__.extend(mixture.__all__)
__all__.extend(permittivity.__all__)
__all__.extend(phase_change.__all__)
__all__.extend(property_package.__all__)
__all__.extend(reaction.__all__)
__all__.extend(refractivity.__all__)
__all__.extend(safety.__all__)
__all__.extend(solubility.__all__)
__all__.extend(stream.__all__)
__all__.extend(interface.__all__)
__all__.extend(thermal_conductivity.__all__)
__all__.extend(triple.__all__)
__all__.extend(utils.__all__)
__all__.extend(unifac.__all__)
__all__.extend(vapor_pressure.__all__)
__all__.extend(virial.__all__)
__all__.extend(viscosity.__all__)
__all__.extend(volume.__all__)
# backwards compatibility hack to allow thermo.chemical.Mixture to still be importable
chemical.__dict__['Mixture'] = mixture.Mixture
chemical.__dict__['Stream'] = stream.Stream
# However, they cannot go in thermo.chemical's __all__ or they will appear in the
# documentation and Sphinx currently has no way to exclude them
__version__ = '0.1.39'
| 32.4375 | 86 | 0.787669 |
b50916b69ad6312c335ba382dff65bf62d6b6f40 | 21,884 | py | Python | sec_groups/classgrps.py | toonsegers/sec_groups | 32541f8e365b8ed280133c5f88aafb09c5eb2790 | [
"MIT"
] | 3 | 2021-07-21T10:15:46.000Z | 2022-01-06T02:12:05.000Z | sec_groups/classgrps.py | toonsegers/sec_groups | 32541f8e365b8ed280133c5f88aafb09c5eb2790 | [
"MIT"
] | null | null | null | sec_groups/classgrps.py | toonsegers/sec_groups | 32541f8e365b8ed280133c5f88aafb09c5eb2790 | [
"MIT"
] | null | null | null | """Form class groups.
#TODO: add references to literature.
"""
import logging
from math import floor, sqrt, log, log2
from sec_groups.tools.secgcd import (
extended_euclid_xgcd,
secure_gcd,
secure_xgcd,
secure_binary_xgcd,
secure_division,
)
from sec_groups.tools.bitlen import bit_length_integrated
from sec_groups.tools.repeat import secure_pow
from mpyc.runtime import mpc
import mpyc.gmpy as gmpy2
from sec_groups.tools.find_primes import find_primes_for_schnorr, _find_ike_prime
logger_cg = logging.getLogger("classgroups")
logger_cg.setLevel(logging.INFO)
def xgcd_(a, b):
"""Wraps extended euclid from secgcd module."""
return extended_euclid_xgcd(a, b)
def discriminant(f):
a, b, c = f[0], f[1], f[2]
return b ** 2 - 4 * a * c
def lincong(a, b, m):
"""Solve ax = b mod m
return mu, nu such that x = mu + nu n for all n in Z.
Based on Lipa Long, "Binary Quadratic Forms", 2019.
See: https://github.com/Chia-Network/vdf-competition/blob/master/classgroups.pdf
"""
g, d, e = xgcd_(a, m)
logger_cg.debug(f"In lincong, done xgcd: {g}, {d}, {e} = xgcd({a}, {m})")
q, r = divmod(b, g)
logger_cg.debug(f"In lincong, done {q}, {r} = division({b}, {g}).")
# L19 Thm. 7.1: Congruence has a solution iff gcd(a,m) | b.
if r != 0:
raise ValueError("The linear congruence has no solution")
else:
mu = (q * d) % m
logger_cg.debug(f"In lincong, done _, {mu} = division({q}*{d}, {m}).")
nu = m // g
return mu, nu
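# Worked example (illustrative): lincong(4, 2, 6) solves 4x = 2 mod 6.
# gcd(4, 6) = 2 divides 2, so solutions exist; with the Bezout pair
# d = -1, e = 1 (one valid choice), q = 2 // 2 = 1, so mu = (1 * -1) % 6 = 5
# and nu = 6 // 2 = 3, i.e. x = 5 + 3n, and indeed
# 4 * (5 + 3n) = 20 + 12n = 2 mod 6 for all n.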
def secure_lincong(a, b, m):
"""Solve ax = b mod m
return mu, nu such that x = mu + nu n for all n in Z.
"""
g, d, e = secure_xgcd(a, m)
logger_cg.debug(f"In lincong, done secure_xgcd().")
# q = floor(b/g)
# q = b // g
# r = b % g
# q, r = secure_division(b, g)
q = b / g
r = 0
logger_cg.debug(f"In lincong, done secure_division(b, g).")
if isinstance(r, int) and r != 0:
raise ValueError("The congruence has no solution")
else:
# mu = (q * d) % m
_, mu = secure_division(q * d, m)
logger_cg.debug(f"In lincong, done secure_division(q*d, m).")
# nu = m // g
# nu, _ = secure_division(m, g)
nu = m / g
return mu, nu
def check_well_formed(f):
a, b, c = f[0], f[1], f[2]
disc = b ** 2 - 4 * a * c
if a > 0 and disc < 0:
pass
else:
raise ValueError(
f"Form ({a}, {b}, {c}) does not have a > 0 and discriminant < 0: a={a}, disc={disc} "
)
def check_reduced(f):
a, b, c = f[0], f[1], f[2]
if -a < b and b <= a: # check normalized
pass
else:
return False
if a <= c:
pass
else:
return False
if a == c:
if b >= 0:
pass
else:
return False
return True
def normalize(f):
a, b, c = f[0], f[1], f[2]
group = type(f)
check_well_formed(f)
r = (a - b) // (2 * a)
eta = (a, b + 2 * r * a, a * r ** 2 + b * r + c)
return group(eta)
def reduce_form(f):
group = type(f)
check_well_formed(f)
f = normalize(f)
while not check_reduced(f):
a, b, c = f[0], f[1], f[2]
s = (c + b) // (2 * c)
f = group((c, -1 * b + 2 * s * c, c * s ** 2 - b * s + a))
return f
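# Worked example (illustrative, treating the form as a plain (a, b, c) tuple):
# reduce_form((2, 5, 4)) has discriminant 25 - 32 = -7. normalize() uses
# r = (2 - 5) // 4 = -1 and gives (2, 1, 1), which is not reduced (a > c);
# one reduction step with s = (1 + 1) // 2 = 1 yields (1, 1, 2), which is
# reduced and still has discriminant 1 - 8 = -7.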
@mpc.coroutine
async def secure_binary_reduce(f, size_b = None, leak_size_b = True):
"""Binary reduction algorithm by Agarwal and Frandsen.
Based on Algorithm 3 from AF06: 'A New GCD Algorithm for Quadratic Number
Rings with Unique Factorization' by Agarwal and Frandsen, 2006 (Aarhus)
https://users-cs.au.dk/gudmund/Documents/38870030.pdf
Requires:
f is positive definite (iff discriminant < 0 and a > 0).
NB: Option to open (leak) size(b) is default; to reduce number of
iterations of main loop. Alternative is to pass size_b bound.
"""
def size(a):
# Requires non-negative values
return bit_length_integrated(mpc, a)
def right_action_S_on_f(f):
return [f[2], -f[1], f[0]]
def right_action_Tm_on_f(m, f):
fa, fb, fc = f[0], f[1], f[2]
return [fa, fb + 2 * m * fa, (m ** 2) * fa + m * fb + fc]
sec_grp = type(f)
await mpc.returnType(sec_grp)
secint = sec_grp.sectype_of_value
a, b, c = f[0], f[1], f[2]
if size_b:
n = size_b
elif not size_b and leak_size_b:
n = await mpc.output(size(b)) # TODO: find good bound for for-loop
else:
raise NotImplementedError
for i in range(n):
sgn_b = 1 - 2 * mpc.sgn(
b, l=n + 3, LT=True
        )  # TODO: check l; if n + 0, sgn_b produces incorrect values < -1
abs_b_gt_abs_2a = sgn_b * b > 2 * a
        abs_a_gt_abs_c = a > c  # a always positive, because f positive definite
        ab_gt_0 = (sgn_b * sgn_b + sgn_b) // 2  # ab > 0 iff b > 0, since a is always positive (f positive definite)
size_abs_b = size(sgn_b * b)
size_a = size(a)
# TODO: find bound for (bit-length of) j.
j = size_abs_b - size_a - 1
# take |j| to avoid negative secint exponents. 2**j is used when |B|>2|A| and original j is positive
sgn_j = 1 - 2 * mpc.sgn(j, l=n, LT=True)
abs_j = sgn_j * j
abs_j_bits = mpc.to_bits(abs_j, n)
m = secure_pow(2, abs_j_bits, secint)
m = mpc.if_else(ab_gt_0, -m, m)
a, b, c = mpc.if_else(
abs_b_gt_abs_2a,
right_action_Tm_on_f(m, (a, b, c)),
mpc.if_else(abs_a_gt_abs_c, right_action_S_on_f((a, b, c)), [a, b, c]),
)
print(f"Secure binary reduction: {round(100*i/n)}%", end="\r")
assert f.group.discriminant < 0
m = mpc.if_else(b > 0, secint(-1), secint(1))
abs_b_gt_a = mpc.abs(b) > a
a, b, c = mpc.if_else(abs_b_gt_a, right_action_Tm_on_f(m, (a, b, c)), [a, b, c])
a_gt_c = a > c
a, b, c = mpc.if_else(
abs_b_gt_a * a_gt_c, right_action_S_on_f((a, b, c)), [a, b, c]
)
a, b, c = mpc.if_else((b < 0) * (a == c), right_action_S_on_f((a, b, c)), [a, b, c])
a, b, c = mpc.if_else(
(b < 0) * (a == -b), right_action_Tm_on_f(1, (a, b, c)), [a, b, c]
)
return sec_grp((a, b, c))
def parteucl(a, b, L):
"""Extended partial Euclides following Cohen Section 5.4.
"""
# Step 1 Initialize
v = 0
d = a
v2 = 1
v3 = b
z = 0
while abs(v3) > L:
# Step 3 Euclidean step
q, t3 = d//v3, d%v3
t2 = v - q*v2
v = v2
d = v3
v2 = t2
v3 = t3
z = z+1
# Step 2 Finished?
if z % 2:
v2 = -v2
v3 = -v3
return d, v, v2, v3, z
def nudupl(f):
"""Square(f) following Cohen, Alg. 5.4.8.
"""
L = int(((abs(f.discriminant))/4)**(1/4))
a, b, c = f[0], f[1], f[2]
# Step 1 Euclidean step
d1, u, v = extended_euclid_xgcd(b, a)
A = a//d1
B = b//d1
C = (-c*u) % A
C1 = A-C
if C1 < C:
C = -C1
# Step 2 Partial reduction
d, v, v2, v3, z = parteucl(A, C, L)
# Step 3 Special case
if z==0:
g = (B*v3+c)//d
a2 = d**2
c2 = v3**2
b2 = b + (d+v3)**2 - a2 - c2
c2 = c2 + g*d1
else:
# Step 4 Final computations
e = (c*v + B*d)//A
g = (e*v2 - B)//v
b2 = e*v2 + v*g
if d1>1:
b2 = d1*b2
v = d1*v
v2 = d1*v2
a2 = d**2
c2 = v3**2
b2 = b2 + (d+v3)**2 - a2 - c2
a2 = a2 + e*v
c2 = c2 + g*v2
f2 = type(f)((a2, b2, c2))
return f2
def square(f):
"""Square form"""
group = type(f)
a, b, c = f[0], f[1], f[2]
mu, _ = lincong(b, c, a)
A = a ** 2
B = b - 2 * a * mu
C = mu ** 2 - (b * mu - c) // a
return group((A, B, C))
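# Worked example (illustrative, treating forms as plain tuples): for
# f = (2, 1, 3) with discriminant 1 - 24 = -23, lincong(1, 3, 2) gives mu = 1,
# so square(f) = (4, -3, 2); the discriminant is preserved (9 - 32 = -23)
# and reduce_form((4, -3, 2)) returns (2, -1, 3).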
def secure_square(f):
sectype = type(f)
"""Square form"""
a, b, c = f[0], f[1], f[2]
mu, _ = secure_lincong(b, c, a)
A = a ** 2
B = b - 2 * a * mu
# C = mu ** 2 - (b * mu - c) // a
C = mu ** 2 - (b * mu - c) / a
return sectype((A, B, C))
def repeat_square(f, n):
new_f = f
for i in range(n):
        new_f = reduce_form(square(new_f))
return new_f
def nucomp(phi1, phi2):
"""Nucomp algorithm for composition of binary quadratic forms.
Per Jacobson and Van der Poorten 'Computational Aspects of NUCOMP', 2002
See: https://link.springer.com/chapter/10.1007%2F3-540-45455-1_10
    Alternatively see Cohen, 'A Course in Computational Algebraic Number Theory'
All divisions are exact (see Cohen, p. 244)
"""
delta = phi1.discriminant
# JV02 uses the following L, which is different from Cohen's L
L = int(abs(delta) ** (1 / 4))
# L = int(((abs(delta))/4)**(1/4)) # L used in Cohen's book
# Step 1
if phi1[2] < phi2[2]:
phi1, phi2 = phi2, phi1
u1, v1, w1 = phi1[0], phi1[1], phi1[2]
u2, v2, w2 = phi2[0], phi2[1], phi2[2]
s = (v1 + v2) // 2
m = v2 - s
# Step 2
F, b, c = extended_euclid_xgcd(u2, u1)
if s % F == 0: # F | s
G = F
A_x = G
B_x = m * b
B_y = u1 // G
C_y = u2 // G
D_y = s // G
# go to Step 5
else: # F does not divide s
# Step 3
G, x, y = extended_euclid_xgcd(F, s)
H = F // G
B_y = u1 // G
C_y = u2 // G
D_y = s // G
# Step 4
l = y * (b * (w1 % H) + c * (w2 % H)) % H
B_x = b * (m // H) + l * (B_y // H)
# Step 5
b_x = B_x % B_y
b_y = B_y
# Step 5a
x, y, z = 1, 0, 0
while abs(b_y) > L and b_x != 0:
# Step 5c
q, t = divmod(b_y, b_x)
b_y = b_x
b_x = t
t = y - q * x
y = x
x = t
z += 1
# Step 5b if not abs(b_y) > L and b_x != 0
if z % 2 == 1:
b_y = -b_y
y = -y
a_x = G * x
a_y = G * y
# Step 6
if z == 0:
Q1 = C_y * b_x
c_x = (Q1 - m) // B_y
d_x = (b_x * D_y - w2) // B_y
u3 = b_y * C_y
w3 = b_x * c_x - G * d_x
v3 = v2 - 2 * Q1
else:
# Step 7
c_x = (C_y * b_x - m * x) // B_y
Q1 = b_y * c_x
Q2 = Q1 + m
d_x = (D_y * b_x - w2 * x) // B_y
Q3 = y * d_x
Q4 = Q3 + D_y
d_y = Q4 // x
if b_x != 0:
c_y = Q2 // b_x
else:
c_y = (c_x * d_y - w1) // d_x
u3 = b_y * c_y - a_y * d_y
w3 = b_x * c_x - a_x * d_x
v3 = G * (Q3 + Q4) - Q1 - Q2
return type(phi1)((u3, v3, w3))
def compose(f1, f2):
"""Composition of binary quadratic forms.
Based on Lipa Long, "Binary Quadratic Forms", 2019.
See: https://github.com/Chia-Network/vdf-competition/blob/master/classgroups.pdf
"""
group = type(f1)
a1, b1, c1 = f1[0], f1[1], f1[2]
a2, b2, c2 = f2[0], f2[1], f2[2]
# step 1
g = (b1 + b2) // 2
h = -(b1 - b2) // 2
w, _, _ = xgcd_(a1, a2)
w, _, _ = xgcd_(w, g)
logger_cg.debug("Done with 2 gcds in compose.")
# step 2
j = w
s = a1 // w
t = a2 // w
u = g // w
# step 3
logger_cg.debug("Start lincong 1.")
mu, nu = lincong(t * u, h * u + s * c1, s * t)
# step 4
logger_cg.debug("Start lincong 2.")
lmb, _ = lincong(t * nu, h - t * mu, s)
# step 5
k = mu + nu * lmb
l = (k * t - h) // s
m = (t * u * k - h * u - c1 * s) // (s * t)
# step 6
A = s * t
B = j * u - (k * t + l * s)
C = k * l - j * m
# step 7
return group((A, B, C))
def secure_compose(f1, f2):
sectype = type(f1)
a1, b1, c1 = f1[0], f1[1], f1[2]
a2, b2, c2 = f2[0], f2[1], f2[2]
# step 1
g = (b1 + b2) / 2
h = -(b1 - b2) / 2
w = secure_gcd(a1, a2)
w = secure_gcd(w, g)
logger_cg.debug("Done with 2 gcds in compose.")
# step 2
j = w
s = a1 / w
t = a2 / w
u = g / w
# step 3
logger_cg.debug("Start secure_lincong 1.")
mu, nu = secure_lincong(t * u, h * u + s * c1, s * t)
# step 4
logger_cg.debug("Start secure_lincong 2.")
lmb, _ = secure_lincong(t * nu, h - t * mu, s)
# step 5
k = mu + nu * lmb
l = (k * t - h) / s
m = (t * u * k - h * u - c1 * s) / (s * t)
# step 6
A = s * t
B = j * u - (k * t + l * s)
C = k * l - j * m
# step 7
return sectype((A, B, C))
def shanks_compose(f1, f2):
"""Composition of positive definite forms.
Originally by Shanks 'Class Number, a theory of factorization,
and genera', Proc. Symp. in Pure Maths, 1969.
Taken from Coh93, Algorithm 5.4.7.
"""
# Step 1
if f1[0] > f2[0]:
f1, f2 = f2, f1
a1, b1, c1 = f1[0], f1[1], f1[2]
a2, b2, c2 = f2[0], f2[1], f2[2]
s = (b1 + b2)//2
n = b2 - s
# Step 2: First Euclidean step
if a2 % a1 == 0:
y1 = 0
d = a1
else:
d, u, v = extended_euclid_xgcd(a2, a1)
y1 = u
# Step 3: Second Euclidean step
if s % d == 0:
y2 = -1
x2 = 0
d1 = d
else:
d1, u, v = extended_euclid_xgcd(s, d)
x2 = u
y2 = -v
# Step 4: Compose
v1 = a1//d1
v2 = a2//d1
r = (y1*y2*n - x2*c2) % v1
b3 = b2 + 2*v2*r
a3 = v1*v2
c3 = (c2*d1 + r*(b2+v2*r))//v1
return type(f1)((a3, b3, c3))
def secure_shanks_compose(f1, f2):
"""Secure protocol for composition of positive definite forms.
Originally by Shanks 'Class Number, a theory of factorization,
and genera', Proc. Symp. in Pure Maths, 1969.
Taken from Coh93, Algorithm 5.4.7.
"""
# Step 1
a1, b1, c1, a2, b2, c2 = mpc.if_else(
f1[0]>f2[0],
[f2[0], f2[1], f2[2], f1[0], f1[1], f1[2]],
[f1[0], f1[1], f1[2], f2[0], f2[1], f2[2]]
)
s = (b1 + b2)/2
n = b2 - s
# Step 2: First Euclidean step
# Skip case distinction (if a1 | a2 as in Coh93) in the oblivious case.
# Case distinction is pure for performance reasons.
d, u, v = secure_xgcd(a2, a1)
y1 = u
# Step 3: Second Euclidean step
# Skip case distinction (if d | s as in Coh93) in the oblivious case.
d1, u, v = secure_xgcd(s, d)
x2 = u
y2 = -v
# Step 4: Compose
v1 = a1/d1
v2 = a2/d1
# r = (y1*y2*n - x2*c2) % v1
r_prep = (y1*y2*n - x2*c2)
_, r = secure_division(r_prep, v1)
b3 = b2 + 2*v2*r
a3 = v1*v2
c3 = (c2*d1 + r*(b2+v2*r))/v1
return type(f1)((a3, b3, c3))
def _compose_with_self(f, n):
def composed(arg):
for _ in range(n):
arg = f(arg)
return arg
return composed
def number2ideal(n, D):
def check_a(a, D):
try:
# print(legendre(D,a) ==1)
# print(a % 8 in {3, 5, 7})
return not (gmpy2.legendre(D, a) == 1 and a % 8 in {3, 5, 7})
except:
return True
# print(f"{n=}")
a = max(2, n - 1)
# print(f"a at beginning {a=}")
# while legendre(D, a) != 1 and a % 8 in {3, 5, 7}:
while check_a(a, D):
a = int(gmpy2.next_prime(a))
# print(f"next prime {a=}")
if a % 4 == 3:
# b = D**int(((a+1)/4)) % a
b = D ** ((a + 1) // 4) % a
else:
# if D**int(((a-1)/4)) % a == 1:
# b = D**int(((a+3)/8)) % a
# else:
# b = 2*D*(4*D)**int(((a-5)/8)) % a
if D ** ((a - 1) // 4) % a == 1:
b = D ** ((a + 3) // 8) % a
else:
b = 2 * D * (4 * D) ** ((a - 5) // 8) % a
if D % 2 != b:
b = a - b
return (a, b), n - a
def ideal2form(ideal, D):
a = ideal[0]
b = ideal[1]
# f = (a, b, int((b**2 - D)/(4*a)))
f = (a, b, (b ** 2 - D) // (4 * a))
    return reduce_form(f)
def number2form(n, D):
ideal, distance = number2ideal(n, D)
f = ideal2form(ideal, D)
return f, distance
def ideal2number(ideal, distance):
a, b = ideal
return a + distance
def form2ideal(f):
a, b = f[0], f[1]
return (abs(a), b)
def forminverse(f):
group = type(f)
return group((f[0], -f[1], f[2]))
def idealinverse(ideal):
return (ideal[0], -ideal[1])
def form2number(f, distance):
ideal = form2ideal(f)
return ideal2number(ideal, distance)
def principal_form(disc):
"""Construct principal form for given discriminant.
Follows Def. 5.4 from `Binary quadratic forms` by Lipa Long, 2019:
https://github.com/Chia-Network/vdf-competition/blob/master/classgroups.pdf
"""
assert disc % 4 == 0 or disc % 4 == 1
k = disc % 2
f = (1, k, (k ** 2 - disc) // 4)
return f
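# Worked example (illustrative): principal_form(-23) has k = (-23) % 2 = 1 and
# returns (1, 1, (1 + 23) // 4) = (1, 1, 6), a form of discriminant
# 1 - 24 = -23; the principal form is the identity element of the class group.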
def create_generator_of_subgroup(discriminant):
"""'Generator' as per Chia VDF competition.
See: https://www.chia.net/2018/11/07/chia-vdf-competition-guide.en.html
Note: This element generates a cyclic subgroup for given discriminant,
not per se the entire group.
"""
if (1 - discriminant) % 8 == 0:
g = (2, 1, (1 - discriminant) // 8)
else:
g = None
return g
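# Worked example (illustrative): for discriminant -23 we have
# (1 - (-23)) % 8 == 0, so the function returns g = (2, 1, 3), a form of
# discriminant 1 - 24 = -23 that generates a cyclic subgroup.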
def find_fundamental_discriminant(bit_length):
"""Find fundamental discriminant with additional property: 8 | 1 - discriminant.
Delta = 1 mod 4 and Delta is square-free, or,
Delta = 0 mod 4, Delta/4 = 2 or 3 mod 4 and Delta/4 is square-free.
Fundamental discriminants are those values which are discriminants of quadratic fields.
    Requirement 8 | (1 - discriminant) is necessary for the create_generator_of_subgroup method.
"""
p = gmpy2.next_prime(1 << bit_length - 1)
while (-p % 4) != 1 or (1 - -p) % 8 != 0:
p = gmpy2.next_prime(p)
return int(-p)
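# Worked example (illustrative): find_fundamental_discriminant(5) starts at
# next_prime(16) = 17; -17 % 4 = 3, so 17 is rejected, 19 fails the
# (1 - delta) % 8 == 0 test (20 % 8 = 4), and 23 passes both checks,
# so the function returns -23.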
def prime_form(a, grp):
"""For prime a, take b square root of discriminant mod 4a.
Algorithm 3.3 from Buchmann, Vollmer 'Binary Quadratic Forms' 2007.
"""
assert gmpy2.is_prime(a)
disc = grp.discriminant
# Take b square root of disc mod 4a, a prime.
if a % 4 == 3:
b = disc ** ((a + 1) // 4) % a
else:
if disc ** ((a - 1) // 4) % a == 1:
b = disc ** ((a + 3) // 8) % a
else:
b = 2 * disc * (4 * disc) ** ((a - 5) // 8) % a
if disc % 2 != b:
b = a - b
return grp((a, b, (b ** 2 - disc) // (4 * a)))
def _kronecker(a, b):
"""Implements Legendre symbol and (m/p) for p = 2
See BV07 Definition 3.4.3.
"""
if b==2:
if a % 2 == 0:
return 0
else:
return (-1)**((a**2-1)//8)
else:
return gmpy2.legendre(a, b)
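# Worked examples (illustrative): _kronecker(7, 2) = (-1)**((49 - 1) // 8) = 1
# (7 = -1 mod 8), _kronecker(3, 2) = (-1)**((9 - 1) // 8) = -1 (3 = 3 mod 8),
# and _kronecker(6, 2) = 0 because 6 is even.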
def generating_system(grp):
"""Based on Algorithm 9.1 from BV07.
Time: O(|group.discriminant|^(1/2+o(1))), see Section 9.6.
BV07: "Based on an idea of H.W. Lenstra. It is the fastest
known deterministic class number algorithm."
Tested based on examples from:
* BV07, Section 9.6.3
* https://math.stackexchange.com/questions/2618232/class-group-of-a-field (requires group.nucomp=True)
"""
disc = grp.discriminant
def c2(disc):
# See BV07 Prop. 9.5.1
return int(sqrt(abs(disc) / 3))
def c3(disc):
# See BV07 Prop. 9.5.3
return 6*int(abs(log2(abs(disc)))**2)
def primes(start=2, stop=c2(disc)):
n = start
while gmpy2.next_prime(n) <= stop:
n = gmpy2.next_prime(n)
yield int(n)
def update_skip_set(skip_set):
# See BV07 Section 9.6.2, p. 199
# Remove p if there exists...
for p in p_set:
# ... a prime form f = (a,b,c) with a == p
            skip_set.update(set([a[0] for a in prime_forms if a[0] == p]))
            # ... reduced form f = (a, b, p) with b <= 0
            skip_set.update(set([a[2] for a in grp_set if a[1] <= 0]))
            # ... reduced form f = (a, b, c) with p=a-b+c and 0<=2a-b<=p
for a in grp_set:
if a[0] - a[1] + a[2] == p:
skip_set.add(p)
if 0 <= 2 * a[0] - a[1] and 2 * a[0] - a[1] <= p:
skip_set.add(p)
return skip_set
p_set = []
# c = c2(disc) # Does not work for tiny discriminant = -23
c = c3(disc)
# See BV07 Section 9.6.1 and eq (8.16) for definition of P set.
p_set = [p for p in primes(1, c) if _kronecker(disc, p) != -1]
skip_set = set()
grp_set = set()
gen_set = set()
prime_forms = set()
rotor = ["-", "\\", "|", "/", "-", "\\", "|", "/"]
for p in p_set:
if not p in skip_set:
f = prime_form(p, grp)
prime_forms.add(f.value)
# Add form values to sets, because form instances are not hashable.
if f.value not in grp_set:
gen_set.add(f.value)
fnew = f
e = 1
while not fnew.value in grp_set:
grp_set.add(fnew.value)
e += 1
fnew = reduce_form(f ^ e)
print(f"Calculating generating set for {grp}: {rotor[(e//1000) % 8]}", end="\r")
# Update set P; P = p_set \ skip_set in this implementation.
skip_set = update_skip_set(skip_set)
return list(map(grp, grp_set)), list(map(grp, gen_set))
| 27.151365 | 111 | 0.483961 |
87d5a13d433b07db4fa4546b305b4f8eba62dd4a | 7,212 | py | Python | audio_visual/train.py | Chilydream/deep_avsr | 17c9833145e108333932d0c7e0a9391d20dc16f9 | [
"MIT"
] | null | null | null | audio_visual/train.py | Chilydream/deep_avsr | 17c9833145e108333932d0c7e0a9391d20dc16f9 | [
"MIT"
] | null | null | null | audio_visual/train.py | Chilydream/deep_avsr | 17c9833145e108333932d0c7e0a9391d20dc16f9 | [
"MIT"
] | 1 | 2022-01-27T07:11:13.000Z | 2022-01-27T07:11:13.000Z | """
Author: Smeet Shah
Copyright (c) 2020 Smeet Shah
File part of 'deep_avsr' GitHub repository available at -
https://github.com/lordmartian/deep_avsr
"""
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os, shutil
from config import args
from models.av_net import AVNet
from data.lrs2_dataset import LRS2Main
from data.utils import collate_fn
from utils.general import num_params, train, evaluate
def main():
matplotlib.use("Agg")
np.random.seed(args["SEED"])
torch.manual_seed(args["SEED"])
gpuAvailable = torch.cuda.is_available()
device = torch.device("cuda" if gpuAvailable else "cpu")
kwargs = {"num_workers": args["NUM_WORKERS"], "pin_memory": True} if gpuAvailable else {}
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
#declaring the train and validation datasets and their corresponding dataloaders
audioParams = {"stftWindow":args["STFT_WINDOW"], "stftWinLen":args["STFT_WIN_LENGTH"], "stftOverlap":args["STFT_OVERLAP"]}
videoParams = {"videoFPS":args["VIDEO_FPS"]}
noiseParams = {"noiseFile":args["DATA_DIRECTORY"] + "/noise.wav", "noiseProb":args["NOISE_PROBABILITY"], "noiseSNR":args["NOISE_SNR_DB"]}
trainData = LRS2Main("train", args["DATA_DIRECTORY"], args["MAIN_REQ_INPUT_LENGTH"], args["CHAR_TO_INDEX"], args["STEP_SIZE"],
audioParams, videoParams, noiseParams)
trainLoader = DataLoader(trainData, batch_size=args["BATCH_SIZE"], collate_fn=collate_fn, shuffle=True, **kwargs)
noiseParams = {"noiseFile":args["DATA_DIRECTORY"] + "/noise.wav", "noiseProb":0, "noiseSNR":args["NOISE_SNR_DB"]}
valData = LRS2Main("val", args["DATA_DIRECTORY"], args["MAIN_REQ_INPUT_LENGTH"], args["CHAR_TO_INDEX"], args["STEP_SIZE"],
audioParams, videoParams, noiseParams)
valLoader = DataLoader(valData, batch_size=args["BATCH_SIZE"], collate_fn=collate_fn, shuffle=True, **kwargs)
#declaring the model, optimizer, scheduler and the loss function
model = AVNet(args["TX_NUM_FEATURES"], args["TX_ATTENTION_HEADS"], args["TX_NUM_LAYERS"], args["PE_MAX_LENGTH"],
args["AUDIO_FEATURE_SIZE"], args["TX_FEEDFORWARD_DIM"], args["TX_DROPOUT"], args["NUM_CLASSES"])
model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args["INIT_LR"], betas=(args["MOMENTUM1"], args["MOMENTUM2"]))
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", factor=args["LR_SCHEDULER_FACTOR"],
patience=args["LR_SCHEDULER_WAIT"], threshold=args["LR_SCHEDULER_THRESH"],
threshold_mode="abs", min_lr=args["FINAL_LR"], verbose=True)
loss_function = nn.CTCLoss(blank=0, zero_infinity=False)
#removing the checkpoints directory if it exists and remaking it
if os.path.exists(args["CODE_DIRECTORY"] + "/train_checkpoints"):
while True:
ch = input("Continue and remove the 'checkpoints' directory? y/n: ")
if ch == "y":
break
elif ch == "n":
exit()
else:
print("Invalid input")
        shutil.rmtree(args["CODE_DIRECTORY"] + "/train_checkpoints")
os.mkdir(args["CODE_DIRECTORY"] + "/train_checkpoints")
os.mkdir(args["CODE_DIRECTORY"] + "/train_checkpoints/models")
os.mkdir(args["CODE_DIRECTORY"] + "/train_checkpoints/plots")
#loading the pretrained weights
if args["PRETRAINED_MODEL_FILE"] is not None:
print("\n\nPre-trained Model File: %s" %(args["PRETRAINED_MODEL_FILE"]))
print("\nLoading the pre-trained model .... \n")
model.load_state_dict(torch.load(args["CODE_DIRECTORY"] + args["PRETRAINED_MODEL_FILE"], map_location=device))
model.to(device)
print("Loading Done.\n")
trainingLossCurve = list()
validationLossCurve = list()
trainingWERCurve = list()
validationWERCurve = list()
#printing the total and trainable parameters in the model
numTotalParams, numTrainableParams = num_params(model)
print("\nNumber of total parameters in the model = %d" %(numTotalParams))
print("Number of trainable parameters in the model = %d\n" %(numTrainableParams))
print("\nTraining the model .... \n")
trainParams = {"spaceIx":args["CHAR_TO_INDEX"][" "], "eosIx":args["CHAR_TO_INDEX"]["<EOS>"], "aoProb":args["AUDIO_ONLY_PROBABILITY"],
"voProb":args["VIDEO_ONLY_PROBABILITY"]}
valParams = {"decodeScheme":"greedy", "spaceIx":args["CHAR_TO_INDEX"][" "], "eosIx":args["CHAR_TO_INDEX"]["<EOS>"], "aoProb":0, "voProb":0}
for step in range(args["NUM_STEPS"]):
#train the model for one step
trainingLoss, trainingCER, trainingWER = train(model, trainLoader, optimizer, loss_function, device, trainParams)
trainingLossCurve.append(trainingLoss)
trainingWERCurve.append(trainingWER)
#evaluate the model on validation set
validationLoss, validationCER, validationWER = evaluate(model, valLoader, loss_function, device, valParams)
validationLossCurve.append(validationLoss)
validationWERCurve.append(validationWER)
#printing the stats after each step
print("Step: %03d || Tr.Loss: %.6f Val.Loss: %.6f || Tr.CER: %.3f Val.CER: %.3f || Tr.WER: %.3f Val.WER: %.3f"
%(step, trainingLoss, validationLoss, trainingCER, validationCER, trainingWER, validationWER))
#make a scheduler step
scheduler.step(validationWER)
#saving the model weights and loss/metric curves in the checkpoints directory after every few steps
if ((step%args["SAVE_FREQUENCY"] == 0) or (step == args["NUM_STEPS"]-1)) and (step != 0):
savePath = args["CODE_DIRECTORY"] + "/train_checkpoints/models/train-step_{:04d}-wer_{:.3f}.pt".format(step, validationWER)
torch.save(model.state_dict(), savePath)
plt.figure()
plt.title("Loss Curves")
plt.xlabel("Step No.")
plt.ylabel("Loss value")
plt.plot(list(range(1, len(trainingLossCurve)+1)), trainingLossCurve, "blue", label="Train")
plt.plot(list(range(1, len(validationLossCurve)+1)), validationLossCurve, "red", label="Validation")
plt.legend()
plt.savefig(args["CODE_DIRECTORY"] + "/train_checkpoints/plots/train-step_{:04d}-loss.png".format(step))
plt.close()
plt.figure()
plt.title("WER Curves")
plt.xlabel("Step No.")
plt.ylabel("WER")
plt.plot(list(range(1, len(trainingWERCurve)+1)), trainingWERCurve, "blue", label="Train")
plt.plot(list(range(1, len(validationWERCurve)+1)), validationWERCurve, "red", label="Validation")
plt.legend()
plt.savefig(args["CODE_DIRECTORY"] + "/train_checkpoints/plots/train-step_{:04d}-wer.png".format(step))
plt.close()
print("\nTraining Done.\n")
return
if __name__ == "__main__":
main()
| 44.795031 | 143 | 0.660566 |
b86bf12f27189c74a1d1b457b3db16d21ef4ccb0 | 11,957 | py | Python | ephypype/pipelines/fif_to_inv_sol.py | annapasca/ephypype | 6dbacdd6913234a28b690b401862ff062accecc7 | [
"BSD-3-Clause"
] | 18 | 2018-04-18T12:14:52.000Z | 2022-02-25T19:31:44.000Z | ephypype/pipelines/fif_to_inv_sol.py | annapasca/ephypype | 6dbacdd6913234a28b690b401862ff062accecc7 | [
"BSD-3-Clause"
] | 106 | 2017-12-09T13:34:30.000Z | 2022-03-12T01:02:17.000Z | ephypype/pipelines/fif_to_inv_sol.py | annapasca/ephypype | 6dbacdd6913234a28b690b401862ff062accecc7 | [
"BSD-3-Clause"
] | 13 | 2017-05-28T20:38:56.000Z | 2022-03-06T15:58:02.000Z | """
Inverse Solution Pipeline
"""
# Author: Annalisa Pascarella <a.pascarella@iac.cnr.it>
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface
from ..interfaces.mne.LF_computation import LFComputation
from ..interfaces.mne.Inverse_solution import NoiseCovariance
from ..interfaces.mne.Inverse_solution import InverseSolution
from ..interfaces.mne.preproc import DefineEpochs
def create_pipeline_source_reconstruction(main_path, subjects_dir,
pipeline_name='inv_sol_pipeline',
spacing='ico-5',
inv_method='MNE',
snr=1.0,
is_epoched=False,
events_id={},
condition=None,
decim=1,
t_min=None, t_max=None,
is_evoked=False,
parc='aparc',
aseg=False,
aseg_labels=[],
noise_cov_fname='',
all_src_space=False,
ROIs_mean=True,
save_mixed_src_space=False,
is_fixed=False):
"""Source reconstruction pipeline.
Parameters
----------
main_path : str
the main path of the workflow
subjects_dir : str
Freesurfer directory
pipeline_name : str (default inv_sol_pipeline)
name of the pipeline
spacing : str (default 'ico-5')
spacing to use to setup a source space
inv_method : str (default MNE)
the inverse method to use; possible choices: MNE, dSPM, sLORETA
is_epoched : bool (default False)
        if True and events_id is None, the input data are epoched data
        in the -epo.fif format;
        if True and events_id is not None, the raw data are epoched
        according to events_id and the t_min and t_max values
is_fixed : bool (default False)
if True we use fixed orientation, otherwise the loose orientation
is applied
events_id: dict (default None)
the dict of events
    t_min, t_max: int (default None)
define the time interval in which to epoch the raw data
is_evoked: bool (default False)
if True the raw data will be averaged according to the events
contained in the dict events_id
parc: str (default 'aparc')
the parcellation defining the ROIs atlas in the source space
    aseg: bool (default False)
if True a mixed source space will be created and the sub cortical
regions defined in aseg_labels will be added to the source space
aseg_labels: list (default [])
list of substructures we want to include in the mixed source space
noise_cov_fname: str (default None)
template for the path to either the noise covariance matrix file or
the empty room data
all_src_space: bool
if True we compute the inverse for all points of the source space
ROIs_mean: bool
if True we compute the mean of estimated time series on ROIs
    save_mixed_src_space: bool (default False)
if True the mixed src space will be saved in the FS folder
raw (inputnode): str
path to raw data in fif format
sbj_id (inputnode): str
subject id
Returns
-------
pipeline : instance of Workflow
"""
pipeline = pe.Workflow(name=pipeline_name)
pipeline.base_dir = main_path
inputnode = pe.Node(IdentityInterface(fields=['sbj_id', 'raw',
'trans_file',
'events_file']),
name='inputnode')
# Lead Field computation Node
LF_computation = pe.Node(interface=LFComputation(), name='LF_computation')
LF_computation.inputs.subjects_dir = subjects_dir
LF_computation.inputs.spacing = spacing
LF_computation.inputs.aseg = aseg
if aseg:
LF_computation.inputs.aseg_labels = aseg_labels
LF_computation.inputs.save_mixed_src_space = save_mixed_src_space
pipeline.connect(inputnode, 'sbj_id', LF_computation, 'sbj_id')
pipeline.connect(inputnode, 'raw', LF_computation, 'raw_fname')
pipeline.connect(inputnode, 'trans_file', LF_computation, 'trans_file')
# Create epochs based on events_id
if is_epoched and events_id != {}:
define_epochs = pe.Node(interface=DefineEpochs(), name='define_epochs')
define_epochs.inputs.events_id = events_id
define_epochs.inputs.t_min = t_min
define_epochs.inputs.t_max = t_max
define_epochs.inputs.decim = decim
pipeline.connect(inputnode, 'raw', define_epochs, 'fif_file')
pipeline.connect(inputnode, 'events_file', define_epochs, 'events_file') # noqa
# Noise Covariance Matrix Node
create_noise_cov = pe.Node(interface=NoiseCovariance(),
name="create_noise_cov")
    print('******************** {}'.format(noise_cov_fname))
create_noise_cov.inputs.cov_fname_in = noise_cov_fname
create_noise_cov.inputs.is_epoched = is_epoched
create_noise_cov.inputs.is_evoked = is_evoked
if is_epoched and is_evoked:
pipeline.connect(define_epochs, 'epo_fif_file',
create_noise_cov, 'raw_filename')
else:
pipeline.connect(inputnode, 'raw', create_noise_cov, 'raw_filename')
# Inverse Solution Node
inv_solution = pe.Node(interface=InverseSolution(), name='inv_solution')
inv_solution.inputs.subjects_dir = subjects_dir
inv_solution.inputs.inv_method = inv_method
inv_solution.inputs.is_epoched = is_epoched
inv_solution.inputs.is_fixed = is_fixed
inv_solution.inputs.snr = snr
if is_evoked:
inv_solution.inputs.events_id = events_id
inv_solution.inputs.is_evoked = is_evoked
if condition:
inv_solution.inputs.condition = condition
inv_solution.inputs.parc = parc
inv_solution.inputs.aseg = aseg
if aseg:
inv_solution.inputs.aseg_labels = aseg_labels
inv_solution.inputs.all_src_space = all_src_space
inv_solution.inputs.ROIs_mean = ROIs_mean
pipeline.connect(inputnode, 'sbj_id', inv_solution, 'sbj_id')
if is_epoched and is_evoked:
pipeline.connect(define_epochs, 'epo_fif_file',
inv_solution, 'raw_filename')
else:
pipeline.connect(inputnode, 'raw', inv_solution, 'raw_filename')
pipeline.connect(LF_computation, 'fwd_filename',
inv_solution, 'fwd_filename')
pipeline.connect(create_noise_cov, 'cov_fname_out',
inv_solution, 'cov_filename')
return pipeline
def create_pipeline_evoked_inverse_solution(main_path, subjects_dir,
pipeline_name='evoked_inv_sol_pipeline', # noqa
spacing='ico-5',
inv_method='MNE',
snr=3.0,
parc='aparc',
aseg=False,
aseg_labels=[],
all_src_space=False,
ROIs_mean=True,
save_mixed_src_space=False,
is_fixed=False,
noise_cov_fname='',
events_id={},
condition=None):
"""Source reconstruction pipeline.
Parameters
----------
main_path : str
the main path of the workflow
subjects_dir : str
Freesurfer directory
    pipeline_name : str (default 'evoked_inv_sol_pipeline')
name of the pipeline
spacing : str (default 'ico-5')
spacing to use to setup a source space
inv_method : str (default MNE)
the inverse method to use; possible choices: MNE, dSPM, sLORETA
is_fixed : bool (default False)
if True we use fixed orientation, otherwise the loose orientation
is applied
parc: str (default 'aparc')
the parcellation defining the ROIs atlas in the source space
    aseg: bool (default False)
if True a mixed source space will be created and the sub cortical
regions defined in aseg_labels will be added to the source space
aseg_labels: list (default [])
list of substructures we want to include in the mixed source space
    noise_cov_fname: str (default '')
template for the path to either the noise covariance matrix file or
the empty room data
all_src_space: bool
if True we compute the inverse for all points of the source space
ROIs_mean: bool
if True we compute the mean of estimated time series on ROIs
    save_mixed_src_space: bool (default False)
if True the mixed src space will be saved in the FS folder
raw (inputnode): str
path to raw data in fif format
sbj_id (inputnode): str
subject id
Returns
-------
pipeline : instance of Workflow
"""
pipeline = pe.Workflow(name=pipeline_name)
pipeline.base_dir = main_path
inputnode = pe.Node(IdentityInterface(fields=['sbj_id', 'raw',
'trans_file', 'cov_filename']),
name='inputnode')
# Lead Field computation Node
LF_computation = pe.Node(interface=LFComputation(), name='LF_computation')
LF_computation.inputs.subjects_dir = subjects_dir
LF_computation.inputs.spacing = spacing
LF_computation.inputs.aseg = aseg
if aseg:
LF_computation.inputs.aseg_labels = aseg_labels
LF_computation.inputs.save_mixed_src_space = save_mixed_src_space
pipeline.connect(inputnode, 'sbj_id', LF_computation, 'sbj_id')
pipeline.connect(inputnode, 'raw', LF_computation, 'raw_fname')
pipeline.connect(inputnode, 'trans_file', LF_computation, 'trans_file')
# Noise Covariance Matrix Node
'''
create_noise_cov = pe.Node(interface=NoiseCovariance(),
name="create_noise_cov")
print('******************** {}', noise_cov_fname)
create_noise_cov.inputs.cov_fname_in = noise_cov_fname
create_noise_cov.inputs.is_epoched = True
create_noise_cov.inputs.is_evoked = True
pipeline.connect(inputnode, 'raw', create_noise_cov, 'raw_filename')
'''
# Inverse Solution Node
inv_solution = pe.Node(interface=InverseSolution(), name='inv_solution')
inv_solution.inputs.subjects_dir = subjects_dir
inv_solution.inputs.inv_method = inv_method
inv_solution.inputs.is_fixed = is_fixed
inv_solution.inputs.is_ave = True
inv_solution.inputs.snr = snr
inv_solution.inputs.parc = parc
inv_solution.inputs.aseg = aseg
if aseg:
inv_solution.inputs.aseg_labels = aseg_labels
inv_solution.inputs.all_src_space = all_src_space
inv_solution.inputs.ROIs_mean = ROIs_mean
inv_solution.inputs.events_id = events_id
inv_solution.inputs.condition = condition
pipeline.connect(inputnode, 'sbj_id', inv_solution, 'sbj_id')
pipeline.connect(inputnode, 'raw', inv_solution, 'raw_filename')
pipeline.connect(LF_computation, 'fwd_filename',
inv_solution, 'fwd_filename')
pipeline.connect(inputnode, 'cov_filename',
inv_solution, 'cov_filename')
return pipeline
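# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): it only wires up the
# pipeline factory defined above. The paths, subject id and file names below
# are hypothetical placeholders and must be replaced with real data.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    wf = create_pipeline_evoked_inverse_solution(
        main_path='/tmp/workflows',          # hypothetical workflow directory
        subjects_dir='/data/freesurfer',     # hypothetical FreeSurfer SUBJECTS_DIR
        inv_method='MNE',
        parc='aparc')
    # The inputnode declared above exposes: 'sbj_id', 'raw', 'trans_file', 'cov_filename'
    inputnode = wf.get_node('inputnode')
    inputnode.inputs.sbj_id = 'sub-01'                        # hypothetical subject id
    inputnode.inputs.raw = '/data/sub-01/ave.fif'             # hypothetical evoked data file
    inputnode.inputs.trans_file = '/data/sub-01/trans.fif'    # hypothetical coregistration file
    inputnode.inputs.cov_filename = '/data/sub-01/cov.fif'    # hypothetical noise covariance file
    wf.run()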
| 40.670068 | 92 | 0.60935 |
d9fa357c209c49bc536c956b8bb6e94c5a3ea2a3 | 5,300 | py | Python | my-src/my_env.py | w121211/rlpyt | 9d603a8cbed5bd581c49a4163e342be9708e7bd2 | [
"MIT"
] | null | null | null | my-src/my_env.py | w121211/rlpyt | 9d603a8cbed5bd581c49a4163e342be9708e7bd2 | [
"MIT"
] | null | null | null | my-src/my_env.py | w121211/rlpyt | 9d603a8cbed5bd581c49a4163e342be9708e7bd2 | [
"MIT"
] | null | null | null | import os
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
import atari_py
import cv2
from rlpyt.envs.base import Env, EnvStep
from rlpyt.spaces.int_box import IntBox
from rlpyt.spaces.float_box import FloatBox
from rlpyt.spaces.composite import Composite
from rlpyt.utils.quick_args import save__init__args
from rlpyt.samplers.collections import TrajInfo
W, H = (80, 104) # Crop two rows, then downsample by 2x (fast, clean image).
ACTION_MEANING = {
0: "NOOP",
1: "FIRE",
2: "UP",
3: "RIGHT",
4: "LEFT",
5: "DOWN",
6: "UPRIGHT",
7: "UPLEFT",
8: "DOWNRIGHT",
9: "DOWNLEFT",
10: "UPFIRE",
11: "RIGHTFIRE",
12: "LEFTFIRE",
13: "DOWNFIRE",
14: "UPRIGHTFIRE",
15: "UPLEFTFIRE",
16: "DOWNRIGHTFIRE",
17: "DOWNLEFTFIRE",
}
ACTION_INDEX = {v: k for k, v in ACTION_MEANING.items()}
# EnvInfo = namedtuple("EnvInfo", ["game_score", "traj_done"])
EnvInfo = namedtuple("EnvInfo", [])
Obs = namedtuple("Obs", ["a", "b"])
from rlpyt.agents.pg.categorical import CategoricalPgAgent
from rlpyt.models.pg.atari_ff_model import AtariFfModel
from rlpyt.models.pg.atari_lstm_model import AtariLstmModel
class MyModel(torch.nn.Module):
def __init__(
self,
# image_shape,
output_size,
# fc_sizes=512,
# use_maxpool=False,
# channels=None, # None uses default.
# kernel_sizes=None,
# strides=None,
# paddings=None,
):
super().__init__()
self.fc = torch.nn.Sequential(
torch.nn.Linear(1, 16),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(16, 32),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(32, 64),
torch.nn.ReLU(inplace=True),
)
self.pi = torch.nn.Linear(64, output_size)
self.value = torch.nn.Linear(64, 1)
# self.conv = Conv2dHeadModel(
# image_shape=image_shape,
# channels=channels or [16, 32],
# kernel_sizes=kernel_sizes or [8, 4],
# strides=strides or [4, 2],
# paddings=paddings or [0, 1],
# use_maxpool=use_maxpool,
# hidden_sizes=fc_sizes, # Applies nonlinearity at end.
# )
# self.pi = torch.nn.Linear(self.conv.output_size, output_size)
# self.value = torch.nn.Linear(self.conv.output_size, 1)
def forward(self, x, prev_action, prev_reward):
"""Feedforward layers process as [T*B,H]. Return same leading dims as
input, can be [T,B], [B], or []."""
print(x)
# img = image.type(torch.float) # Expect torch.uint8 inputs
# img = img.mul_(1.0 / 255) # From [0-255] to [0-1], in place.
# Infer (presence of) leading dimensions: [T,B], [B], or [].
# lead_dim, T, B, img_shape = infer_leading_dims(img, 3)
# fc_out = self.conv(img.view(T * B, *img_shape))
# pi = F.softmax(self.pi(fc_out), dim=-1)
# v = self.value(fc_out).squeeze(-1)
# print(x)
# print(x.shape)
fc_out = self.fc(x)
pi = F.softmax(self.pi(fc_out), dim=-1)
v = self.value(fc_out).squeeze(-1)
# Restore leading dimensions: [T,B], [B], or [], as input.
# pi, v = restore_leading_dims((pi, v), lead_dim, T, B)
return pi, v
class MyMixin:
def make_env_to_model_kwargs(self, env_spaces):
return dict(
# image_shape=env_spaces.observation.shape,
output_size=env_spaces.action.n
)
class MyAgent(MyMixin, CategoricalPgAgent):
def __init__(self, ModelCls=MyModel, **kwargs):
super().__init__(ModelCls=ModelCls, **kwargs)
class MyEnv(Env):
def __init__(self):
self.end_pos = 10
self.cur_pos = 0
# self._action_space = IntBox(low=0, high=2, shape=(2,))
self._action_space = IntBox(low=0, high=2)
self._observation_space = Composite(
[FloatBox(low=0, high=self.end_pos), FloatBox(low=0, high=self.end_pos)],
Obs,
)
def reset(self):
self._step_counter = 0
self.cur_pos = 0
# return [self.cur_pos]
# return {"a": [self.cur_pos], "b": [self.cur_pos]}
return self.get_obs()
def step(self, action):
"""
Returns:
obs
reward
done
            info
"""
print(type(action))
# assert action in [0, 1], action
# if action[0] == 0 and self.cur_pos > 0:
# self.cur_pos -= 1
# elif action[0] == 1:
# self.cur_pos += 1
if action == 0 and self.cur_pos > 0:
self.cur_pos -= 1
elif action == 1:
self.cur_pos += 1
done = self.cur_pos >= self.end_pos
# info = EnvInfo(game_score=game_score, traj_done=game_over)
info = None
reward = 1 if done else 0
self._step_counter += 1
return EnvStep(self.get_obs(), reward, done, info)
def get_obs(self):
# return self._obs.copy()
# return np.array([self.cur_pos], dtype=np.float32)
return Obs(
a=np.array([self.cur_pos], dtype=np.float32),
b=np.array([self.cur_pos], dtype=np.float32),
)
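# ---------------------------------------------------------------------------
# Minimal smoke test (not part of the original file): it only exercises MyEnv
# as defined above, without any rlpyt sampler or runner. Action values follow
# the convention used in MyEnv.step (0 = move back, 1 = move forward).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    env = MyEnv()
    obs = env.reset()  # Obs namedtuple with fields "a" and "b"
    done = False
    while not done:
        # Always move forward so the episode terminates once cur_pos reaches end_pos.
        obs, reward, done, info = env.step(1)
        print(obs, reward, done)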
| 29.608939 | 85 | 0.577358 |
58640879da77cc68dd52bd6463f5312daa7f2b72 | 11,253 | py | Python | frameworks/cocos2d-x/build/android-build.py | pedrohenriquerls/cocos2d_ruby_binding | 52d929ddcd8e4b7f613c98d73477133952b8a7b0 | [
"MIT"
] | 20 | 2015-01-23T09:03:56.000Z | 2021-08-28T17:19:38.000Z | frameworks/cocos2d-x/build/android-build.py | pedrohenriquerls/cocos2d_ruby_binding | 52d929ddcd8e4b7f613c98d73477133952b8a7b0 | [
"MIT"
] | 3 | 2015-03-31T06:13:40.000Z | 2017-10-04T12:30:29.000Z | frameworks/cocos2d-x/build/android-build.py | pedrohenriquerls/cocos2d_ruby_binding | 52d929ddcd8e4b7f613c98d73477133952b8a7b0 | [
"MIT"
] | 16 | 2015-06-08T04:10:12.000Z | 2021-08-28T17:19:38.000Z | #!/usr/bin/python
# android-build.py
# Build android
import sys
import os, os.path
import shutil
from optparse import OptionParser
CPP_SAMPLES = ['cpp-empty-test', 'cpp-tests', 'game-controller-test']
LUA_SAMPLES = ['lua-empty-test', 'lua-tests', 'lua-game-controller-test']
ALL_SAMPLES = CPP_SAMPLES + LUA_SAMPLES
class BUILD_CONSTANT:
SDK_ROOT = None
COCOS_ROOT = None
NDK_BUILD_COMMAND = None
def initBuildConstant(ndk_build_param, build_mode):
''' Checking the environment NDK_ROOT, which will be used for building
'''
try:
ndk_root = os.environ['NDK_ROOT']
ndk_build_path = os.path.join(ndk_root, "ndk-build")
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment"
sys.exit(1)
toolchainVersion = '4.8'
try:
versionFile = open(os.path.join(ndk_root, "RELEASE.TXT"))
firstLine = versionFile.readline()
if firstLine :
ndkVersion = firstLine[firstLine.index('r') : firstLine.index(' ')]
ndkVersionValue = int(filter(str.isdigit,ndkVersion))
if ndkVersionValue < 10 or cmp(ndkVersion,'r10c') < 0 :
                print '''Please use NDK r10c or above.
If you do not, your application may crash or freeze on Android L (5.0) when using BMFont and HttpClient.
For more information:
https://github.com/cocos2d/cocos2d-x/issues/9114
https://github.com/cocos2d/cocos2d-x/issues/9138\n'''
else:
toolchainVersion = '4.9'
versionFile.close()
except Exception:
        print "Could not determine your NDK version"
if toolchainVersion == '4.8':
        print 'NDK_TOOLCHAIN_VERSION is 4.8, your application may crash on Android when using C++11 regular expressions\n'
current_dir = os.path.dirname(os.path.realpath(__file__))
cocos_root = os.path.join(current_dir, "..")
BUILD_CONSTANT.COCOS_ROOT = cocos_root
    # windows should use ";" to separate module paths
platform = sys.platform
if platform == 'win32':
ndk_module_path = 'NDK_MODULE_PATH=%s;%s/external;%s/cocos' % (cocos_root, cocos_root, cocos_root)
else:
ndk_module_path = 'NDK_MODULE_PATH=%s:%s/external:%s/cocos' % (cocos_root, cocos_root, cocos_root)
''' The build process can be accelerated by running multiple concurrent job processes using the -j-option.
'''
try:
import multiprocessing
num_of_cpu = multiprocessing.cpu_count()
except Exception:
        print "Could not detect the number of CPUs, defaulting to 1"
num_of_cpu = 1
if ndk_build_param == None:
BUILD_CONSTANT.NDK_BUILD_COMMAND = '%s -j%d NDK_DEBUG=%d %s NDK_TOOLCHAIN_VERSION=%s' % (ndk_build_path, num_of_cpu, build_mode=='debug', ndk_module_path, toolchainVersion)
else:
BUILD_CONSTANT.NDK_BUILD_COMMAND = '%s -j%d NDK_DEBUG=%d %s %s NDK_TOOLCHAIN_VERSION=%s' % (ndk_build_path, num_of_cpu, build_mode=='debug', ndk_build_param, ndk_module_path, toolchainVersion)
def check_environment_variables_sdk():
''' Checking the environment ANDROID_SDK_ROOT, which will be used for building
'''
try:
BUILD_CONSTANT.SDK_ROOT = os.environ['ANDROID_SDK_ROOT']
except Exception:
print "ANDROID_SDK_ROOT not defined. Please define ANDROID_SDK_ROOT in your environment"
sys.exit(1)
def caculate_built_samples(args):
    ''' Compute the samples to be built
    'cpp' is shorthand for all cpp tests
    'lua' is shorthand for all lua tests
'''
if 'all' in args:
return ALL_SAMPLES
targets = []
if 'cpp' in args:
targets += CPP_SAMPLES
args.remove('cpp')
if 'lua' in args:
targets += LUA_SAMPLES
args.remove('lua')
targets += args
# remove duplicate elements, for example
# python android-build.py cpp hellocpp
targets = set(targets)
return list(targets)
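# Example for caculate_built_samples above (illustrative only):
# caculate_built_samples(["cpp", "lua-tests"]) expands "cpp" to every entry in
# CPP_SAMPLES, keeps "lua-tests", and drops duplicates before returning the list.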
def do_build(app_android_root, android_platform, build_mode):
command = '%s -C %s' % (BUILD_CONSTANT.NDK_BUILD_COMMAND, app_android_root)
print command
if os.system(command) != 0:
raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
elif android_platform is not None:
sdk_tool_path = os.path.join(BUILD_CONSTANT.SDK_ROOT, "tools/android")
cocoslib_path = os.path.join(BUILD_CONSTANT.COCOS_ROOT, "cocos/platform/android/java")
command = '%s update lib-project -t %s -p %s' % (sdk_tool_path,android_platform,cocoslib_path)
if os.system(command) != 0:
raise Exception("update cocos lib-project [ " + cocoslib_path + " ] fails!")
command = '%s update project -t %s -p %s -s' % (sdk_tool_path,android_platform,app_android_root)
if os.system(command) != 0:
raise Exception("update project [ " + app_android_root + " ] fails!")
buildfile_path = os.path.join(app_android_root, "build.xml")
command = 'ant clean %s -f %s -Dsdk.dir=%s' % (build_mode,buildfile_path,BUILD_CONSTANT.SDK_ROOT)
os.system(command)
def copy_files(src, dst):
for item in os.listdir(src):
path = os.path.join(src, item)
# Android can not package the file that ends with ".gz"
if not item.startswith('.') and not item.endswith('.gz') and os.path.isfile(path):
shutil.copy(path, dst)
if os.path.isdir(path):
new_dst = os.path.join(dst, item)
os.mkdir(new_dst)
copy_files(path, new_dst)
def copy_file(src_file, dst):
if not src_file.startswith('.') and not src_file.endswith('.gz') and os.path.isfile(src_file):
shutil.copy(src_file, dst)
def copy_resources(target, app_android_root):
# remove app_android_root/assets if it exists
assets_dir = os.path.join(app_android_root, "assets")
if os.path.isdir(assets_dir):
shutil.rmtree(assets_dir)
os.mkdir(assets_dir)
# copy resources(cpp samples)
if target in CPP_SAMPLES:
resources_dir = os.path.join(app_android_root, "../Resources")
if os.path.isdir(resources_dir):
copy_files(resources_dir, assets_dir)
# lua samples should copy lua script
if target in LUA_SAMPLES:
resources_dir = os.path.join(app_android_root, "../../res")
assets_res_dir = os.path.join(assets_dir, "res")
os.mkdir(assets_res_dir)
if target != "lua-tests":
copy_files(resources_dir, assets_res_dir)
src_dir = os.path.join(app_android_root, "../../src")
assets_src_dir = os.path.join(assets_dir, "src")
os.mkdir(assets_src_dir)
copy_files(src_dir, assets_src_dir)
common_script_dir = os.path.join(app_android_root, "../../../../cocos/scripting/lua-bindings/script/")
cocos_src_dir = os.path.join(assets_src_dir,"cocos")
if os.path.exists(cocos_src_dir):
shutil.rmtree(cocos_src_dir)
os.mkdir(cocos_src_dir)
copy_files(common_script_dir, cocos_src_dir)
luasocket_script_dir = os.path.join(app_android_root, "../../../../external/lua/luasocket")
for root, dirs, files in os.walk(luasocket_script_dir):
for f in files:
if os.path.splitext(f)[1] == '.lua':
fall = os.path.join(root, f)
shutil.copy(fall, assets_dir)
# lua-tests shared resources with cpp-tests
if target == "lua-tests":
resources_cocosbuilder_res_dir = os.path.join(resources_dir, "cocosbuilderRes")
assets_cocosbuilder_res_dir = os.path.join(assets_res_dir, "cocosbuilderRes")
os.mkdir(assets_cocosbuilder_res_dir)
copy_files(resources_cocosbuilder_res_dir, assets_cocosbuilder_res_dir)
resources_dir = os.path.join(app_android_root, "../../../cpp-tests/Resources")
copy_files(resources_dir, assets_res_dir)
if target == "lua-game-controller-test":
            print("copying resources for lua-game-controller-test")
resources_dir = os.path.join(app_android_root, "../../../game-controller-test/Resources")
copy_files(resources_dir, assets_res_dir)
def build_samples(target,ndk_build_param,android_platform,build_mode):
if build_mode is None:
build_mode = 'debug'
elif build_mode != 'release':
build_mode = 'debug'
initBuildConstant(ndk_build_param, build_mode)
build_targets = caculate_built_samples(target)
if android_platform is not None:
check_environment_variables_sdk()
if android_platform.isdigit():
android_platform = 'android-'+android_platform
else:
            print 'please use a valid android platform'
exit(1)
app_android_root = ''
target_proj_path_map = {
"cpp-empty-test": "tests/cpp-empty-test/proj.android",
"game-controller-test": "tests/game-controller-test/proj.android",
"cpp-tests": "tests/cpp-tests/proj.android",
"lua-empty-test": "tests/lua-empty-test/project/proj.android",
"lua-tests": "tests/lua-tests/project/proj.android",
"lua-game-controller-test": "tests/lua-game-controller-test/project/proj.android"
}
for target in build_targets:
if target in target_proj_path_map:
app_android_root = os.path.join(BUILD_CONSTANT.COCOS_ROOT, target_proj_path_map[target])
else:
print 'unknown target: %s' % target
continue
copy_resources(target, app_android_root)
do_build(app_android_root, android_platform, build_mode)
# -------------- main --------------
if __name__ == '__main__':
#parse the params
usage = """
    This script is mainly used for building the tests built-in with cocos2d-x.
Usage: %prog [options] [cpp-empty-test|cpp-tests|lua-empty-test|lua-tests|cpp|lua|all]
If you are new to cocos2d-x, I recommend you start with cpp-empty-test, lua-empty-test.
You can combine these targets like this:
python android-build.py -p 10 cpp-empty-test lua-empty-test
    Note: You should install ant to generate the apk while building the android tests. But it is optional. You can generate the apk with eclipse.
"""
parser = OptionParser(usage=usage)
parser.add_option("-n", "--ndk", dest="ndk_build_param",
help='Parameter for ndk-build')
parser.add_option("-p", "--platform", dest="android_platform",
                      help='Parameter for android-update. Without this parameter, the script just builds the dynamic library for the projects. Valid android-platform values are: [10|11|12|13|14|15|16|17|18|19]')
parser.add_option("-b", "--build", dest="build_mode",
                      help='The build mode for the java project, debug [default] or release. For more information, please refer to http://developer.android.com/tools/building/building-cmdline.html')
(opts, args) = parser.parse_args()
    print "We will use cocos console to build the tests built-in with cocos2d-x; this script will be removed in the next version.\n"
if len(args) == 0:
parser.print_help()
sys.exit(1)
else:
try:
build_samples(args, opts.ndk_build_param,opts.android_platform,opts.build_mode)
except Exception as e:
print e
sys.exit(1)
| 40.189286 | 200 | 0.660713 |
d81ccc7be06a2f9efba718df0b7f3f9ec8518198 | 19,622 | py | Python | implementations/dscgan/dscgan.py | lidotcircle/PyTorch-GAN | dcb95a05701f28a3b73ada35da4b8e7e72975642 | [
"MIT"
] | null | null | null | implementations/dscgan/dscgan.py | lidotcircle/PyTorch-GAN | dcb95a05701f28a3b73ada35da4b8e7e72975642 | [
"MIT"
] | null | null | null | implementations/dscgan/dscgan.py | lidotcircle/PyTorch-GAN | dcb95a05701f28a3b73ada35da4b8e7e72975642 | [
"MIT"
] | null | null | null | import argparse
import os, sys
import numpy as np
import itertools
import datetime
import time
import signal
import torchvision.transforms as transforms
from torchvision.transforms.functional import InterpolationMode
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torch.autograd import Variable
from models import DomainEncoder, DomainDecoder, DomainDiscriminator
from models import DomainStyleExtractor, ContentExtractor, DomainImageGenerator, weights_init_normal
from datasets import ImageDataset
from utils import ReplayBuffer, LambdaLR
import torch
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", type=int, default=0, help="epoch to start training from")
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--dataset_name", type=str, default="monet2photo", help="name of the dataset")
parser.add_argument("--batch_size", type=int, default=3, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of second order momentum of gradient")
parser.add_argument("--decay_epoch", type=int, default=100, help="epoch from which to start lr decay")
parser.add_argument("--n_cpu", type=int, default=2, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_height", type=int, default=256, help="size of image height")
parser.add_argument("--img_width", type=int, default=256, help="size of image width")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=100, help="interval between saving generator outputs")
parser.add_argument("--checkpoint_interval", type=int, default=1, help="interval between saving model checkpoints")
parser.add_argument("--n_residual_blocks", type=int, default=2, help="number of residual blocks in generator")
parser.add_argument("--lambda_ae", type=float, default=5, help="autoencoder identity loss weight")
parser.add_argument("--lambda_self", type=float, default=5, help="image self reconstruction loss weight")
parser.add_argument("--lambda_cycle", type=float, default=5, help="image cycle consistency loss weight")
parser.add_argument("--lambda_style", type=float, default=5, help="style cycle consistency loss weight")
parser.add_argument("--lambda_kld", type=float, default=5, help="style code KL divergence loss weight")
parser.add_argument("--lambda_gan", type=float, default=5, help="GAN loss weight")
opt = parser.parse_args()
print(opt)
# Create sample and checkpoint directories
os.makedirs("images/%s" % opt.dataset_name, exist_ok=True)
os.makedirs("saved_models/%s" % opt.dataset_name, exist_ok=True)
# Losses
criterion_GAN = torch.nn.MSELoss()
criterion_identity = torch.nn.L1Loss()
criterion_cycle = torch.nn.L1Loss()
cuda = torch.cuda.is_available()
input_shape = (opt.channels, opt.img_height, opt.img_width)
# Initialize encoder decoder and discriminator
DomainEncoder_A = DomainEncoder(3, 2)
DomainDecoder_A = DomainDecoder(DomainEncoder_A.out_features, 2)
DomainEncoder_B = DomainEncoder(3, 2)
DomainDecoder_B = DomainDecoder(DomainEncoder_B.out_features, 2)
DomainStyleExtractor_A = DomainStyleExtractor((64, 32, 32), output_channels= 8, heads = 4, expansion = 2, dropout = 0.1, layers=4)
DomainStyleExtractor_B = DomainStyleExtractor((64, 32, 32), output_channels= 8, heads = 4, expansion = 2, dropout = 0.1, layers=4)
ImageContentExtractor = ContentExtractor((64,32,32), heads = 4, expansion = 2, dropout = 0.1, layers=4)
DomainImageGenerator_A = DomainImageGenerator(8, 64, 2)
DomainImageGenerator_B = DomainImageGenerator(8, 64, 2)
DomainDiscriminator_A = DomainDiscriminator((3, opt.img_height, opt.img_width))
DomainDiscriminator_B = DomainDiscriminator((3, opt.img_height, opt.img_width))
model_components = [
[ DomainEncoder_A, "DomainEncoder_A" ],
[ DomainDecoder_A, "DomainDecoder_A" ],
[ DomainEncoder_B, "DomainEncoder_B" ],
[ DomainDecoder_B, "DomainDecoder_B" ],
[ DomainStyleExtractor_A, "DomainStyleExtractor_A" ],
[ DomainStyleExtractor_B, "DomainStyleExtractor_B" ],
[ ImageContentExtractor, "ImageContentExtractor" ],
[ DomainImageGenerator_A, "DomainImageGenerator_A" ],
[ DomainImageGenerator_B, "DomainImageGenerator_B" ],
[ DomainDiscriminator_A, "DomainDiscriminator_A" ],
[ DomainDiscriminator_B, "DomainDiscriminator_B" ],
]
if cuda:
for model in model_components:
model[0].cuda()
for model in model_components:
model_filename = "saved_models/%s/%s_%d.pth" % (opt.dataset_name, model[1], opt.epoch)
be_load = False
be_init = False
if os.path.exists(model_filename):
if __name__ == "__main__":
print("loading model %s" % (model_filename))
model[0].load_state_dict(torch.load(model_filename))
be_load = True
else:
model[0].apply(weights_init_normal)
be_init = True
if be_load and be_init:
        print("Inconsistent model: some parts were loaded from a previous checkpoint and some were newly initialized")
def save_models(epoch):
for model in model_components:
torch.save(model[0].state_dict(), "saved_models/%s/%s_%d.pth" % (opt.dataset_name, model[1], epoch))
def clear_gradient():
for model in model_components:
for p in model[0].parameters():
if p.grad is not None:
del p.grad
torch.cuda.empty_cache()
# Optimizers
optimizer_MainModel = torch.optim.Adam(
itertools.chain(DomainEncoder_A.parameters(), DomainDecoder_A.parameters(),
DomainStyleExtractor_A.parameters(), DomainImageGenerator_A.parameters(),
DomainEncoder_B.parameters(), DomainDecoder_B.parameters(),
DomainStyleExtractor_B.parameters(), DomainImageGenerator_B.parameters(),), lr=opt.lr, betas=(opt.b1, opt.b2)
)
optimizer_DomainA_Dis = torch.optim.Adam(DomainDiscriminator_A.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_DomainB_Dis = torch.optim.Adam(DomainDiscriminator_B.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
# Learning rate update schedulers
lr_scheduler_MainModel= torch.optim.lr_scheduler.LambdaLR(
optimizer_MainModel, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step
)
lr_scheduler_DomainA_Dis = torch.optim.lr_scheduler.LambdaLR(
optimizer_DomainA_Dis, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step
)
lr_scheduler_DomainB_Dis = torch.optim.lr_scheduler.LambdaLR(
optimizer_DomainB_Dis, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step
)
Tensor = torch.Tensor
if cuda:
Tensor = torch.cuda.FloatTensor
# Buffers of previously generated samples
fake_A_buffer = ReplayBuffer()
fake_B_buffer = ReplayBuffer()
# Image transformations
transforms_ = [
transforms.Resize(int(opt.img_height * 1.12), InterpolationMode.BICUBIC),
transforms.RandomCrop((opt.img_height, opt.img_width)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
# Training data loader
dataloader = DataLoader(
ImageDataset("../../data/%s" % opt.dataset_name, transforms_=transforms_, unaligned=True),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.n_cpu,
)
# Test data loader
val_dataloader = DataLoader(
ImageDataset("../../data/%s" % opt.dataset_name, transforms_=transforms_, unaligned=True, mode="test"),
batch_size=5,
shuffle=True,
num_workers=1,
)
def sample_images(batches_done):
"""Saves a generated sample from the test set"""
imgs = next(iter(val_dataloader))
DomainEncoder_A.eval()
DomainDecoder_A.eval()
DomainEncoder_B.eval()
DomainDecoder_B.eval()
real_A = Variable(imgs["A"].type(Tensor))
real_B = Variable(imgs["B"].type(Tensor))
fake_A = DomainDecoder_A(DomainEncoder_A(real_A))
fake_B = DomainDecoder_B(DomainEncoder_B(real_B))
# Arange images along x-axis
real_A = make_grid(real_A, nrow=5, normalize=True)
real_B = make_grid(real_B, nrow=5, normalize=True)
fake_A = make_grid(fake_A, nrow=5, normalize=True)
fake_B = make_grid(fake_B, nrow=5, normalize=True)
# Arange images along y-axis
image_grid = torch.cat((real_A, fake_A, real_B, fake_B), 1)
save_image(image_grid, "images/%s/%s.png" % (opt.dataset_name, batches_done), normalize=False)
saved_epoch = opt.epoch
is_training_process = False
def interrupt_exit(signum, frame):
if not is_training_process:
sys.exit(1)
print("saving models...")
save_models(saved_epoch)
sys.exit(0)
signal.signal(signal.SIGINT, interrupt_exit)
def gaussian_reparameterize_sample(mu: Tensor, logvar: Tensor) -> Tensor:
"""
:param mu: (Tensor) Mean of the latent Gaussian
    :param logvar: (Tensor) Log-variance of the latent Gaussian
    :return: (Tensor) a sample z = mu + std * eps drawn with the reparameterization trick
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
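# Note on the function above: the reparameterization trick rewrites a sample
# z ~ N(mu, sigma^2) as z = mu + sigma * eps with eps ~ N(0, 1), so the sampling
# step stays differentiable with respect to mu and logvar during backpropagation.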
def image_gaussian_reparameterize_sample(mu: Tensor, logvar: Tensor) -> Tensor:
assert len(mu.shape) == 4 and len(logvar.shape) == 4
bs, c, h, w = mu.shape
mu = mu.view(bs, c * h * w)
logvar = logvar.view(bs, c * h * w)
val = gaussian_reparameterize_sample(mu, logvar)
val = val.view(bs, c, h, w)
return val
# ----------
# Training
# ----------
def main():
global saved_epoch, is_training_process
is_training_process = True
epoch_start = saved_epoch
prev_time = time.time()
for epoch in range(epoch_start, opt.n_epochs):
saved_epoch = epoch
for i, batch in enumerate(dataloader):
# Set model input
real_A = Variable(batch["A"].type(Tensor))
real_B = Variable(batch["B"].type(Tensor))
# Adversarial ground truths
valid = Variable(Tensor(np.ones((real_A.size(0), *DomainDiscriminator_A.output_shape))), requires_grad=False)
fake = Variable(Tensor(np.zeros((real_A.size(0), *DomainDiscriminator_A.output_shape))), requires_grad=False)
log_statistics = {}
# ------------------
# Train MainModel
# ------------------
optimizer_MainModel.zero_grad()
DomainEncoder_A.train()
DomainDecoder_A.train()
DomainStyleExtractor_A.train()
DomainImageGenerator_A.train()
DomainEncoder_B.train()
DomainDecoder_B.train()
DomainStyleExtractor_B.train()
DomainImageGenerator_B.train()
ImageContentExtractor.train()
interm_a = DomainEncoder_A(real_A)
interm_b = DomainEncoder_B(real_B)
fake_A = DomainDecoder_A(interm_a)
fake_B = DomainDecoder_B(interm_b)
# Autoencoder Identity loss
loss_ae_id_A = criterion_identity(fake_A, real_A)
loss_ae_id_B = criterion_identity(fake_B, real_B)
log_statistics["AE Identity Loss"] = loss_ae_id_A.item() + loss_ae_id_B.item()
# Autoencoder GAN loss
loss_ae_adv_A = criterion_GAN(DomainDiscriminator_A(fake_A), valid)
loss_ae_adv_B = criterion_GAN(DomainDiscriminator_B(fake_B), valid)
log_statistics["AE GAN Loss"] = loss_ae_adv_A.mean().item() + loss_ae_adv_B.mean().item()
# Self-Translation loss
style_code_a_mu, style_code_a_logvar = DomainStyleExtractor_A(interm_a)
style_code_b_mu, style_code_b_logvar = DomainStyleExtractor_B(interm_b)
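            # Closed-form KL divergence between N(mu, sigma^2) and N(0, 1):
            # KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), summed over the
            # style-code dimensions; the two lines below compute exactly this term.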
loss_kld_a = -0.5 * torch.sum(-style_code_a_logvar.exp() - torch.pow(style_code_a_mu,2) + style_code_a_logvar + 1, dim = (1,2,3))
loss_kld_b = -0.5 * torch.sum(-style_code_b_logvar.exp() - torch.pow(style_code_b_mu,2) + style_code_b_logvar + 1, dim = (1,2,3))
log_statistics["KLDiv"] = loss_kld_a.mean().item() + loss_kld_b.mean().item()
style_code_a = image_gaussian_reparameterize_sample(style_code_a_mu, style_code_a_logvar)
style_code_b = image_gaussian_reparameterize_sample(style_code_b_mu, style_code_b_logvar)
content_a = ImageContentExtractor(interm_a)
content_b = ImageContentExtractor(interm_b)
self_gen_a = DomainImageGenerator_A(style_code_a, content_a)
self_gen_b = DomainImageGenerator_B(style_code_b, content_b)
loss_self_a = criterion_identity(self_gen_a, real_A)
loss_self_b = criterion_identity(self_gen_b, real_B)
log_statistics["Self Translation Loss"] = loss_self_a.mean().item() + loss_self_b.mean().item()
# TODO GAN loss of loss_self
fake_gen_a = DomainImageGenerator_A(style_code_a, content_b)
fake_gen_b = DomainImageGenerator_B(style_code_b, content_a)
loss_gen_adv_a = criterion_GAN(DomainDiscriminator_A(fake_gen_a), valid)
loss_gen_adv_b = criterion_GAN(DomainDiscriminator_B(fake_gen_b), valid)
log_statistics["Self Translation ADV Loss"] = loss_gen_adv_a.mean().item() + loss_gen_adv_b.mean().item()
# Image Cycle Consistency loss
fake_gen_a_iterm = DomainEncoder_A(fake_gen_a)
fake_gen_b_iterm = DomainEncoder_B(fake_gen_b)
fake_gen_a_style_mu, fake_gen_a_style_logvar = DomainStyleExtractor_A(fake_gen_a_iterm)
fake_gen_b_style_mu, fake_gen_b_style_logvar = DomainStyleExtractor_B(fake_gen_b_iterm)
fake_gen_a_style = image_gaussian_reparameterize_sample(fake_gen_a_style_mu, fake_gen_a_style_logvar)
fake_gen_b_style = image_gaussian_reparameterize_sample(fake_gen_b_style_mu, fake_gen_b_style_logvar)
fake_gen_a_content = ImageContentExtractor(fake_gen_a_iterm)
fake_gen_b_content = ImageContentExtractor(fake_gen_b_iterm)
cycle_a = DomainImageGenerator_A(fake_gen_a_style, fake_gen_b_content)
cycle_b = DomainImageGenerator_B(fake_gen_b_style, fake_gen_a_content)
loss_cycle_a = criterion_identity(real_A, cycle_a)
loss_cycle_b = criterion_identity(real_B, cycle_b)
log_statistics["Cycle Consistency Loss"] = loss_cycle_a.mean().item() + loss_cycle_b.mean().item()
loss_cycle_a_adv = criterion_GAN(DomainDiscriminator_A(cycle_a), valid)
loss_cycle_b_adv = criterion_GAN(DomainDiscriminator_B(cycle_b), valid)
log_statistics["Cycle Consistency ADV Loss"] = loss_cycle_a_adv.mean().item() + loss_cycle_b_adv.mean().item()
# Style Self-Translation loss
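            # Draw a random style code, generate an image from it together with the
            # extracted content, then re-encode the result and ask the style extractor
            # to recover the original code (a latent/style reconstruction constraint).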
random_style_a = Variable(Tensor(np.random.normal(0, 1, style_code_a_mu.shape)))
random_style_b = Variable(Tensor(np.random.normal(0, 1, style_code_a_mu.shape)))
s_image_a = DomainImageGenerator_A(random_style_a, content_a)
s_image_b = DomainImageGenerator_B(random_style_b, content_b)
# TODO GAN loss of s_image
s_image_a_iterm = DomainEncoder_A(s_image_a)
s_image_b_iterm = DomainEncoder_B(s_image_b)
s_image_a_style_mu, _ = DomainStyleExtractor_A(s_image_a_iterm)
s_image_b_style_mu, _ = DomainStyleExtractor_B(s_image_b_iterm)
loss_style_recons_a = criterion_identity(random_style_a, s_image_a_style_mu)
loss_style_recons_b = criterion_identity(random_style_b, s_image_b_style_mu)
log_statistics["Style Self-Translation Loss"] = loss_style_recons_a.mean().item() + loss_style_recons_b.mean().item()
# total loss
loss_sum = (loss_ae_id_A + loss_ae_id_B) * opt.lambda_ae + (loss_ae_adv_A + loss_ae_adv_B) * opt.lambda_gan + \
(loss_kld_a + loss_kld_b) * opt.lambda_kld + \
(loss_self_a + loss_self_b) * opt.lambda_self + (loss_gen_adv_a + loss_gen_adv_b) * opt.lambda_gan + \
(loss_cycle_a + loss_cycle_b) * opt.lambda_cycle + (loss_cycle_a_adv + loss_cycle_b_adv) * opt.lambda_gan + \
(loss_style_recons_a + loss_style_recons_b) * opt.lambda_style
loss_sum.backward()
optimizer_MainModel.step()
# -----------------------
# Train Discriminator A
# -----------------------
optimizer_DomainA_Dis.zero_grad()
DomainDiscriminator_A.train()
loss_real = criterion_GAN(DomainDiscriminator_A(real_A), valid)
fake_A_ = fake_A_buffer.push_and_pop(fake_A)
loss_fake = criterion_GAN(DomainDiscriminator_A(fake_A_.detach()), fake)
loss_D_A = (loss_real + loss_fake) / 2
log_statistics["Discriminator A Loss"] = loss_D_A.mean().item()
loss_D_A.backward()
optimizer_DomainA_Dis.step()
# -----------------------
# Train Discriminator B
# -----------------------
optimizer_DomainB_Dis.zero_grad()
DomainDiscriminator_B.train()
loss_real = criterion_GAN(DomainDiscriminator_B(real_B), valid)
            fake_B_ = fake_B_buffer.push_and_pop(fake_B)
loss_fake = criterion_GAN(DomainDiscriminator_B(fake_B_.detach()), fake)
loss_D_B = (loss_real + loss_fake) / 2
log_statistics["Discriminator B Loss"] = loss_D_B.mean().item()
loss_D_B.backward()
optimizer_DomainB_Dis.step()
# --------------
# Log Progress
# --------------
# Determine approximate time left
batches_done = epoch * len(dataloader) + i
batches_left = opt.n_epochs * len(dataloader) - batches_done
time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
sys.stdout.write(
"\r[Epoch %d/%d] [Batch %d/%d] [%s] ETA: %s"
% (
epoch,
opt.n_epochs,
i,
len(dataloader),
str(log_statistics),
time_left,
)
)
# If at sample interval save image
if batches_done % opt.sample_interval == 0:
sample_images(batches_done)
# Update learning rates
lr_scheduler_MainModel.step()
lr_scheduler_DomainA_Dis.step()
lr_scheduler_DomainB_Dis.step()
if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:
# Save model checkpoints
save_models(epoch)
if __name__ == "__main__":
while True:
try:
main()
except RuntimeError as e:
if 'out of memory' in str(e):
print("|Warning: out of memory")
clear_gradient()
torch.cuda.empty_cache()
else:
raise e
| 47.168269 | 141 | 0.663184 |
c414ed5cf409a028adfd79fd29fff24a2361ead3 | 1,080 | py | Python | database/database.py | kurtesy/ibm_phone_book | 53b87b6224f73eae7430bba0bed4763197bd9dc0 | [
"MIT"
] | null | null | null | database/database.py | kurtesy/ibm_phone_book | 53b87b6224f73eae7430bba0bed4763197bd9dc0 | [
"MIT"
] | null | null | null | database/database.py | kurtesy/ibm_phone_book | 53b87b6224f73eae7430bba0bed4763197bd9dc0 | [
"MIT"
] | null | null | null | import os
from config.dev import db, meta, session, Base, DB_NAME
from data_model.phone_book_model import PhoneBook
# Data to initialize database with
TEST_DATA = [
{"_id": 1, "sur_name": "Patel", "first_name": "Nishant", "phone_number": 1234567890, "address": "Hyderabad"},
{"_id": 2, "sur_name": "abc", "first_name": "xyz", "phone_number": 9876543210, "address": "Hyderabad"},
{"_id": 3, "sur_name": "Prasad", "first_name": "Ram", "phone_number": 9999999999, "address": "Hyderabad"}
]
def main():
# Delete database file if it exists currently
if os.path.exists(DB_NAME):
os.remove(DB_NAME)
# Create the database
meta.create_all(db)
# Create All Tables
Base.metadata.create_all(db)
    # iterate over the TEST_DATA records and populate the database
for data in TEST_DATA:
p = PhoneBook(_id=data["_id"] ,sur_name=data["sur_name"], first_name=data["first_name"],
phone_number=data["phone_number"], address=data["address"])
session.add(p)
session.commit() | 37.241379 | 114 | 0.643519 |
efd658e64aefbef24cd9917769c3726292074199 | 320 | py | Python | source/python/Fibonacci.py | JoHyukJun/algorithm-analysis | 3eda22ce0eeb52490702206d73c04cff1eb3e72d | [
"Apache-2.0"
] | null | null | null | source/python/Fibonacci.py | JoHyukJun/algorithm-analysis | 3eda22ce0eeb52490702206d73c04cff1eb3e72d | [
"Apache-2.0"
] | null | null | null | source/python/Fibonacci.py | JoHyukJun/algorithm-analysis | 3eda22ce0eeb52490702206d73c04cff1eb3e72d | [
"Apache-2.0"
] | null | null | null | '''
main.py
Created by JO HYUK JUN on 2021
Copyright © 2021 JO HYUK JUN. All rights reserved.
'''
import sys
n = int(sys.stdin.readline())
arr = [0 for _ in range(n + 1)]
arr[0] = 0
if n < 1:
    # F(0) = 0: print it and stop here so the indexing below is never reached.
    print(0)
    sys.exit(0)
arr[1] = 1
for i in range(2, n + 1):
arr[i] = arr[i - 1] + arr[i - 2]
print(arr[n]) | 11.851852 | 54 | 0.5375 |
6e5ff50268172c5ad17bacb169b78c3429d47adf | 74 | py | Python | aiopg/sa/utils.py | arssher/aiopg | ed69a066608ac4788b2bc8a0cdd03690f22adb3a | [
"BSD-2-Clause"
] | 1,307 | 2015-01-06T15:52:21.000Z | 2022-03-25T16:04:53.000Z | aiopg/sa/utils.py | arssher/aiopg | ed69a066608ac4788b2bc8a0cdd03690f22adb3a | [
"BSD-2-Clause"
] | 765 | 2015-01-11T10:17:57.000Z | 2022-01-29T13:04:30.000Z | aiopg/sa/utils.py | arssher/aiopg | ed69a066608ac4788b2bc8a0cdd03690f22adb3a | [
"BSD-2-Clause"
] | 194 | 2015-02-20T09:29:30.000Z | 2022-03-03T19:49:19.000Z | import sqlalchemy
SQLALCHEMY_VERSION = sqlalchemy.__version__.split(".")
| 18.5 | 54 | 0.810811 |
c12f4e5ce07891d645671de7f9d5eb1e358271cc | 5,894 | py | Python | google/cloud/gsuiteaddons/v1/google-cloud-workspace-add-ons-v1-py/google/cloud/workspace_add_ons_v1/services/g_suite_add_ons/pagers.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/gsuiteaddons/v1/google-cloud-workspace-add-ons-v1-py/google/cloud/workspace_add_ons_v1/services/g_suite_add_ons/pagers.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/gsuiteaddons/v1/google-cloud-workspace-add-ons-v1-py/google/cloud/workspace_add_ons_v1/services/g_suite_add_ons/pagers.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator
from google.cloud.workspace_add_ons_v1.types import gsuiteaddons
class ListDeploymentsPager:
"""A pager for iterating through ``list_deployments`` requests.
This class thinly wraps an initial
:class:`google.cloud.workspace_add_ons_v1.types.ListDeploymentsResponse` object, and
provides an ``__iter__`` method to iterate through its
``deployments`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListDeployments`` requests and continue to iterate
through the ``deployments`` field on the
corresponding responses.
All the usual :class:`google.cloud.workspace_add_ons_v1.types.ListDeploymentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., gsuiteaddons.ListDeploymentsResponse],
request: gsuiteaddons.ListDeploymentsRequest,
response: gsuiteaddons.ListDeploymentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.workspace_add_ons_v1.types.ListDeploymentsRequest):
The initial request object.
response (google.cloud.workspace_add_ons_v1.types.ListDeploymentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = gsuiteaddons.ListDeploymentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[gsuiteaddons.ListDeploymentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[gsuiteaddons.Deployment]:
for page in self.pages:
yield from page.deployments
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListDeploymentsAsyncPager:
"""A pager for iterating through ``list_deployments`` requests.
This class thinly wraps an initial
:class:`google.cloud.workspace_add_ons_v1.types.ListDeploymentsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``deployments`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListDeployments`` requests and continue to iterate
through the ``deployments`` field on the
corresponding responses.
All the usual :class:`google.cloud.workspace_add_ons_v1.types.ListDeploymentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[gsuiteaddons.ListDeploymentsResponse]],
request: gsuiteaddons.ListDeploymentsRequest,
response: gsuiteaddons.ListDeploymentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.workspace_add_ons_v1.types.ListDeploymentsRequest):
The initial request object.
response (google.cloud.workspace_add_ons_v1.types.ListDeploymentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = gsuiteaddons.ListDeploymentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[gsuiteaddons.ListDeploymentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[gsuiteaddons.Deployment]:
async def async_generator():
async for page in self.pages:
for response in page.deployments:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| 41.801418 | 95 | 0.685443 |
8a40acd4ddaeef48f8ce9e810da753a133fe18c9 | 4,278 | py | Python | sdk/python/pulumi_alicloud/cdn/get_real_time_log_deliveries.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 42 | 2019-03-18T06:34:37.000Z | 2022-03-24T07:08:57.000Z | sdk/python/pulumi_alicloud/cdn/get_real_time_log_deliveries.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 152 | 2019-04-15T21:03:44.000Z | 2022-03-29T18:00:57.000Z | sdk/python/pulumi_alicloud/cdn/get_real_time_log_deliveries.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-08-26T17:30:07.000Z | 2021-07-05T01:37:45.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetRealTimeLogDeliveriesResult',
'AwaitableGetRealTimeLogDeliveriesResult',
'get_real_time_log_deliveries',
]
@pulumi.output_type
class GetRealTimeLogDeliveriesResult:
"""
A collection of values returned by getRealTimeLogDeliveries.
"""
def __init__(__self__, deliveries=None, domain=None, id=None, output_file=None, status=None):
if deliveries and not isinstance(deliveries, list):
raise TypeError("Expected argument 'deliveries' to be a list")
pulumi.set(__self__, "deliveries", deliveries)
if domain and not isinstance(domain, str):
raise TypeError("Expected argument 'domain' to be a str")
pulumi.set(__self__, "domain", domain)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if output_file and not isinstance(output_file, str):
raise TypeError("Expected argument 'output_file' to be a str")
pulumi.set(__self__, "output_file", output_file)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def deliveries(self) -> Sequence['outputs.GetRealTimeLogDeliveriesDeliveryResult']:
return pulumi.get(self, "deliveries")
@property
@pulumi.getter
def domain(self) -> str:
return pulumi.get(self, "domain")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="outputFile")
def output_file(self) -> Optional[str]:
return pulumi.get(self, "output_file")
@property
@pulumi.getter
def status(self) -> Optional[str]:
return pulumi.get(self, "status")
class AwaitableGetRealTimeLogDeliveriesResult(GetRealTimeLogDeliveriesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRealTimeLogDeliveriesResult(
deliveries=self.deliveries,
domain=self.domain,
id=self.id,
output_file=self.output_file,
status=self.status)
def get_real_time_log_deliveries(domain: Optional[str] = None,
output_file: Optional[str] = None,
status: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRealTimeLogDeliveriesResult:
"""
This data source provides the Cdn Real Time Log Deliveries of the current Alibaba Cloud user.
> **NOTE:** Available in v1.134.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.cdn.get_real_time_log_deliveries(domain="example_value")
pulumi.export("cdnRealTimeLogDelivery1", example.deliveries[0].id)
```
:param str domain: Real-Time Log Service Domain.
:param str status: -The status of the real-time log delivery feature. Valid Values: `online` and `offline`.
"""
__args__ = dict()
__args__['domain'] = domain
__args__['outputFile'] = output_file
__args__['status'] = status
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('alicloud:cdn/getRealTimeLogDeliveries:getRealTimeLogDeliveries', __args__, opts=opts, typ=GetRealTimeLogDeliveriesResult).value
return AwaitableGetRealTimeLogDeliveriesResult(
deliveries=__ret__.deliveries,
domain=__ret__.domain,
id=__ret__.id,
output_file=__ret__.output_file,
status=__ret__.status)
| 34.780488 | 164 | 0.665498 |
4ccb5f9361c46989c3350698964b0d63e42a4d49 | 15,020 | py | Python | src/8/xor_mlp_td.py | foxtrotmike/dissecting-reinforcement-learning | bee294f41e8a4c152d5dd8730eb2e268a46e6f92 | [
"MIT"
] | null | null | null | src/8/xor_mlp_td.py | foxtrotmike/dissecting-reinforcement-learning | bee294f41e8a4c152d5dd8730eb2e268a46e6f92 | [
"MIT"
] | null | null | null | src/8/xor_mlp_td.py | foxtrotmike/dissecting-reinforcement-learning | bee294f41e8a4c152d5dd8730eb2e268a46e6f92 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#MIT License
#Copyright (c) 2017 Massimiliano Patacchiola
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#In this example I will use the class gridworld to generate a 5x5 world
#in which the cleaning robot will move. Rewards are allocated in the 4
#corners of the world following the XOR pattern. I will use a function
#approximator based on a multi-layer perceptron (MLP) in order to represent
#the TD(0) utility function.
import numpy as np
from gridworld import GridWorld
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
from matplotlib.patches import Rectangle
import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib import cm
from mlp import MLP
def init_env():
'''Init the XOR boolean environment
@return the environment gridworld object
'''
env = GridWorld(5, 5)
#Define the state matrix
state_matrix = np.array([[1.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 1.0]])
#Define the index matrix
index_matrix = np.array([[(4,0), (4,1), (4,2), (4,3), (4,4)],
[(3,0), (3,1), (3,2), (3,3), (3,4)],
[(2,0), (2,1), (2,2), (2,3), (2,4)],
[(1,0), (1,1), (1,2), (1,3), (1,4)],
[(0,0), (0,1), (0,2), (0,3), (0,4)]])
#Define the reward matrix
reward_matrix = np.array([[1.0, 0.0, 0.0, 0.0, -1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0, 0.0, 1.0]])
#Define the transition matrix
transition_matrix = np.array([[0.8, 0.1, 0.0, 0.1],
[0.1, 0.8, 0.1, 0.0],
[0.0, 0.1, 0.8, 0.1],
[0.1, 0.0, 0.1, 0.8]])
env.setStateMatrix(state_matrix)
env.setIndexMatrix(index_matrix)
env.setRewardMatrix(reward_matrix)
env.setTransitionMatrix(transition_matrix)
return env
def update(my_mlp, new_observation, reward, learning_rate, gamma, done):
    '''Return the MLP updated with a single TD(0) step
    @param my_mlp the MLP approximating the utility function
    @param new_observation the feature vector (state) observed at t+1
    @param reward the reward observed after the action
    @param learning_rate the step size used for the gradient update
    @param gamma the discount factor
    @param done boolean True if the state is terminal
    @return my_mlp the updated MLP
    '''
if done:
x = np.array(new_observation, dtype=np.float32)
target = np.array([reward], dtype=np.float32)
#print(target)
my_mlp.train(x, target, learning_rate)
else:
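        # Non-terminal step: build a bootstrapped TD target reward + gamma * V(x)
        # from the MLP's current estimate and take one supervised step toward it.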
x = np.array(new_observation, dtype=np.float32)
target = np.array((reward + (gamma * my_mlp.forward(x))), dtype=np.float32)
#print target
my_mlp.train(x, target, learning_rate)
#w_t1 = w + alpha * ((reward + (gamma*(np.dot(x_t1,w))) - np.dot(x,w)) * x)
return my_mlp
def print_utility(my_mlp, tot_rows, tot_cols, decimal=2, flip=True):
'''Print on terminal the utility matrix of a discrete state space
having states defined by tuples: (0,0); (0,1); (0,2) ...
@param my_mlp an MLP object having single output
@param tot_rows total number of rows
@param tot_cols total number of columns
@param decimal is the precision of the printing (default: 2 decimal places)
@param flip boolean which defines if vertical flip is applied (default: True)
'''
utility_matrix = np.zeros((tot_rows, tot_cols))
for row in range(tot_rows):
for col in range(tot_cols):
x = np.array([row, col], dtype=np.float32)
utility_matrix[row,col] = my_mlp.forward(x)
np.set_printoptions(precision=decimal) #set print precision of numpy
if flip:
print(np.flipud(utility_matrix))
else:
print(utility_matrix)
np.set_printoptions(precision=8) #reset to default
def subplot(my_mlp, world_size, filename="figure.png"):
#Define the main figure property
fig, ax = plt.subplots(nrows=1, ncols=4, subplot_kw={'projection': '3d', 'autoscale_on':False, 'aspect':'equal'})
#XOR color
color_00 = "red"
color_11 = "red"
color_10 = "green"
color_01 = "green"
#Quadratic subplot
ax[0].clear()
#Draw the rectangles
p = Rectangle((0, 0), 1, 1, color=color_00, alpha=0.5)
ax[0].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((world_size-1, world_size-1), 1, 1, color=color_11, alpha=0.5)
ax[0].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((0, world_size-1), 1, 1, color=color_01, alpha=0.5)
ax[0].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((world_size-1, 0), 1, 1, color=color_10, alpha=0.5)
ax[0].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
#Set the plot
ax[0].set_xticks(np.arange(0, world_size+1, 1))
ax[0].set_xticklabels('', fontsize=10)
ax[0].set_yticklabels('', fontsize=10)
ax[0].set_yticks(np.arange(0, world_size+1, 1))
ax[0].set_zlim(-1.0,1.0)
#ax[0].set_zticklabels(['-1.0','','0','','1.0'], fontsize=10)
ax[0].view_init(elev=30, azim=-115)
x, y = np.meshgrid(np.arange(0.0, world_size-1.0, 0.01), np.arange(0.0, world_size-1.0, 0.01))
grid = np.arange(0.0, world_size-1.0, 0.01)
z_matrix = list()
for x_i in grid:
z_row = list()
for y_i in grid:
z_row.append(my_mlp.forward(np.array([x_i, y_i])))
z_matrix.append(z_row)
z = np.squeeze(np.array(z_matrix))
ax[0].plot_surface(x+0.5,y+0.5,z, color='lightgrey', alpha=0.5, linewidth=0, antialiased=False) # color='lightgrey', alpha=0.5)
#Draw a White background
x, y = np.meshgrid(np.arange(0, world_size+1, 1), np.arange(0, world_size+1, 1))
z = x*(-1.0)
ax[0].plot_surface(x,y,z, color='white', alpha=0.01)
#Quadratic subplot
ax[1].clear()
#Draw the rectangles
p = Rectangle((0, 0), 1, 1, color=color_00, alpha=0.5)
ax[1].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((world_size-1, world_size-1), 1, 1, color=color_11, alpha=0.5)
ax[1].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((0, world_size-1), 1, 1, color=color_01, alpha=0.5)
ax[1].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((world_size-1, 0), 1, 1, color=color_10, alpha=0.5)
ax[1].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
#Set the plot
ax[1].set_xticks(np.arange(0, world_size+1, 1))
ax[1].set_xticklabels('', fontsize=10)
ax[1].set_yticklabels('', fontsize=10)
ax[1].set_yticks(np.arange(0, world_size+1, 1))
ax[1].set_zlim(-1.0,1.0)
ax[1].set_zticklabels([''], fontsize=10)
ax[1].view_init(elev=30, azim=-65)
x, y = np.meshgrid(np.arange(0.0, world_size-1.0, 0.01), np.arange(0.0, world_size-1.0, 0.01))
grid = np.arange(0.0, world_size-1.0, 0.01)
z_matrix = list()
for x_i in grid:
z_row = list()
for y_i in grid:
z_row.append(my_mlp.forward(np.array([x_i, y_i])))
z_matrix.append(z_row)
z = np.squeeze(np.array(z_matrix))
ax[1].plot_surface(x+0.5,y+0.5,z, color='lightgrey', alpha=0.5, linewidth=0, antialiased=False) # color='lightgrey', alpha=0.5)
#Draw a White background
x, y = np.meshgrid(np.arange(0, world_size+1, 1), np.arange(0, world_size+1, 1))
z = x*(-1.0)
ax[1].plot_surface(x,y,z, color='white', alpha=0.01)
#Quadratic subplot
ax[2].clear()
#Draw the rectangles
p = Rectangle((0, 0), 1, 1, color=color_00, alpha=0.5)
ax[2].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((world_size-1, world_size-1), 1, 1, color=color_11, alpha=0.5)
ax[2].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((0, world_size-1), 1, 1, color=color_01, alpha=0.5)
ax[2].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((world_size-1, 0), 1, 1, color=color_10, alpha=0.5)
ax[2].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
#Set the plot
ax[2].set_xticks(np.arange(0, world_size+1, 1))
ax[2].set_xticklabels('', fontsize=10)
ax[2].set_yticklabels('', fontsize=10)
ax[2].set_yticks(np.arange(0, world_size+1, 1))
ax[2].set_zlim(-1.0,1.0)
ax[2].set_zticklabels([''], fontsize=10)
ax[2].view_init(elev=30, azim=-45)
x, y = np.meshgrid(np.arange(0.0, world_size-1.0, 0.01), np.arange(0.0, world_size-1.0, 0.01))
grid = np.arange(0.0, world_size-1.0, 0.01)
z_matrix = list()
for x_i in grid:
z_row = list()
for y_i in grid:
z_row.append(my_mlp.forward(np.array([x_i, y_i])))
z_matrix.append(z_row)
z = np.squeeze(np.array(z_matrix))
ax[2].plot_surface(x+0.5,y+0.5,z, color='lightgrey', alpha=0.5, linewidth=0, antialiased=False) # color='lightgrey', alpha=0.5)
#Draw a White background
x, y = np.meshgrid(np.arange(0, world_size+1, 1), np.arange(0, world_size+1, 1))
z = x*(-1.0)
ax[2].plot_surface(x,y,z, color='white', alpha=0.01)
#Quadratic subplot
ax[3].clear()
#Draw the rectangles
p = Rectangle((0, 0), 1, 1, color=color_00, alpha=0.5)
ax[3].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((world_size-1, world_size-1), 1, 1, color=color_11, alpha=0.5)
ax[3].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((0, world_size-1), 1, 1, color=color_01, alpha=0.5)
ax[3].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
p = Rectangle((world_size-1, 0), 1, 1, color=color_10, alpha=0.5)
ax[3].add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=-1.0, zdir="z")
#Set the plot
ax[3].set_xticks(np.arange(0, world_size+1, 1))
ax[3].set_xticklabels('', fontsize=10)
ax[3].set_yticklabels('', fontsize=10)
ax[3].set_yticks(np.arange(0, world_size+1, 1))
ax[3].set_zlim(-1.0,1.0)
ax[3].set_zticklabels([''], fontsize=10)
ax[3].view_init(elev=30, azim=-25)
x, y = np.meshgrid(np.arange(0.0, world_size-1.0, 0.01), np.arange(0.0, world_size-1.0, 0.01))
grid = np.arange(0.0, world_size-1.0, 0.01)
z_matrix = list()
for x_i in grid:
z_row = list()
for y_i in grid:
z_row.append(my_mlp.forward(np.array([x_i, y_i])))
z_matrix.append(z_row)
z = np.squeeze(np.array(z_matrix))
ax[3].plot_surface(x+0.5,y+0.5,z, color='lightgrey', alpha=0.5, linewidth=0, antialiased=False) # color='lightgrey', alpha=0.5)
#Draw a White background
x, y = np.meshgrid(np.arange(0, world_size+1, 1), np.arange(0, world_size+1, 1))
z = x*(-1.0)
ax[3].plot_surface(x,y,z, color='white', alpha=0.01)
#Save the figure
fig.tight_layout()
fig.savefig(filename, dpi=300) #, bbox_inches='tight')
def main():
env = init_env()
my_mlp = MLP(tot_inputs=2, tot_hidden=3, tot_outputs=1, activation="tanh")
learning_rate = 0.1
gamma = 0.9
alpha_start = 0.1
    alpha_stop = 0.0000001 #final step size of the linearly decayed schedule
tot_epoch = 10001
alpha_array = np.linspace(alpha_start, alpha_stop, tot_epoch)
print_epoch = 100
for epoch in range(tot_epoch):
        alpha = alpha_array[epoch] #linearly decayed rate (reported below); note update() is called with the constant learning_rate
#XOR-world episode
observation = env.reset(exploring_starts=True)
#The episode starts here
for step in range(1000):
action = np.random.randint(0,4)
new_observation, reward, done = env.step(action) #move in the world and get the state and reward
my_mlp = update(my_mlp, new_observation, reward, learning_rate, gamma, done)
observation = new_observation
if done: break
if(epoch % print_epoch == 0):
print("")
print("Epoch: " + str(epoch+1))
print("Tot steps: " + str(step))
print("Alpha: " + str(alpha))
print_utility(my_mlp, tot_rows=5, tot_cols=5)
print("Generating plot, please wait...")
subplot(my_mlp, world_size=5, filename="xor_planes.png")
print("Done!")
if __name__ == "__main__":
main()
ee20f70c1b2edb508a4480e802a1a38ed721a53b | 9,038 | py | Python | yambopy/integration_tests/itest_si_bse.py | QU-XIAO/yambopy | BSD-3-Clause
#
# Author: Henrique Pereira Coutada Miranda
# Tests for yambopy
# Si
#
from __future__ import print_function
import matplotlib
import unittest
import sys
import os
import shutil
import argparse
import subprocess
import filecmp
import shutil as sh
import yambopy
from yambopy import *
from qepy import *
reference_dir = os.path.join(os.path.dirname(yambopy.data.__file__),'refs')
class TestPW_Si(unittest.TestCase):
""" This class creates the input files for Si and compares them to reference files
"""
def get_inputfile(self):
qe = PwIn()
qe.atoms = [['Si',[0.125,0.125,0.125]],
['Si',[-.125,-.125,-.125]]]
qe.atypes = {'Si': [28.086,"Si.pbe-mt_fhi.UPF"]}
qe.control['prefix'] = "'si'"
qe.control['wf_collect'] = '.true.'
qe.control['pseudo_dir'] = "'../pseudos'"
qe.system['celldm(1)'] = 10.3
qe.system['ecutwfc'] = 40
qe.system['occupations'] = "'fixed'"
qe.system['nat'] = 2
qe.system['ntyp'] = 1
qe.system['ibrav'] = 2
qe.kpoints = [4, 4, 4]
qe.electrons['conv_thr'] = 1e-8
return qe
def test_pw_input_relax(self):
""" Generate a silicon pw.x input file for the relaxation cycle
"""
if not os.path.isdir('relax'):
os.mkdir('relax')
qe = self.get_inputfile()
qe.control['calculation'] = "'vc-relax'"
qe.ions['ion_dynamics'] = "'bfgs'"
qe.cell['cell_dynamics'] = "'bfgs'"
qe.write('relax/si.scf')
self.assertEqual(filecmp.cmp('relax/si.scf', '%s/si/relax_si.scf'%reference_dir),True)
def test_pw_input_scf(self):
""" Generate a silicon pw.x input file for the self consistent cycle
"""
if not os.path.isdir('scf'):
os.mkdir('scf')
qe = self.get_inputfile()
qe.control['calculation'] = "'scf'"
qe.write('scf/si.scf')
self.assertEqual(filecmp.cmp('scf/si.scf', '%s/si/scf_si.scf'%reference_dir),True)
def test_pw_input_nscf(self):
""" Generate a silicon pw.x input file for the non self consistent cycle
"""
if not os.path.isdir('nscf'):
os.mkdir('nscf')
qe = self.get_inputfile()
qe.control['calculation'] = "'nscf'"
qe.electrons['diago_full_acc'] = ".true."
qe.electrons['conv_thr'] = 1e-8
qe.system['nbnd'] = 30
qe.system['force_symmorphic'] = ".true."
qe.kpoints = [2, 2, 2]
qe.write('nscf/si.nscf')
self.assertEqual(filecmp.cmp('nscf/si.nscf', '%s/si/nscf_si.nscf'%reference_dir),True)
class TestPW_Si_Run(unittest.TestCase):
""" This class creates the input files and runs the pw.x code
"""
    def test_pw_si(self):
""" Run relaxation, self consistent cycle and non self consistent cycle
"""
print("\nstep 1: relax")
os.system('cd relax; pw.x < si.scf > si.scf.log')
e = PwXML('si',path='relax')
pos = e.get_scaled_positions()
q = PwIn.from_file('scf/si.scf')
print("old celldm(1)", q.system['celldm(1)'])
q.system['celldm(1)'] = e.cell[0][2]*2
print("new celldm(1)", q.system['celldm(1)'])
q.atoms = list(zip([a[0] for a in q.atoms],pos))
q.write('scf/si.scf')
print("step 2: scf")
os.system('cd scf; pw.x < si.scf > si.scf.log')
os.system('cp -r scf/si.save nscf')
print("step 3: nscf")
os.system('cd nscf; pw.x < si.nscf > si.nscf.log')
class TestYamboPrep_Si(unittest.TestCase):
def test_yambo_preparation(self):
""" Run p2y and yambo to prepare the database
"""
if not os.path.isdir('database'):
os.mkdir('database')
os.system('cd nscf/si.save; p2y 2> ../../database/p2y.log')
os.system('cd nscf/si.save; yambo 2> ../../database/yambo.log')
os.system('mv nscf/si.save/SAVE database')
class TestYamboIn_BSE_Si(unittest.TestCase):
def setUp(self):
""" Prepare the databases
"""
if not os.path.isdir('database/SAVE'):
os.makedirs('database')
os.system('cd database; tar xfz %s/si/yambo_bse_conv/bse_conv.tar.gz'%reference_dir)
if not os.path.isdir('bse/SAVE'):
sh.copytree('database/SAVE','bse/SAVE')
if not os.path.isdir('bse_conv/SAVE'):
sh.copytree('database/SAVE','bse_conv/SAVE')
def test_bse_input(self):
""" Test if we can initialize the YamboIn class for a typical BSE input file
"""
y = YamboIn.from_runlevel('-b -o b -k sex -y h -V all',folder='bse')
def test_bse_convergence(self):
""" Test if we can generate multiple input files changing some variables
"""
y = YamboIn.from_runlevel('-b -o b -k sex -y d -V all',folder='bse_conv')
y['BEnSteps'] = 500
conv = { 'FFTGvecs': [[5,10,15],'Ry'],
'NGsBlkXs': [[1,2,5], 'Ry'],
'BndsRnXs': [[1,10],[1,20],[1,30]] }
y.optimize(conv,folder='bse_conv')
return y
class TestYamboIn_BSE_Si_Run(unittest.TestCase):
def test_yambo_bse_si(self):
""" Run BSE calculation with yambo
"""
y = YamboIn.from_runlevel('-b -o b -k sex -y d -V all',folder='bse_conv')
y['BEnSteps'] = 500
conv = { 'FFTGvecs': [[5,10,15],'Ry'],
'NGsBlkXs': [[1,2,5], 'Ry'],
'BndsRnXs': [[1,10],[1,20],[1,30]] }
def run(filename):
folder = filename.split('.')[0]
os.system('cd bse_conv; yambo -F %s -J %s -C %s 2> %s.log'%(filename,folder,folder,folder))
y.optimize(conv,folder='bse_conv',run=run)
class TestYamboOut_BSE_Si(unittest.TestCase):
def test_yamboout_bse_si(self):
""" Read the yambo BSE output files and write them as .json
"""
for dirpath,dirnames,filenames in os.walk('bse_conv'):
#check if there are some output files in the folder
if ([ f for f in filenames if 'o-' in f ]):
y = YamboOut(dirpath,save_folder='bse_conv')
y.pack()
def test_yamboanalyse_bse_si(self):
""" Analyse the BSE .json output files
"""
y = YamboAnalyser('bse_conv')
y.plot_bse('eps')
def test_yambopy_analysebse(self):
""" Test the yambopy analysebse executable
"""
os.system('yambopy analysebse bse_conv FFTGvecs -nd')
out = np.loadtxt('analyse_bse_conv/FFTGvecs_exciton_energies.dat')
ref = np.loadtxt('%s/si/analyse_bse_conv/FFTGvecs_exciton_energies.dat'%reference_dir)
print("ref:")
print(ref)
print("out:")
print(out)
self.assertEqual(np.isclose(ref,out,atol=1e-3).all(),True)
os.system('yambopy analysebse bse_conv BndsRnXs -nd')
out = np.loadtxt('analyse_bse_conv/BndsRnXs_exciton_energies.dat')
ref = np.loadtxt('%s/si/analyse_bse_conv/BndsRnXs_exciton_energies.dat'%reference_dir)
print("ref:")
print(ref)
print("out:")
print(out)
self.assertEqual(np.isclose(ref,out,atol=1e-3).all(),True)
if __name__ == '__main__':
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-i','--input', action="store_true",
help='Generate the input files and compare with the reference ones')
parser.add_argument('-f','--full', action="store_true",
help='Generate the input files, run them and compare the results')
parser.add_argument('-c','--clean', action="store_true",
help='Clean all the data from a previous run')
args = parser.parse_args()
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
# Count the number of errors
nerrors = 0
ul = unittest.TestLoader()
tr = unittest.TextTestRunner(verbosity=2)
#
# Test pw.x
#
suite = ul.loadTestsFromTestCase(TestPW_Si)
nerrors += not tr.run(suite).wasSuccessful()
if args.full:
suite = ul.loadTestsFromTestCase(TestPW_Si_Run)
nerrors += not tr.run(suite).wasSuccessful()
#
# Test p2y and yambo
#
if args.full:
suite = ul.loadTestsFromTestCase(TestYamboPrep_Si)
nerrors += not tr.run(suite).wasSuccessful()
#
# Test BSE on yambo
#
suite = ul.loadTestsFromTestCase(TestYamboIn_BSE_Si)
nerrors += not tr.run(suite).wasSuccessful()
if args.full:
suite = ul.loadTestsFromTestCase(TestYamboIn_BSE_Si_Run)
nerrors += not tr.run(suite).wasSuccessful()
suite = ul.loadTestsFromTestCase(TestYamboOut_BSE_Si)
nerrors += not tr.run(suite).wasSuccessful()
#clean tests
if args.clean or nerrors==0:
print("cleaning...")
os.system('rm -rf scf bse bse_conv nscf relax database '
'analyse_bse_conv proj.in')
print("done!")
sys.exit(nerrors)
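# Example invocations (hypothetical, using the argparse options defined above):
#   python itest_si_bse.py -i    # only generate input files and compare with the references
#   python itest_si_bse.py -f    # full run: pw.x, p2y/yambo preparation and the BSE tests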
d6dc233f813319057836bf1c618cd720e3f231b7 | 1,150 | py | Python | bin/setup_transactionlogs.py | jacquayj/sheepdog | Apache-2.0
#!/usr/bin/env python
"""
Script to set up the transaction log database tables
"""
import argparse
from sqlalchemy import create_engine
from gdcdatamodel.models.submission import Base
def setup(host, user, password, database):
engine = create_engine(
"postgres://{user}:{password}@{host}/{database}".format(
user=user, host=host, password=password, database=database
)
)
Base.metadata.create_all(engine)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--host", type=str, action="store", default="localhost", help="psql-server host"
)
parser.add_argument(
"--user", type=str, action="store", default="test", help="psql test user"
)
parser.add_argument(
"--password",
type=str,
action="store",
default="test",
help="psql test password",
)
parser.add_argument(
"--database",
type=str,
action="store",
default="sheepdog_automated_test",
help="psql test database",
)
args = parser.parse_args()
setup(args.host, args.user, args.password, args.database)
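# Example invocation (hypothetical; the values shown are simply the argparse defaults above):
#   python setup_transactionlogs.py --host localhost --user test --password test --database sheepdog_automated_test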
f48b79b25ecf50d954e4c146d713955ec98a605b | 5,686 | py | Python | tests/server_test.py | strophy/thumbor_dash | MIT
from unittest import TestCase
import mock
from preggy import expect
import thumbor_dash.server
from tests.fixtures.custom_error_handler import ErrorHandler as CustomErrorHandler
from thumbor_dash.app import ThumborDashServiceApp
from thumbor.config import Config
from thumbor_dash.server import (
configure_log,
get_application,
get_as_integer,
get_config,
get_context,
get_importer,
main,
run_server,
validate_config,
)
class ServerTestCase(TestCase):
def test_can_get_value_as_integer(self):
expect(get_as_integer("1")).to_equal(1)
expect(get_as_integer("a")).to_be_null()
expect(get_as_integer("")).to_be_null()
expect(get_as_integer(None)).to_be_null()
def test_can_get_config_from_path(self):
config = get_config("./tests/fixtures/thumbor_config_server_test.conf")
with mock.patch.dict("os.environ", {"ENGINE": "test"}):
expect(config).not_to_be_null()
expect(config.ALLOWED_SOURCES).to_be_like(["mydomain.com"])
expect(config.ENGINE).to_be_like("thumbor.engines.pil")
def test_can_get_config_with_env_enabled(self):
config = get_config("./tests/fixtures/thumbor_config_server_test.conf", True)
with mock.patch.dict("os.environ", {"ENGINE": "test"}):
expect(config).not_to_be_null()
expect(config.ALLOWED_SOURCES).to_be_like(["mydomain.com"])
expect(config.ENGINE).to_be_like("test")
@mock.patch("logging.basicConfig")
def test_can_configure_log_from_config(self, basic_config_mock):
conf = Config()
configure_log(conf, "DEBUG")
params = dict(
datefmt="%Y-%m-%d %H:%M:%S",
level=10,
format="%(asctime)s %(name)s:%(levelname)s %(message)s",
)
basic_config_mock.assert_called_with(**params)
@mock.patch("logging.config.dictConfig")
def test_can_configure_log_from_dict_config(self, dict_config_mock):
conf = Config(THUMBOR_LOG_CONFIG={"level": "INFO"})
configure_log(conf, "DEBUG")
params = dict(level="INFO",)
dict_config_mock.assert_called_with(params)
def test_can_import_default_modules(self):
conf = Config()
importer = get_importer(conf)
expect(importer).not_to_be_null()
expect(importer.filters).not_to_be_empty()
def test_can_import_with_custom_error_handler_class(self):
conf = Config(
USE_CUSTOM_ERROR_HANDLING=True,
ERROR_HANDLER_MODULE="tests.fixtures.custom_error_handler",
)
importer = get_importer(conf)
expect(importer).not_to_be_null()
expect(importer.error_handler_class).not_to_be_null()
expect(importer.error_handler_class).to_be_instance_of(CustomErrorHandler)
def test_validate_config_security_key(self):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY=None)
with expect.error_to_happen(
RuntimeError,
message="No security key was found for this instance of thumbor. "
"Please provide one using the conf file or a security key file.",
):
validate_config(conf, server_parameters)
def test_validate_config_security_key_from_config(self):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY="something", REQUEST_TIME_LIMIT = 1, USAGE_VIOLATION_LIMIT = 5, BAN_DURATION = 10 )
validate_config(conf, server_parameters)
expect(server_parameters.security_key).to_equal("something")
@mock.patch.object(thumbor_dash.server, "which")
def test_validate_gifsicle_path(self, which_mock):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY="test", USE_GIFSICLE_ENGINE=True, REQUEST_TIME_LIMIT = 1, USAGE_VIOLATION_LIMIT = 5, BAN_DURATION = 10 )
which_mock.return_value = "/usr/bin/gifsicle"
validate_config(conf, server_parameters)
expect(server_parameters.gifsicle_path).to_equal("/usr/bin/gifsicle")
@mock.patch.object(thumbor_dash.server, "which")
def test_validate_null_gifsicle_path(self, which_mock):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY="test", USE_GIFSICLE_ENGINE=True, REQUEST_TIME_LIMIT = 1, USAGE_VIOLATION_LIMIT = 5, BAN_DURATION = 10 )
which_mock.return_value = None
with expect.error_to_happen(
RuntimeError,
message="If using USE_GIFSICLE_ENGINE configuration to True, "
"the `gifsicle` binary must be in the PATH and must be an executable.",
):
validate_config(conf, server_parameters)
def test_get_context(self):
server_parameters = mock.Mock(
security_key=None, app_class="thumbor_dash.app.ThumborDashServiceApp"
)
conf = Config(SECURITY_KEY="test", REQUEST_TIME_LIMIT = 1, USAGE_VIOLATION_LIMIT = 5, BAN_DURATION = 10 )
importer = get_importer(conf)
context = get_context(server_parameters, conf, importer)
expect(context).not_to_be_null()
def test_get_application(self):
server_parameters = mock.Mock(
security_key=None, app_class="thumbor_dash.app.ThumborDashServiceApp"
)
conf = Config(SECURITY_KEY="test", REQUEST_TIME_LIMIT = 1, USAGE_VIOLATION_LIMIT = 5, BAN_DURATION = 10 )
importer = get_importer(conf)
context = get_context(server_parameters, conf, importer)
app = get_application(context)
expect(app).not_to_be_null()
        expect(app).to_be_instance_of(ThumborDashServiceApp)
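# To run this test module (assumption): e.g. `python -m pytest tests/server_test.py` from the
# repository root, with the mock and preggy test dependencies installed.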
008388e29098a4509cc43b4924630892547a7eb1 | 708 | py | Python | system/migrations/0002_auto_20180601_2250.py | 17621368758/tranpathPY | MIT
# Generated by Django 2.0.5 on 2018-06-01 22:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('system', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='excel_import_file_fields_name',
name='colType',
field=models.CharField(help_text='{"form":"F"}', max_length=50, null=True, verbose_name='段名类型(从excel判断 )'),
),
migrations.AlterField(
model_name='excel_import_file_fields_name',
name='fieldNameNew',
field=models.CharField(help_text='{"form":"F"}', max_length=500, verbose_name='新字段名(为空时该字段不导入系统)'),
),
]
1d1760600d63e2b9f4229cb928e584b4ca353cd7 | 2,017 | py | Python | dos/inference.py | unhochoi/dos | Apache-2.0
# Dense Or Sparse : inference
# Load python packages
# 1. Basic data processing packages
import numpy as np
import pickle
# 2. Machine learning packages
from sklearn.preprocessing import MinMaxScaler
# 3. Deep learning package
import tensorflow as tf
# 4. Other Packages
import argparse
# Setting Argument
parser = argparse.ArgumentParser()
parser.add_argument('--nr_l', type=int)
parser.add_argument('--nc_l', type=int)
parser.add_argument('--nc_r', type=int)
parser.add_argument('--d_l', type=float)
parser.add_argument('--d_r', type=float)
parser.add_argument('--nnz_l', type=int)
parser.add_argument('--nnz_r', type=int)
args = parser.parse_args()
# Convert argument to variable
NR_L = args.nr_l
NC_L = args.nc_l
NC_R = args.nc_r
D_L = args.d_l
D_R = args.d_r
NNZ_L = args.nnz_l
NNZ_R = args.nnz_r
# Load Model
smsm_dnn_model = tf.keras.models.load_model('./model/smsm_dnn_model')
smdm_dnn_model = tf.keras.models.load_model('./model/smdm_dnn_model')
# Load Scaler
minmax_scaler = pickle.load(open('./scaler/minmax_scaler.pkl','rb'))
def inference(nr_l, nc_l, nc_r, d_l, d_r, nnz_l, nnz_r):
# Create input feature to use as model input
input_feature = np.array([[nr_l, nc_l, nc_r, d_l, d_r, nnz_l, nnz_r]])
# Apply minmax scaler to input_feature
input_feature_scaler = minmax_scaler.transform(input_feature)
# Generate model-specific predictions for input feature
smsm_dnn_result = smsm_dnn_model.predict(input_feature_scaler)
smdm_dnn_result = smdm_dnn_model.predict(input_feature_scaler)
# If sm*dm is better than sm*sm
if (smdm_dnn_result[0] <= smsm_dnn_result[0]):
optim_method = "Sparse X Dense"
# If sm*sm is better than sm*dm
else:
optim_method = "Sparse X Sparse"
# Generate result
result = "Sparse X Sparse Latency : " + str(int(smsm_dnn_result[0])) + "ms , " + \
"Sparse X Dense Latency : " + str(int(smdm_dnn_result[0])) + "ms , " + \
"Optimal Method : " + optim_method
print(result)
# Execute inference
inference(NR_L, NC_L, NC_R, D_L, D_R, NNZ_L, NNZ_R)
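# Example invocation (hypothetical matrix statistics, assuming the saved model and scaler
# files loaded above exist; the nnz values are consistent with the given densities):
#   python inference.py --nr_l 1000 --nc_l 500 --nc_r 200 --d_l 0.01 --d_r 0.5 --nnz_l 5000 --nnz_r 50000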
53298b4f533b85b0a688419045355c9c5a688fa2 | 6,799 | py | Python | data/p3BR/R2/benchmark/startQiskit274.py | UCLA-SEAL/QDiff | BSD-3-Clause
# qubit number=3
# total number=57
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
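# Hand-checked examples for the helpers above (illustrative):
#   bitwise_dot("011", "010") == "1"   # 0*0 + 1*1 + 1*0 = 1 -> parity "1"
#   bitwise_dot("011", "011") == "0"   # 0 + 1 + 1 = 2 -> parity "0"
# Note that bitwise_xor returns the XOR-ed string reversed (res[::-1]).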
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.h(input_qubit[2]) # number=54
prog.cz(input_qubit[0],input_qubit[2]) # number=55
prog.h(input_qubit[2]) # number=56
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[2]) # number=48
prog.cz(input_qubit[0],input_qubit[2]) # number=49
prog.h(input_qubit[2]) # number=50
prog.x(input_qubit[2]) # number=46
prog.cx(input_qubit[0],input_qubit[2]) # number=47
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.h(input_qubit[2]) # number=51
prog.cz(input_qubit[0],input_qubit[2]) # number=52
prog.h(input_qubit[2]) # number=53
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit274.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
797b5256f945c28fd586b612a1fa3d6f2a7d0beb | 3,991 | py | Python | alipay/aop/api/request/AlipayEbppInvoiceTitleBatchqueryRequest.py | snowxmas/alipay-sdk-python-all | Apache-2.0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayEbppInvoiceTitleBatchqueryModel import AlipayEbppInvoiceTitleBatchqueryModel
class AlipayEbppInvoiceTitleBatchqueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayEbppInvoiceTitleBatchqueryModel):
self._biz_content = value
else:
self._biz_content = AlipayEbppInvoiceTitleBatchqueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.ebpp.invoice.title.batchquery'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
898b09dd479a6c3be0ced74c9bdc4d24f2ba0f63 | 362 | py | Python | tests/test_mapped_sequence_declaration.py | lcopey/SimpleTable | MIT
import unittest
from table import MappedSequence
class TestMappedSequence(unittest.TestCase):
def test_init(self):
self.assertIsNotNone(MappedSequence(values=(0, 1, 2), keys=['a', 'b', 'c']))
self.assertIsNotNone(MappedSequence(values=(0, 1, 2)))
self.assertRaises(AssertionError, MappedSequence, values=(0, 1, 2), keys=['a', 'b'])
a1ef78c62ae6a67f63180d6c6f63ff61ed6480b5 | 5,349 | py | Python | compareSampleSets.py | bsaintjo/mesa | MIT
#!/usr/bin/env python3
########################################################################
# File: compareSampleSets.py
# executable:
# Purpose:
#
#
# Author: Cameron M. Soulette
# History: cms 01/08/2020 Created
#
########################################################################
########################################################################
# Hot Imports & Global Variable
########################################################################
import os, sys
import numpy as np
from scipy.stats import ranksums
from statsmodels.stats.multitest import multipletests
########################################################################
# CommandLine
########################################################################
class CommandLine(object) :
'''
Handle the command line, usage and help requests.
CommandLine uses argparse, now standard in 2.7 and beyond.
it implements a standard command line argument parser with various argument options,
and a standard usage and help,
attributes:
myCommandLine.args is a dictionary which includes each of the available command line arguments as
myCommandLine.args['option']
methods:
'''
def __init__(self, inOpts=None) :
'''
CommandLine constructor.
Implements a parser to interpret the command line argv string using argparse.
'''
import argparse
self.parser = argparse.ArgumentParser(description = 'TBD',
epilog = 'Please feel free to forward any usage questions or concerns',
add_help = True, #default is True
prefix_chars = '-',
usage = '%(prog)s -m1 manifest1.txt -m2 manifest2.txt')
# Add args
self.parser.add_argument('--psiMESA', type=str, action = 'store', required=True, help='Compressed NPZ formatted PSI matrix from quantMESA.')
self.parser.add_argument('-m1', '--manifest1', type=str, action = 'store', required=True, help='Manifest containing samples for sample set group1')
self.parser.add_argument('-m2', '--manifest2' , type=str, action = 'store', required=True, help='Manifest containing samples for sample set group2')
self.parser.add_argument('-o', '--out_prefix' , type=str, action = 'store', required=False, help='Prefix for output file.')
if inOpts is None :
self.args = vars(self.parser.parse_args())
else :
self.args = vars(self.parser.parse_args(inOpts))
########################################################################
# Helper Functions
#
#
########################################################################
def loadNPZ(x):
'''
takes in npz formatted matrix.
'''
try:
data = np.load(x)
except:
print("ERR ** Cannot load matrix %s. Check path or format." % x)
sys.exit(1)
return data
def getColIndexFromArray(x,y):
'''
takes in list of strings = x
and finds list index in array = y
'''
return np.nonzero(np.isin(y,x))
def returnSamplesFromManifest(x):
'''
reads in mesa formatted manifest
returns list of samples
'''
s = list()
with open(x) as fin:
for i in fin:
s.append(i.split()[0])
return s
########################################################################
# MAINE
#
#
########################################################################
def main():
'''
    A workflow to compute the significance of the difference
    between two distributions of PSI values.
    Values are assumed not to be normally distributed, thus
    we invoke the Wilcoxon rank-sum test as the statistical analysis.
'''
myCommandLine = CommandLine()
# args
pmesa = myCommandLine.args["psiMESA"]
group1 = myCommandLine.args["manifest1"]
group2 = myCommandLine.args["manifest2"]
prefix = myCommandLine.args['out_prefix']
# get sample lists
g1 = returnSamplesFromManifest(group1)
g2 = returnSamplesFromManifest(group2)
if len(g1) < 3 or len(g2) < 3:
print("Cannot conduct wilcoxon with less than 3 samples in either group. Exit.", file=sys.stderr)
sys.exit(1)
#load psi
data = loadNPZ(pmesa)
#table has 3 arrays, cols, rows and data
cols, rows, matrix = data['cols'], data['rows'], data['data']
# get sample indices
g1Indices = getColIndexFromArray(g1,cols)
g2Indices = getColIndexFromArray(g2,cols)
# do the math
pvals = list()
testedEvents = list()
for n,event in enumerate(matrix):
d1, d2 = event[g1Indices], event[g2Indices]
nonans1 = np.invert(np.isnan(d1))
nonans2 = np.invert(np.isnan(d2))
data1 = d1[nonans1]
data2 = d2[nonans2]
if len(data1) < 3 or len(data2) < 3:
continue
        D, pval = ranksums(data1, data2)
testedEvents.append((rows[n],np.mean(data1)-np.mean(data2)))
pvals.append(pval)
# correct pvals
corrected = multipletests(pvals,method="fdr_bh")[1]
for n,i in enumerate(testedEvents):
print(pvals[n],corrected[n],i[0],i[1])
if __name__ == "__main__":
main()
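# Example invocation (hypothetical file names):
#   python compareSampleSets.py --psiMESA allPSI.npz -m1 group1_manifest.txt -m2 group2_manifest.txt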
3d4c3fbbe1bc462bc7133f3c7c96e805357123c9 | 362 | py | Python | usage-advanced.py | kinimesi/dash-cytoscape | MIT
import dash
from demos.editor.callbacks import assign_callbacks
from demos.editor.layout import layout as cytoscape_layout
app = dash.Dash(__name__)
server = app.server
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
app.layout = cytoscape_layout
assign_callbacks(app)
if __name__ == '__main__':
app.run_server(debug=True)
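# To try it locally (assumption): run `python usage-advanced.py` and open the address Dash
# prints to the console (http://127.0.0.1:8050 by default).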
585b77be105d5b558282744f0f88da332c903162 | 17,549 | py | Python | geopandas/tests/test_pandas_methods.py | raybellwaves/geopandas | BSD-3-Clause
import os
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
import shapely
from shapely.geometry import Point, GeometryCollection
import geopandas
from geopandas import GeoDataFrame, GeoSeries
import geopandas._compat as compat
from geopandas.array import from_shapely
from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
@pytest.fixture
def s():
return GeoSeries([Point(x, y) for x, y in zip(range(3), range(3))])
@pytest.fixture
def df():
return GeoDataFrame(
{
"geometry": [Point(x, x) for x in range(3)],
"value1": np.arange(3, dtype="int64"),
"value2": np.array([1, 2, 1], dtype="int64"),
}
)
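# For reference, the `df` fixture above looks schematically like this (illustrative repr):
#       geometry  value1  value2
# 0  POINT (0 0)       0       1
# 1  POINT (1 1)       1       2
# 2  POINT (2 2)       2       1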
def test_repr(s, df):
assert "POINT" in repr(s)
assert "POINT" in repr(df)
assert "POINT" in df._repr_html_()
def test_repr_boxed_display_precision():
# geographic coordinates
p1 = Point(10.123456789, 50.123456789)
p2 = Point(4.123456789, 20.123456789)
s1 = GeoSeries([p1, p2, None])
assert "POINT (10.12346 50.12346)" in repr(s1)
# geographic coordinates 4326
s3 = GeoSeries([p1, p2], crs=4326)
assert "POINT (10.12346 50.12346)" in repr(s3)
# projected coordinates
p1 = Point(3000.123456789, 3000.123456789)
p2 = Point(4000.123456789, 4000.123456789)
s2 = GeoSeries([p1, p2, None])
assert "POINT (3000.123 3000.123)" in repr(s2)
# projected geographic coordinate
s4 = GeoSeries([p1, p2], crs=3857)
assert "POINT (3000.123 3000.123)" in repr(s4)
geopandas.options.display_precision = 1
assert "POINT (10.1 50.1)" in repr(s1)
geopandas.options.display_precision = 9
assert "POINT (10.123456789 50.123456789)" in repr(s1)
def test_repr_all_missing():
# https://github.com/geopandas/geopandas/issues/1195
s = GeoSeries([None, None, None])
assert "None" in repr(s)
df = GeoDataFrame({"a": [1, 2, 3], "geometry": s})
assert "None" in repr(df)
assert "geometry" in df._repr_html_()
def test_repr_empty():
# https://github.com/geopandas/geopandas/issues/1195
s = GeoSeries([])
assert repr(s) == "GeoSeries([], dtype: geometry)"
df = GeoDataFrame({"a": [], "geometry": s})
assert "Empty GeoDataFrame" in repr(df)
# https://github.com/geopandas/geopandas/issues/1184
assert "geometry" in df._repr_html_()
def test_indexing(s, df):
# accessing scalar from the geometry (colunm)
exp = Point(1, 1)
assert s[1] == exp
assert s.loc[1] == exp
assert s.iloc[1] == exp
assert df.loc[1, "geometry"] == exp
assert df.iloc[1, 0] == exp
# multiple values
exp = GeoSeries([Point(2, 2), Point(0, 0)], index=[2, 0])
assert_geoseries_equal(s.loc[[2, 0]], exp)
assert_geoseries_equal(s.iloc[[2, 0]], exp)
assert_geoseries_equal(s.reindex([2, 0]), exp)
assert_geoseries_equal(df.loc[[2, 0], "geometry"], exp)
# TODO here iloc does not return a GeoSeries
assert_series_equal(
df.iloc[[2, 0], 0], exp, check_series_type=False, check_names=False
)
# boolean indexing
exp = GeoSeries([Point(0, 0), Point(2, 2)], index=[0, 2])
mask = np.array([True, False, True])
assert_geoseries_equal(s[mask], exp)
assert_geoseries_equal(s.loc[mask], exp)
assert_geoseries_equal(df[mask]["geometry"], exp)
assert_geoseries_equal(df.loc[mask, "geometry"], exp)
# slices
s.index = [1, 2, 3]
exp = GeoSeries([Point(1, 1), Point(2, 2)], index=[2, 3])
assert_series_equal(s[1:], exp)
assert_series_equal(s.iloc[1:], exp)
assert_series_equal(s.loc[2:], exp)
def test_reindex(s, df):
# GeoSeries reindex
res = s.reindex([1, 2, 3])
exp = GeoSeries([Point(1, 1), Point(2, 2), None], index=[1, 2, 3])
assert_geoseries_equal(res, exp)
# GeoDataFrame reindex index
res = df.reindex(index=[1, 2, 3])
assert_geoseries_equal(res.geometry, exp)
# GeoDataFrame reindex columns
res = df.reindex(columns=["value1", "geometry"])
assert isinstance(res, GeoDataFrame)
assert isinstance(res.geometry, GeoSeries)
assert_frame_equal(res, df[["value1", "geometry"]])
# TODO df.reindex(columns=['value1', 'value2']) still returns GeoDataFrame,
# should it return DataFrame instead ?
def test_take(s, df):
inds = np.array([0, 2])
# GeoSeries take
result = s.take(inds)
expected = s.iloc[[0, 2]]
assert isinstance(result, GeoSeries)
assert_geoseries_equal(result, expected)
# GeoDataFrame take axis 0
result = df.take(inds, axis=0)
expected = df.iloc[[0, 2], :]
assert isinstance(result, GeoDataFrame)
assert_geodataframe_equal(result, expected)
# GeoDataFrame take axis 1
df = df.reindex(columns=["value1", "value2", "geometry"]) # ensure consistent order
result = df.take(inds, axis=1)
expected = df[["value1", "geometry"]]
assert isinstance(result, GeoDataFrame)
assert_geodataframe_equal(result, expected)
result = df.take(np.array([0, 1]), axis=1)
expected = df[["value1", "value2"]]
assert isinstance(result, pd.DataFrame)
assert_frame_equal(result, expected)
def test_take_empty(s, df):
# ensure that index type is preserved in an empty take
# https://github.com/geopandas/geopandas/issues/1190
inds = np.array([], dtype="int64")
# use non-default index
df.index = pd.date_range("2012-01-01", periods=len(df))
result = df.take(inds, axis=0)
assert isinstance(result, GeoDataFrame)
assert result.shape == (0, 3)
assert isinstance(result.index, pd.DatetimeIndex)
# the original bug report was an empty boolean mask
for result in [df.loc[df["value1"] > 100], df[df["value1"] > 100]]:
assert isinstance(result, GeoDataFrame)
assert result.shape == (0, 3)
assert isinstance(result.index, pd.DatetimeIndex)
def test_assignment(s, df):
exp = GeoSeries([Point(10, 10), Point(1, 1), Point(2, 2)])
s2 = s.copy()
s2[0] = Point(10, 10)
assert_geoseries_equal(s2, exp)
s2 = s.copy()
s2.loc[0] = Point(10, 10)
assert_geoseries_equal(s2, exp)
s2 = s.copy()
s2.iloc[0] = Point(10, 10)
assert_geoseries_equal(s2, exp)
df2 = df.copy()
df2.loc[0, "geometry"] = Point(10, 10)
assert_geoseries_equal(df2["geometry"], exp)
df2 = df.copy()
df2.iloc[0, 0] = Point(10, 10)
assert_geoseries_equal(df2["geometry"], exp)
def test_assign(df):
res = df.assign(new=1)
exp = df.copy()
exp["new"] = 1
assert isinstance(res, GeoDataFrame)
assert_frame_equal(res, exp)
def test_astype(s, df):
# check geoseries functionality
with pytest.raises(TypeError):
s.astype(int)
assert s.astype(str)[0] == "POINT (0 0)"
res = s.astype(object)
assert isinstance(res, pd.Series) and not isinstance(res, GeoSeries)
assert res.dtype == object
df = df.rename_geometry("geom_list")
# check whether returned object is a geodataframe
res = df.astype({"value1": float})
assert isinstance(res, GeoDataFrame)
    # check whether returned object is a dataframe
res = df.astype(str)
assert isinstance(res, pd.DataFrame) and not isinstance(res, GeoDataFrame)
res = df.astype({"geom_list": str})
assert isinstance(res, pd.DataFrame) and not isinstance(res, GeoDataFrame)
res = df.astype(object)
assert isinstance(res, pd.DataFrame) and not isinstance(res, GeoDataFrame)
assert res["geom_list"].dtype == object
def test_astype_invalid_geodataframe():
# https://github.com/geopandas/geopandas/issues/1144
# a GeoDataFrame without geometry column should not error in astype
df = GeoDataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
res = df.astype(object)
assert isinstance(res, pd.DataFrame) and not isinstance(res, GeoDataFrame)
assert res["a"].dtype == object
def test_to_csv(df):
exp = (
"geometry,value1,value2\nPOINT (0 0),0,1\nPOINT (1 1),1,2\nPOINT (2 2),2,1\n"
).replace("\n", os.linesep)
assert df.to_csv(index=False) == exp
def test_numerical_operations(s, df):
# df methods ignore the geometry column
exp = pd.Series([3, 4], index=["value1", "value2"])
assert_series_equal(df.sum(), exp)
# series methods raise error (not supported for geometry)
with pytest.raises(TypeError):
s.sum()
with pytest.raises(TypeError):
s.max()
with pytest.raises((TypeError, ValueError)):
# TODO: remove ValueError after pandas-dev/pandas#32749
s.idxmax()
# numerical ops raise an error
with pytest.raises(TypeError):
df + 1
with pytest.raises((TypeError, AssertionError)):
# TODO(pandas 0.23) remove AssertionError -> raised in 0.23
s + 1
# boolean comparisons work
res = df == 100
exp = pd.DataFrame(False, index=df.index, columns=df.columns)
assert_frame_equal(res, exp)
def test_where(s):
res = s.where(np.array([True, False, True]))
exp = GeoSeries([Point(0, 0), None, Point(2, 2)])
assert_series_equal(res, exp)
def test_select_dtypes(df):
res = df.select_dtypes(include=[np.number])
exp = df[["value1", "value2"]]
assert_frame_equal(res, exp)
def test_equals(s, df):
# https://github.com/geopandas/geopandas/issues/1420
s2 = s.copy()
assert s.equals(s2) is True
s2.iloc[0] = None
assert s.equals(s2) is False
df2 = df.copy()
assert df.equals(df2) is True
df2.loc[0, "geometry"] = Point(10, 10)
assert df.equals(df2) is False
df2 = df.copy()
df2.loc[0, "value1"] = 10
assert df.equals(df2) is False
# Missing values
def test_fillna(s, df):
s2 = GeoSeries([Point(0, 0), None, Point(2, 2)])
res = s2.fillna(Point(1, 1))
assert_geoseries_equal(res, s)
# allow np.nan although this does not change anything
# https://github.com/geopandas/geopandas/issues/1149
res = s2.fillna(np.nan)
assert_geoseries_equal(res, s2)
# raise exception if trying to fill missing geometry w/ non-geometry
df2 = df.copy()
df2["geometry"] = s2
res = df2.fillna(Point(1, 1))
assert_geodataframe_equal(res, df)
with pytest.raises(NotImplementedError):
df2.fillna(0)
# allow non-geometry fill value if there are no missing values
# https://github.com/geopandas/geopandas/issues/1149
df3 = df.copy()
df3.loc[0, "value1"] = np.nan
res = df3.fillna(0)
assert_geodataframe_equal(res.astype({"value1": "int64"}), df)
def test_dropna():
s2 = GeoSeries([Point(0, 0), None, Point(2, 2)])
res = s2.dropna()
exp = s2.loc[[0, 2]]
assert_geoseries_equal(res, exp)
@pytest.mark.parametrize("NA", [None, np.nan])
def test_isna(NA):
s2 = GeoSeries([Point(0, 0), NA, Point(2, 2)], index=[2, 4, 5], name="tt")
exp = pd.Series([False, True, False], index=[2, 4, 5], name="tt")
res = s2.isnull()
assert type(res) == pd.Series
assert_series_equal(res, exp)
res = s2.isna()
assert_series_equal(res, exp)
res = s2.notnull()
assert_series_equal(res, ~exp)
res = s2.notna()
assert_series_equal(res, ~exp)
# Any / all
def test_any_all():
empty = GeometryCollection([])
s = GeoSeries([empty, Point(1, 1)])
assert not s.all()
assert s.any()
s = GeoSeries([Point(1, 1), Point(1, 1)])
assert s.all()
assert s.any()
s = GeoSeries([empty, empty])
assert not s.all()
assert not s.any()
# Groupby / algos
def test_unique():
s = GeoSeries([Point(0, 0), Point(0, 0), Point(2, 2)])
exp = from_shapely([Point(0, 0), Point(2, 2)])
# TODO should have specialized GeometryArray assert method
assert_array_equal(s.unique(), exp)
@pytest.mark.xfail
def test_value_counts():
# each object is considered unique
s = GeoSeries([Point(0, 0), Point(1, 1), Point(0, 0)])
res = s.value_counts()
exp = pd.Series([2, 1], index=[Point(0, 0), Point(1, 1)])
assert_series_equal(res, exp)
@pytest.mark.xfail(strict=False)
def test_drop_duplicates_series():
# duplicated does not yet use EA machinery
# (https://github.com/pandas-dev/pandas/issues/27264)
# but relies on unstable hashing of unhashable objects in numpy array
# giving flaky test (https://github.com/pandas-dev/pandas/issues/27035)
dups = GeoSeries([Point(0, 0), Point(0, 0)])
dropped = dups.drop_duplicates()
assert len(dropped) == 1
@pytest.mark.xfail(strict=False)
def test_drop_duplicates_frame():
# duplicated does not yet use EA machinery, see above
gdf_len = 3
dup_gdf = GeoDataFrame(
{"geometry": [Point(0, 0) for _ in range(gdf_len)], "value1": range(gdf_len)}
)
dropped_geometry = dup_gdf.drop_duplicates(subset="geometry")
assert len(dropped_geometry) == 1
dropped_all = dup_gdf.drop_duplicates()
assert len(dropped_all) == gdf_len
def test_groupby(df):
# counts work fine
res = df.groupby("value2").count()
exp = pd.DataFrame(
{"geometry": [2, 1], "value1": [2, 1], "value2": [1, 2]}
).set_index("value2")
assert_frame_equal(res, exp)
# reductions ignore geometry column
res = df.groupby("value2").sum()
exp = pd.DataFrame({"value1": [2, 1], "value2": [1, 2]}, dtype="int64").set_index(
"value2"
)
assert_frame_equal(res, exp)
# applying on the geometry column
res = df.groupby("value2")["geometry"].apply(lambda x: x.cascaded_union)
if compat.PANDAS_GE_11:
exp = GeoSeries(
[shapely.geometry.MultiPoint([(0, 0), (2, 2)]), Point(1, 1)],
index=pd.Index([1, 2], name="value2"),
name="geometry",
)
else:
exp = pd.Series(
[shapely.geometry.MultiPoint([(0, 0), (2, 2)]), Point(1, 1)],
index=pd.Index([1, 2], name="value2"),
name="geometry",
)
assert_series_equal(res, exp)
# apply on geometry column not resulting in new geometry
res = df.groupby("value2")["geometry"].apply(lambda x: x.unary_union.area)
exp = pd.Series([0.0, 0.0], index=pd.Index([1, 2], name="value2"), name="geometry")
assert_series_equal(res, exp)
def test_groupby_groups(df):
g = df.groupby("value2")
res = g.get_group(1)
assert isinstance(res, GeoDataFrame)
exp = df.loc[[0, 2]]
assert_frame_equal(res, exp)
def test_apply(s):
# function that returns geometry preserves GeoSeries class
def geom_func(geom):
assert isinstance(geom, Point)
return geom
result = s.apply(geom_func)
assert isinstance(result, GeoSeries)
assert_geoseries_equal(result, s)
# function that returns non-geometry results in Series
def numeric_func(geom):
assert isinstance(geom, Point)
return geom.x
result = s.apply(numeric_func)
assert not isinstance(result, GeoSeries)
assert_series_equal(result, pd.Series([0.0, 1.0, 2.0]))
def test_apply_loc_len1(df):
# subset of len 1 with loc -> bug in pandas with inconsistent Block ndim
# resulting in bug in apply
# https://github.com/geopandas/geopandas/issues/1078
subset = df.loc[[0], "geometry"]
result = subset.apply(lambda geom: geom.is_empty)
expected = subset.is_empty
np.testing.assert_allclose(result, expected)
def test_apply_convert_dtypes_keyword(s):
# ensure the convert_dtypes keyword is accepted
res = s.apply(lambda x: x, convert_dtype=True, args=())
assert_geoseries_equal(res, s)
@pytest.mark.parametrize("crs", [None, "EPSG:4326"])
def test_apply_no_geometry_result(df, crs):
if crs:
df = df.set_crs(crs)
result = df.apply(lambda col: col.astype(str), axis=0)
# TODO this should actually not return a GeoDataFrame
assert isinstance(result, GeoDataFrame)
expected = df.astype(str)
assert_frame_equal(result, expected)
result = df.apply(lambda col: col.astype(str), axis=1)
assert isinstance(result, GeoDataFrame)
assert_frame_equal(result, expected)
@pytest.mark.skipif(not compat.PANDAS_GE_10, reason="attrs introduced in pandas 1.0")
def test_preserve_attrs(df):
# https://github.com/geopandas/geopandas/issues/1654
df.attrs["name"] = "my_name"
attrs = {"name": "my_name"}
assert df.attrs == attrs
# preserve attrs in indexing operations
for subset in [df[:2], df[df["value1"] > 2], df[["value2", "geometry"]]]:
        assert subset.attrs == attrs
# preserve attrs in methods
df2 = df.reset_index()
assert df2.attrs == attrs
# https://github.com/geopandas/geopandas/issues/1875
df3 = df2.explode()
assert df3.attrs == attrs
@pytest.mark.skipif(not compat.PANDAS_GE_12, reason="attrs introduced in pandas 1.0")
def test_preserve_flags(df):
# https://github.com/geopandas/geopandas/issues/1654
df = df.set_flags(allows_duplicate_labels=False)
assert df.flags.allows_duplicate_labels is False
# preserve flags in indexing operations
for subset in [df[:2], df[df["value1"] > 2], df[["value2", "geometry"]]]:
        assert subset.flags.allows_duplicate_labels is False
# preserve attrs in methods
df2 = df.reset_index()
assert df2.flags.allows_duplicate_labels is False
# it is honored for operations that introduce duplicate labels
with pytest.raises(ValueError):
df.reindex([0, 0, 1])
with pytest.raises(ValueError):
df[["value1", "value1", "geometry"]]
with pytest.raises(ValueError):
pd.concat([df, df])
43af19a12518ad21cdb7cc8255d8bba8b05aa1e7 | 91 | py | Python | surfice.app/Contents/Resources/script/startup_track.py | ningfei/surf-ice | BSD-2-Clause
import gl
gl.resetdefaults()
gl.trackload('stroke.trk.gz');
gl.trackprefs(15, 3, 0.5);