repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses 18 values) | size (stringlengths 4-7) | content (stringlengths 736-1.04M) | license (stringclasses 15 values) | hash (int64 -9,222,983,980,000,580,000 to 9,223,102,107B) | line_mean (float64 6.51-99.9) | line_max (int64 15-997) | alpha_frac (float64 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
denmojo/pygrow | grow/commands/filter.py | 1 | 2992 | from grow.pods import pods
from grow.pods import storage
import click
import os
@click.command()
@click.argument('pod_path', default='.')
@click.option('--include-obsolete/--no-include-obsolete', default=False,
is_flag=True,
help='Whether to include obsolete messages. If false, obsolete'
' messages will be removed from the catalog template. By'
' default, Grow cleans obsolete messages from the catalog'
' template.')
@click.option('--localized/--no-localized', default=False, is_flag=True,
help='Whether to create localized message catalogs. Use this'
' option if content varies by locale.')
@click.option('--locale', type=str, multiple=True,
help='Which locale(s) to analyze when creating template catalogs'
' that contain only untranslated messages. This option is'
' only applicable when using --untranslated.')
@click.option('--path', type=str, multiple=True,
help='Which paths to extract strings from. By default, all paths'
' are extracted. This option is useful if you\'d like to'
' generate a partial messages file representing just a'
' specific set of files.')
@click.option('-o', type=str, default=None,
help='Where to write the extracted translation catalog. The path'
' must be relative to the pod\'s root.')
@click.option('--include-header', default=False, is_flag=True,
help='Whether to preserve headers at the beginning of catalogs.')
@click.option('--out_dir', type=str, default=None,
help='Where to write extracted localized translation catalogs.'
' The path must be relative to the pod\'s root. This option'
' is only applicable when using --localized.')
@click.option('-f', default=False, is_flag=True,
help='Whether to force an update when writing localized message'
' catalogs.')
def filter(pod_path, locale, o, include_obsolete, localized, path,
include_header, out_dir, f):
"""Filters untranslated messages from catalogs into new catalogs."""
root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
pod = pods.Pod(root, storage=storage.FileStorage)
catalogs = pod.get_catalogs()
if not locale:
locale = catalogs.list_locales()
if out_dir and pod.file_exists(out_dir) and not f:
raise click.UsageError(
'{} exists. You must specify a directory that does not exist, or '
'use the "-f" flag, which will force update catalogs within the '
'specified directory.'.format(out_dir))
catalogs.filter(out_path=o, out_dir=out_dir,
include_obsolete=include_obsolete,
localized=localized, paths=path,
include_header=include_header, locales=locale)
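# Illustrative invocations (hypothetical pod paths; flags as defined above):
#   grow filter . --locale de --locale fr -o messages.pot
#   grow filter ./my-pod --localized --out_dir translations/ -f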
| mit | 5,153,167,556,879,288,000 | 53.4 | 79 | 0.621658 | false |
auto-mat/klub | apps/aklub/autocom.py | 1 | 6964 | # -*- coding: utf-8 -*-
# Author: Hynek Hanke <hynek.hanke@auto-mat.cz>
#
# Copyright (C) 2010 o.s. Auto*Mat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Automatic communications for club management"""
import datetime
import logging
import string
from django.core.exceptions import ValidationError
logger = logging.getLogger(__name__)
def _localize_enum(descr, val, lang):
for t in descr:
if t[0] == val:
# Hack! Here we should use the Gettext localization
# based on the value of 'lang' -- this is however
# not easy due to lazy translations and t[1] already
# being wrapped by a foreign translator
if lang == 'cs':
return t[1].lower()
else:
# less wrong would be to retrieve the original untranslated version of t[1]...
return t[0]
# Translation not found
return val
KNOWN_VARIABLES = [
"addressment", "name", "firstname", "surname", "street", "city", "zipcode", "email",
"telephone", "regular_amount", "regular_frequency", "var_symbol", "last_payment_amount",
"auth_token",
]
def min_non_negative(i, j):
if i < 0:
return j
if j < 0:
return i
return min(i, j)
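# For illustration: min_non_negative(3, 7) == 3, while min_non_negative(-1, 7) == 7;
# a negative argument is the "not found" result of str.find() and is ignored.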
def gendrify_text(text, sex=''):
# Modify text according to gender
# Example: Vazen{y|a} {pane|pani} -> [male] -> Vazeny pane
gender_text = ""
o = 0
i = 0
while i < len(text):
if text[i] == '{':
gender_text += text[o:i]
sep_pos = min_non_negative(text.find('|', i), text.find('/', i))
end_pos = text.find('}', i)
if sep_pos <= i or end_pos <= sep_pos:
raise ValidationError("Gender strings must look like {male_variant|female_variant} or {male_variant/female_variant}")
male_variant = text[i + 1:sep_pos]
female_variant = text[sep_pos + 1:end_pos]
if sex == 'male':
gender_text += male_variant
elif sex == 'female':
gender_text += female_variant
else:
gender_text += male_variant + "/" + female_variant
o = end_pos + 1
i = end_pos
i += 1
gender_text += text[o:]
return gender_text
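# A minimal illustration of the expansion implemented above, mirroring the
# example in the function's header comment:
#   gendrify_text('Vazen{y|a} {pane|pani}', 'male')   -> 'Vazeny pane'
#   gendrify_text('Vazen{y|a} {pane|pani}', 'female') -> 'Vazena pani'
#   gendrify_text('Vazen{y|a} {pane|pani}')           -> 'Vazeny/a pane/pani'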
def process_template(template_string, user, payment_channel):
from .models import DonorPaymentChannel
from sesame import utils as sesame_utils
template = string.Template(template_string)
if payment_channel:
payment_substitutes = {
'regular_amount': payment_channel.regular_amount,
'regular_frequency': _localize_enum(
DonorPaymentChannel.REGULAR_PAYMENT_FREQUENCIES,
payment_channel.regular_frequency,
user.language,
),
'var_symbol': payment_channel.VS,
'last_payment_amount': payment_channel.last_payment.amount if payment_channel.last_payment else None,
}
else:
payment_substitutes = {}
# Make variable substitutions
text = template.substitute(
addressment=user.get_addressment(),
last_name_vokativ=user.get_last_name_vokativ(),
name=user.first_name if hasattr(user, 'first_name') else user.name,
firstname=user.first_name if hasattr(user, 'first_name') else user.name,
surname=user.last_name if hasattr(user, 'first_name') else user.name,
street=user.street,
city=user.city,
zipcode=user.zip_code,
email=user.email,
telephone=user.get_telephone(),
auth_token=sesame_utils.get_query_string(user),
**payment_substitutes,
)
return gendrify_text(text, user.sex if hasattr(user, 'sex') else '')
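# Sketch of the substitution above (the template text is hypothetical; only the
# documented KNOWN_VARIABLES are guaranteed by this module):
#   process_template('Dear $addressment, your VS is $var_symbol.', user, channel)
# string.Template.substitute() raises KeyError for any placeholder not supplied.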
def check(user_profiles=None, action=None): # noqa
from .models import AutomaticCommunication, DonorPaymentChannel, UserProfile
from interactions.models import Interaction
if not user_profiles:
user_profiles = UserProfile.objects.all()
# limit autocoms only for autocoms where action is used
if action:
# TODO: handle nested conditions?
from flexible_filter_conditions.models import TerminalCondition
conditions = TerminalCondition.objects.filter(variable='action', value=action).values_list('condition')
auto_coms = AutomaticCommunication.objects.filter(condition__conditions__in=conditions)
else:
auto_coms = AutomaticCommunication.objects.all()
for auto_comm in auto_coms:
logger.info(
u"Processin condition \"%s\" for autocom \"%s\", method: \"%s\", action: \"%s\"" % (
auto_comm.condition,
auto_comm,
auto_comm.method_type,
action,
),
)
filtered_user_profiles = auto_comm.condition.filter_queryset(user_profiles, action)
for user in filtered_user_profiles:
try:
if auto_comm.event:
payment_channel = user.userchannels.get(event=auto_comm.event)
else:
payment_channel = None
except DonorPaymentChannel.DoesNotExist:
payment_channel = None
if auto_comm.only_once and auto_comm.sent_to_users.filter(pk=user.pk).exists():
continue
if user.language == 'cs':
template = auto_comm.template
subject = auto_comm.subject
else:
template = auto_comm.template_en
subject = auto_comm.subject_en
if template and template != '':
logger.info(u"Added new automatic communication \"%s\" for user \"%s\", action \"%s\"" % (auto_comm, user, action))
c = Interaction(
user=user,
type=auto_comm.method_type,
date_from=datetime.datetime.now(),
subject=subject,
summary=process_template(template, user, payment_channel),
note="Prepared by automated mailer at %s" % datetime.datetime.now(),
settlement='a',
administrative_unit=auto_comm.administrative_unit,
)
auto_comm.sent_to_users.add(user)
c.save()
| gpl-3.0 | 9,162,069,310,918,358,000 | 37.263736 | 133 | 0.597071 | false |
AunShiLord/sympy | sympy/series/tests/test_series.py | 1 | 4998 | from sympy import sin, cos, exp, E, series, oo, S, Derivative, O, Integral, \
Function, log, sqrt, Symbol, Subs, pi, symbols
from sympy.abc import x, y, n, k
from sympy.utilities.pytest import raises
from sympy.series.gruntz import calculate_series
def test_sin():
e1 = sin(x).series(x, 0)
e2 = series(sin(x), x, 0)
assert e1 == e2
def test_cos():
e1 = cos(x).series(x, 0)
e2 = series(cos(x), x, 0)
assert e1 == e2
def test_exp():
e1 = exp(x).series(x, 0)
e2 = series(exp(x), x, 0)
assert e1 == e2
def test_exp2():
e1 = exp(cos(x)).series(x, 0)
e2 = series(exp(cos(x)), x, 0)
assert e1 == e2
def test_issue_5223():
assert series(1, x) == 1
assert next(S(0).lseries(x)) == 0
assert cos(x).series() == cos(x).series(x)
raises(ValueError, lambda: cos(x + y).series())
raises(ValueError, lambda: x.series(dir=""))
assert (cos(x).series(x, 1) -
cos(x + 1).series(x).subs(x, x - 1)).removeO() == 0
e = cos(x).series(x, 1, n=None)
assert [next(e) for i in range(2)] == [cos(1), -((x - 1)*sin(1))]
e = cos(x).series(x, 1, n=None, dir='-')
assert [next(e) for i in range(2)] == [cos(1), (1 - x)*sin(1)]
# the following test is exact so no need for x -> x - 1 replacement
assert abs(x).series(x, 1, dir='-') == x
assert exp(x).series(x, 1, dir='-', n=3).removeO() == \
E - E*(-x + 1) + E*(-x + 1)**2/2
D = Derivative
assert D(x**2 + x**3*y**2, x, 2, y, 1).series(x).doit() == 12*x*y
assert next(D(cos(x), x).lseries()) == D(1, x)
assert D(
exp(x), x).series(n=3) == D(1, x) + D(x, x) + D(x**2/2, x) + O(x**3)
assert Integral(x, (x, 1, 3), (y, 1, x)).series(x) == -4 + 4*x
assert (1 + x + O(x**2)).getn() == 2
assert (1 + x).getn() is None
assert ((1/sin(x))**oo).series() == oo
logx = Symbol('logx')
assert ((sin(x))**y).nseries(x, n=1, logx=logx) == \
exp(y*logx) + O(x*exp(y*logx), x)
assert sin(1/x).series(x, oo, n=5) == 1/x - 1/(6*x**3) + O(x**(-5), (x, oo))
assert abs(x).series(x, oo, n=5, dir='+') == x
assert abs(x).series(x, -oo, n=5, dir='-') == -x
assert abs(-x).series(x, oo, n=5, dir='+') == x
assert abs(-x).series(x, -oo, n=5, dir='-') == -x
assert exp(x*log(x)).series(n=3) == \
1 + x*log(x) + x**2*log(x)**2/2 + O(x**3*log(x)**3)
# XXX is this right? If not, fix "ngot > n" handling in expr.
p = Symbol('p', positive=True)
assert exp(sqrt(p)**3*log(p)).series(n=3) == \
1 + p**S('3/2')*log(p) + O(p**3*log(p)**3)
assert exp(sin(x)*log(x)).series(n=2) == 1 + x*log(x) + O(x**2*log(x)**2)
def test_issue_3978():
f = Function('f')
assert f(x).series(x, 0, 3, dir='-') == \
f(0) + x*Subs(Derivative(f(x), x), (x,), (0,)) + \
x**2*Subs(Derivative(f(x), x, x), (x,), (0,))/2 + O(x**3)
assert f(x).series(x, 0, 3) == \
f(0) + x*Subs(Derivative(f(x), x), (x,), (0,)) + \
x**2*Subs(Derivative(f(x), x, x), (x,), (0,))/2 + O(x**3)
assert f(x**2).series(x, 0, 3) == \
f(0) + x**2*Subs(Derivative(f(x), x), (x,), (0,)) + O(x**3)
assert f(x**2+1).series(x, 0, 3) == \
f(1) + x**2*Subs(Derivative(f(x), x), (x,), (1,)) + O(x**3)
class TestF(Function):
pass
assert TestF(x).series(x, 0, 3) == TestF(0) + \
x*Subs(Derivative(TestF(x), x), (x,), (0,)) + \
x**2*Subs(Derivative(TestF(x), x, x), (x,), (0,))/2 + O(x**3)
from sympy.series.acceleration import richardson, shanks
from sympy import Sum, Integer
def test_acceleration():
e = (1 + 1/n)**n
assert round(richardson(e, n, 10, 20).evalf(), 10) == round(E.evalf(), 10)
A = Sum(Integer(-1)**(k + 1) / k, (k, 1, n))
assert round(shanks(A, n, 25).evalf(), 4) == round(log(2).evalf(), 4)
assert round(shanks(A, n, 25, 5).evalf(), 10) == round(log(2).evalf(), 10)
def test_issue_5852():
assert series(1/cos(x/log(x)), x, 0) == 1 + x**2/(2*log(x)**2) + \
5*x**4/(24*log(x)**4) + O(x**6)
def test_issue_4583():
assert cos(1 + x + x**2).series(x, 0, 5) == cos(1) - x*sin(1) + \
x**2*(-sin(1) - cos(1)/2) + x**3*(-cos(1) + sin(1)/6) + \
x**4*(-11*cos(1)/24 + sin(1)/2) + O(x**5)
def test_issue_6318():
eq = (1/x)**(S(2)/3)
assert (eq + 1).as_leading_term(x) == eq
def test_x_is_base_detection():
eq = (x**2)**(S(2)/3)
assert eq.series() == x**(S(4)/3)
def test_sin_power():
e = sin(x)**1.2
assert calculate_series(e, x) == x**1.2
def test_issue_7203():
assert series(cos(x), x, pi, 3) == \
-1 + (x - pi)**2/2 + O((x - pi)**3, (x, pi))
def test_exp_product_positive_factors():
a, b = symbols('a, b', positive=True)
x = a * b
assert series(exp(x), x, n=8) == 1 + a*b + a**2*b**2/2 + \
a**3*b**3/6 + a**4*b**4/24 + a**5*b**5/120 + a**6*b**6/720 + \
a**7*b**7/5040 + O(a**8*b**8, a, b)
def test_issue_8805():
assert series(1, n=8) == 1
| bsd-3-clause | 1,150,461,149,742,997,500 | 31.454545 | 80 | 0.492597 | false |
pony-revolution/helpothers | helpothers/views.py | 1 | 1239 | from django.contrib.auth import get_user_model
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView
from .views_mixins import HelpOthersMetaDataMixin
from listings.models import GatheringCenter, Resource
class HomeView(HelpOthersMetaDataMixin, TemplateView):
template_name = 'home.html'
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
context['gathering_centers'] = GatheringCenter.objects.filter(published=True)
context['resources'] = Resource.objects.filter(published=True)
return context
class LoginView(HelpOthersMetaDataMixin, TemplateView):
template_name = 'login.html'
def get_context_data(self, **kwargs):
ctx = super(LoginView, self).get_context_data(**kwargs)
ctx['next'] = self.request.GET.get('next')
return ctx
class ProfileView(HelpOthersMetaDataMixin, UpdateView):
context_object_name = 'profile'
template_name = 'accounts/profile.html'
fields = ('user__first_name', 'user__last_name', 'user__email')
def get_object(self, queryset=None):
return self.request.user.profile
| apache-2.0 | -828,806,971,788,788,700 | 34.4 | 85 | 0.727199 | false |
paineliu/tflearn | helen.py | 1 | 1084 | import os
from PIL import Image
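# Overlays the Helen facial-landmark annotations onto the training images:
# each listed image is resized to 256x256 grayscale and the annotation points
# (rescaled from the original resolution) are painted in as white dots.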
img_path = '/home/palm/deep/helen/train'
lab_path = '/home/palm/deep/helen/annotation'
filename = '/home/palm/deep/helen/trainnames.txt'
f = open(filename)
index = 1
for each in f:
each = each.strip()
img_file = os.path.join(img_path, each + '.jpg')
img = Image.open(img_file)
width, height = img.size
img = img.resize((256, 256))
img = img.convert('L')
lab_file = os.path.join(lab_path, str(index) + '.txt')
fl = open(lab_file)
for line in fl:
line = line.strip()
item = line.split(',')
if len(item) == 2:
x = int(float(item[0]) * 256 / width)
y = int(float(item[1]) * 256 / height)
            if x > 0 and x < img.size[0] and y > 0 and y < img.size[1]:
                # the image is 8-bit grayscale ('L') after convert(), so white is 255
                img.putpixel((x, y), 255)
                img.putpixel((x-1, y), 255)
                img.putpixel((x, y-1), 255)
                img.putpixel((x-1, y-1), 255)
else:
print index, each, img.size, x, y
index += 1
img.show()
break
| apache-2.0 | -8,658,315,276,180,879,000 | 26.1 | 71 | 0.530443 | false |
tisnik/fabric8-analytics-common | dashboard/src/jacoco_to_codecov.py | 1 | 4579 | """Module to convert JaCoCo coverage report into the report compatible with Pycov utility."""
import csv
def format_coverage_line(text, statements, missed, coverage, missed_lines=False):
"""Format one line with code coverage report of one class or for a summary."""
format_string = "{:80} {:3d} {:3d} {:3d}%"
if missed_lines:
format_string += " N/A"
return format_string.format(text, statements, missed, coverage)
def compute_coverage(statements, covered):
"""Compute code coverage based on number of all statemts and number of covered statements."""
return 100.0 * covered / statements
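# For illustration: compute_coverage(200, 150) evaluates to 75.0
# (150 of 200 statements covered).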
class JavaClassCoverageReport:
"""Class representing code coverage report for one Java class."""
def __init__(self, record):
"""Initialize the object by using record read from the CSV file."""
self.group = record[0]
self.package = record[1]
self.class_name = record[2]
self.missed = int(record[7])
self.covered = int(record[8])
self.statements = self.covered + self.missed
self.coverage = compute_coverage(self.statements, self.covered)
def __str__(self):
"""Return readable text representation compatible with Pycov utility output."""
pc = "{package}/{class_name}".format(package=self.package, class_name=self.class_name)
return format_coverage_line(pc, self.statements, self.missed, int(self.coverage))
class ProjectCoverageReport:
"""Class to perform conversion from JaCoCo output to report compatible with Pycov utility."""
def __init__(self, csv_input_file_name):
"""Initialize the object, store the name of input (CSV) file."""
self.csv_input_file_name = csv_input_file_name
@staticmethod
def read_csv(csv_input_file_name, skip_first_line=False):
"""Read the given CSV file, parse it, and return as list of records."""
output = []
with open(csv_input_file_name, 'r') as fin:
csv_content = csv.reader(fin, delimiter=',')
if skip_first_line:
next(csv_content, None)
for row in csv_content:
output.append(row)
return output
@staticmethod
def write_horizontal_rule(fout):
"""Write horizontal rule into the output file."""
fout.write("-" * 108)
fout.write("\n")
@staticmethod
def write_coverage_report_header(fout):
"""Write header compatible with Pycov to the output file."""
fout.write("{:80} {:5} {:4} {:5} {}\n".format(
"Name", "Stmts", "Miss", "Cover", "Missing"))
ProjectCoverageReport.write_horizontal_rule(fout)
@staticmethod
def write_coverage_report_summary(fout, statements, missed, coverage):
"""Write summary compatible with Pycov to the output file."""
ProjectCoverageReport.write_horizontal_rule(fout)
fout.write(format_coverage_line("TOTAL", statements, missed, int(coverage)))
fout.write("\n")
def read_java_classes(self):
"""Read and parse into about Java classes from JaCoCo results."""
data = ProjectCoverageReport.read_csv(self.csv_input_file_name, True)
return [JavaClassCoverageReport(record) for record in data]
def convert_code_coverage_report(self, output_file_name):
"""Convert code coverage report that would be compatible with PyCov output."""
java_classes = self.read_java_classes()
statements, missed, coverage = ProjectCoverageReport.compute_total(java_classes)
with open(output_file_name, "w") as fout:
ProjectCoverageReport.write_coverage_report_header(fout)
for java_class in java_classes:
fout.write(str(java_class) + "\n")
ProjectCoverageReport.write_coverage_report_summary(fout, statements, missed, coverage)
@staticmethod
def compute_total(records):
"""Compute total/summary from all Java class coverage reports."""
statements = 0
covered = 0
missed = 0
for record in records:
statements += record.statements
covered += record.covered
missed += record.missed
coverage = compute_coverage(statements, covered)
return statements, missed, coverage
def main():
"""Just a test ATM."""
p = ProjectCoverageReport("fabric8-analytics-jenkins-plugin.coverage.csv")
p.convert_code_coverage_report("fabric8-analytics-jenkins-plugin.coverage.txt")
if __name__ == "__main__":
# execute only if run as a script
main()
| apache-2.0 | -754,818,450,584,529,800 | 39.166667 | 99 | 0.647085 | false |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/common/types/ad_type_infos.py | 1 | 46175 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.common.types import ad_asset
from google.ads.googleads.v8.enums.types import call_conversion_reporting_state
from google.ads.googleads.v8.enums.types import display_ad_format_setting
from google.ads.googleads.v8.enums.types import display_upload_product_type as gage_display_upload_product_type
from google.ads.googleads.v8.enums.types import legacy_app_install_ad_app_store
from google.ads.googleads.v8.enums.types import mime_type as gage_mime_type
__protobuf__ = proto.module(
package='google.ads.googleads.v8.common',
marshal='google.ads.googleads.v8',
manifest={
'TextAdInfo',
'ExpandedTextAdInfo',
'ExpandedDynamicSearchAdInfo',
'HotelAdInfo',
'ShoppingSmartAdInfo',
'ShoppingProductAdInfo',
'ShoppingComparisonListingAdInfo',
'GmailAdInfo',
'GmailTeaser',
'DisplayCallToAction',
'ProductImage',
'ProductVideo',
'ImageAdInfo',
'VideoBumperInStreamAdInfo',
'VideoNonSkippableInStreamAdInfo',
'VideoTrueViewInStreamAdInfo',
'VideoOutstreamAdInfo',
'VideoTrueViewDiscoveryAdInfo',
'VideoAdInfo',
'VideoResponsiveAdInfo',
'ResponsiveSearchAdInfo',
'LegacyResponsiveDisplayAdInfo',
'AppAdInfo',
'AppEngagementAdInfo',
'LegacyAppInstallAdInfo',
'ResponsiveDisplayAdInfo',
'LocalAdInfo',
'DisplayUploadAdInfo',
'ResponsiveDisplayAdControlSpec',
'SmartCampaignAdInfo',
'CallAdInfo',
},
)
class TextAdInfo(proto.Message):
r"""A text ad.
Attributes:
headline (str):
The headline of the ad.
description1 (str):
The first line of the ad's description.
description2 (str):
The second line of the ad's description.
"""
headline = proto.Field(
proto.STRING,
number=4,
optional=True,
)
description1 = proto.Field(
proto.STRING,
number=5,
optional=True,
)
description2 = proto.Field(
proto.STRING,
number=6,
optional=True,
)
class ExpandedTextAdInfo(proto.Message):
r"""An expanded text ad.
Attributes:
headline_part1 (str):
The first part of the ad's headline.
headline_part2 (str):
The second part of the ad's headline.
headline_part3 (str):
The third part of the ad's headline.
description (str):
The description of the ad.
description2 (str):
The second description of the ad.
path1 (str):
The text that can appear alongside the ad's
displayed URL.
path2 (str):
Additional text that can appear alongside the
ad's displayed URL.
"""
headline_part1 = proto.Field(
proto.STRING,
number=8,
optional=True,
)
headline_part2 = proto.Field(
proto.STRING,
number=9,
optional=True,
)
headline_part3 = proto.Field(
proto.STRING,
number=10,
optional=True,
)
description = proto.Field(
proto.STRING,
number=11,
optional=True,
)
description2 = proto.Field(
proto.STRING,
number=12,
optional=True,
)
path1 = proto.Field(
proto.STRING,
number=13,
optional=True,
)
path2 = proto.Field(
proto.STRING,
number=14,
optional=True,
)
class ExpandedDynamicSearchAdInfo(proto.Message):
r"""An expanded dynamic search ad.
Attributes:
description (str):
The description of the ad.
description2 (str):
The second description of the ad.
"""
description = proto.Field(
proto.STRING,
number=3,
optional=True,
)
description2 = proto.Field(
proto.STRING,
number=4,
optional=True,
)
class HotelAdInfo(proto.Message):
r"""A hotel ad. """
class ShoppingSmartAdInfo(proto.Message):
r"""A Smart Shopping ad. """
class ShoppingProductAdInfo(proto.Message):
r"""A standard Shopping ad. """
class ShoppingComparisonListingAdInfo(proto.Message):
r"""A Shopping Comparison Listing ad.
Attributes:
headline (str):
Headline of the ad. This field is required.
Allowed length is between 25 and 45 characters.
"""
headline = proto.Field(
proto.STRING,
number=2,
optional=True,
)
class GmailAdInfo(proto.Message):
r"""A Gmail ad.
Attributes:
teaser (google.ads.googleads.v8.common.types.GmailTeaser):
The Gmail teaser.
header_image (str):
The MediaFile resource name of the header
image. Valid image types are GIF, JPEG and PNG.
The minimum size is 300x100 pixels and the
aspect ratio must be between 3:1 and 5:1 (+-1%).
marketing_image (str):
The MediaFile resource name of the marketing
image. Valid image types are GIF, JPEG and PNG.
The image must either be landscape with a
minimum size of 600x314 pixels and aspect ratio
of 600:314 (+-1%) or square with a minimum size
of 300x300 pixels and aspect ratio of 1:1 (+-1%)
marketing_image_headline (str):
Headline of the marketing image.
marketing_image_description (str):
Description of the marketing image.
marketing_image_display_call_to_action (google.ads.googleads.v8.common.types.DisplayCallToAction):
Display-call-to-action of the marketing
image.
product_images (Sequence[google.ads.googleads.v8.common.types.ProductImage]):
Product images. Up to 15 images are
supported.
product_videos (Sequence[google.ads.googleads.v8.common.types.ProductVideo]):
Product videos. Up to 7 videos are supported.
At least one product video or a marketing image
must be specified.
"""
teaser = proto.Field(
proto.MESSAGE,
number=1,
message='GmailTeaser',
)
header_image = proto.Field(
proto.STRING,
number=10,
optional=True,
)
marketing_image = proto.Field(
proto.STRING,
number=11,
optional=True,
)
marketing_image_headline = proto.Field(
proto.STRING,
number=12,
optional=True,
)
marketing_image_description = proto.Field(
proto.STRING,
number=13,
optional=True,
)
marketing_image_display_call_to_action = proto.Field(
proto.MESSAGE,
number=6,
message='DisplayCallToAction',
)
product_images = proto.RepeatedField(
proto.MESSAGE,
number=7,
message='ProductImage',
)
product_videos = proto.RepeatedField(
proto.MESSAGE,
number=8,
message='ProductVideo',
)
class GmailTeaser(proto.Message):
r"""Gmail teaser data. The teaser is a small header that acts as
an invitation to view the rest of the ad (the body).
Attributes:
headline (str):
Headline of the teaser.
description (str):
Description of the teaser.
business_name (str):
Business name of the advertiser.
logo_image (str):
The MediaFile resource name of the logo
image. Valid image types are GIF, JPEG and PNG.
The minimum size is 144x144 pixels and the
aspect ratio must be 1:1 (+-1%).
"""
headline = proto.Field(
proto.STRING,
number=5,
optional=True,
)
description = proto.Field(
proto.STRING,
number=6,
optional=True,
)
business_name = proto.Field(
proto.STRING,
number=7,
optional=True,
)
logo_image = proto.Field(
proto.STRING,
number=8,
optional=True,
)
class DisplayCallToAction(proto.Message):
r"""Data for display call to action. The call to action is a
piece of the ad that prompts the user to do something. Like
clicking a link or making a phone call.
Attributes:
text (str):
Text for the display-call-to-action.
text_color (str):
Text color for the display-call-to-action in
hexadecimal, e.g. #ffffff for white.
url_collection_id (str):
Identifies the url collection in the ad.url_collections
field. If not set the url defaults to final_url.
"""
text = proto.Field(
proto.STRING,
number=5,
optional=True,
)
text_color = proto.Field(
proto.STRING,
number=6,
optional=True,
)
url_collection_id = proto.Field(
proto.STRING,
number=7,
optional=True,
)
class ProductImage(proto.Message):
r"""Product image specific data.
Attributes:
product_image (str):
The MediaFile resource name of the product
image. Valid image types are GIF, JPEG and PNG.
The minimum size is 300x300 pixels and the
aspect ratio must be 1:1 (+-1%).
description (str):
Description of the product.
display_call_to_action (google.ads.googleads.v8.common.types.DisplayCallToAction):
Display-call-to-action of the product image.
"""
product_image = proto.Field(
proto.STRING,
number=4,
optional=True,
)
description = proto.Field(
proto.STRING,
number=5,
optional=True,
)
display_call_to_action = proto.Field(
proto.MESSAGE,
number=3,
message='DisplayCallToAction',
)
class ProductVideo(proto.Message):
r"""Product video specific data.
Attributes:
product_video (str):
The MediaFile resource name of a video which
must be hosted on YouTube.
"""
product_video = proto.Field(
proto.STRING,
number=2,
optional=True,
)
class ImageAdInfo(proto.Message):
r"""An image ad.
Attributes:
pixel_width (int):
Width in pixels of the full size image.
pixel_height (int):
Height in pixels of the full size image.
image_url (str):
URL of the full size image.
preview_pixel_width (int):
Width in pixels of the preview size image.
preview_pixel_height (int):
Height in pixels of the preview size image.
preview_image_url (str):
URL of the preview size image.
mime_type (google.ads.googleads.v8.enums.types.MimeTypeEnum.MimeType):
The mime type of the image.
name (str):
The name of the image. If the image was
created from a MediaFile, this is the
MediaFile's name. If the image was created from
bytes, this is empty.
media_file (str):
The MediaFile resource to use for the image.
data (bytes):
Raw image data as bytes.
ad_id_to_copy_image_from (int):
An ad ID to copy the image from.
"""
pixel_width = proto.Field(
proto.INT64,
number=15,
optional=True,
)
pixel_height = proto.Field(
proto.INT64,
number=16,
optional=True,
)
image_url = proto.Field(
proto.STRING,
number=17,
optional=True,
)
preview_pixel_width = proto.Field(
proto.INT64,
number=18,
optional=True,
)
preview_pixel_height = proto.Field(
proto.INT64,
number=19,
optional=True,
)
preview_image_url = proto.Field(
proto.STRING,
number=20,
optional=True,
)
mime_type = proto.Field(
proto.ENUM,
number=10,
enum=gage_mime_type.MimeTypeEnum.MimeType,
)
name = proto.Field(
proto.STRING,
number=21,
optional=True,
)
media_file = proto.Field(
proto.STRING,
number=12,
oneof='image',
)
data = proto.Field(
proto.BYTES,
number=13,
oneof='image',
)
ad_id_to_copy_image_from = proto.Field(
proto.INT64,
number=14,
oneof='image',
)
class VideoBumperInStreamAdInfo(proto.Message):
r"""Representation of video bumper in-stream ad format (very
short in-stream non-skippable video ad).
Attributes:
companion_banner (str):
The MediaFile resource name of the companion
banner used with the ad.
"""
companion_banner = proto.Field(
proto.STRING,
number=2,
optional=True,
)
class VideoNonSkippableInStreamAdInfo(proto.Message):
r"""Representation of video non-skippable in-stream ad format (15
second in-stream non-skippable video ad).
Attributes:
companion_banner (str):
The MediaFile resource name of the companion
banner used with the ad.
"""
companion_banner = proto.Field(
proto.STRING,
number=2,
optional=True,
)
class VideoTrueViewInStreamAdInfo(proto.Message):
r"""Representation of video TrueView in-stream ad format (ad
shown during video playback, often at beginning, which displays
a skip button a few seconds into the video).
Attributes:
action_button_label (str):
Label on the CTA (call-to-action) button
taking the user to the video ad's final URL.
Required for TrueView for action campaigns,
optional otherwise.
action_headline (str):
            Additional text displayed with the CTA
            (call-to-action) button to give context and
            encourage clicking on the button.
companion_banner (str):
The MediaFile resource name of the companion
banner used with the ad.
"""
action_button_label = proto.Field(
proto.STRING,
number=4,
optional=True,
)
action_headline = proto.Field(
proto.STRING,
number=5,
optional=True,
)
companion_banner = proto.Field(
proto.STRING,
number=6,
optional=True,
)
class VideoOutstreamAdInfo(proto.Message):
r"""Representation of video out-stream ad format (ad shown
alongside a feed with automatic playback, without sound).
Attributes:
headline (str):
The headline of the ad.
description (str):
The description line.
"""
headline = proto.Field(
proto.STRING,
number=3,
optional=True,
)
description = proto.Field(
proto.STRING,
number=4,
optional=True,
)
class VideoTrueViewDiscoveryAdInfo(proto.Message):
r"""Representation of video TrueView discovery ad format.
Attributes:
headline (str):
The headline of the ad.
description1 (str):
First text line for a TrueView video
discovery ad.
description2 (str):
Second text line for a TrueView video
discovery ad.
"""
headline = proto.Field(
proto.STRING,
number=4,
optional=True,
)
description1 = proto.Field(
proto.STRING,
number=5,
optional=True,
)
description2 = proto.Field(
proto.STRING,
number=6,
optional=True,
)
class VideoAdInfo(proto.Message):
r"""A video ad.
Attributes:
media_file (str):
The MediaFile resource to use for the video.
in_stream (google.ads.googleads.v8.common.types.VideoTrueViewInStreamAdInfo):
Video TrueView in-stream ad format.
bumper (google.ads.googleads.v8.common.types.VideoBumperInStreamAdInfo):
Video bumper in-stream ad format.
out_stream (google.ads.googleads.v8.common.types.VideoOutstreamAdInfo):
Video out-stream ad format.
non_skippable (google.ads.googleads.v8.common.types.VideoNonSkippableInStreamAdInfo):
Video non-skippable in-stream ad format.
discovery (google.ads.googleads.v8.common.types.VideoTrueViewDiscoveryAdInfo):
Video TrueView discovery ad format.
"""
media_file = proto.Field(
proto.STRING,
number=7,
optional=True,
)
in_stream = proto.Field(
proto.MESSAGE,
number=2,
oneof='format',
message='VideoTrueViewInStreamAdInfo',
)
bumper = proto.Field(
proto.MESSAGE,
number=3,
oneof='format',
message='VideoBumperInStreamAdInfo',
)
out_stream = proto.Field(
proto.MESSAGE,
number=4,
oneof='format',
message='VideoOutstreamAdInfo',
)
non_skippable = proto.Field(
proto.MESSAGE,
number=5,
oneof='format',
message='VideoNonSkippableInStreamAdInfo',
)
discovery = proto.Field(
proto.MESSAGE,
number=6,
oneof='format',
message='VideoTrueViewDiscoveryAdInfo',
)
class VideoResponsiveAdInfo(proto.Message):
r"""A video responsive ad.
Attributes:
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets used for the short
headline, e.g. the "Call To Action" banner.
Currently, only a single value for the short
headline is supported.
long_headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets used for the long
headline. Currently, only a single value for the
long headline is supported.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets used for the description.
Currently, only a single value for the
description is supported.
call_to_actions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets used for the button, e.g.
the "Call To Action" button. Currently, only a
single value for the button is supported.
videos (Sequence[google.ads.googleads.v8.common.types.AdVideoAsset]):
List of YouTube video assets used for the ad.
Currently, only a single value for the YouTube
video asset is supported.
companion_banners (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
List of image assets used for the companion
banner. Currently, only a single value for the
companion banner asset is supported.
"""
headlines = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
long_headlines = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ad_asset.AdTextAsset,
)
call_to_actions = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=ad_asset.AdTextAsset,
)
videos = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=ad_asset.AdVideoAsset,
)
companion_banners = proto.RepeatedField(
proto.MESSAGE,
number=6,
message=ad_asset.AdImageAsset,
)
class ResponsiveSearchAdInfo(proto.Message):
r"""A responsive search ad.
Responsive search ads let you create an ad that adapts to show
more text, and more relevant messages, to your customers. Enter
multiple headlines and descriptions when creating a responsive
search ad, and over time, Google Ads will automatically test
different combinations and learn which combinations perform
best. By adapting your ad's content to more closely match
potential customers' search terms, responsive search ads may
improve your campaign's performance.
    More information at
    https://support.google.com/google-ads/answer/7684791
Attributes:
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list.
path1 (str):
First part of text that may appear appended
to the url displayed in the ad.
path2 (str):
Second part of text that may appear appended
to the url displayed in the ad. This field can
only be set when path1 is also set.
"""
headlines = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
path1 = proto.Field(
proto.STRING,
number=5,
optional=True,
)
path2 = proto.Field(
proto.STRING,
number=6,
optional=True,
)
class LegacyResponsiveDisplayAdInfo(proto.Message):
r"""A legacy responsive display ad. Ads of this type are labeled
'Responsive ads' in the Google Ads UI.
Attributes:
short_headline (str):
The short version of the ad's headline.
long_headline (str):
The long version of the ad's headline.
description (str):
The description of the ad.
business_name (str):
The business name in the ad.
allow_flexible_color (bool):
Advertiser's consent to allow flexible color. When true, the
ad may be served with different color if necessary. When
false, the ad will be served with the specified colors or a
neutral color. The default value is true. Must be true if
main_color and accent_color are not set.
accent_color (str):
The accent color of the ad in hexadecimal, e.g. #ffffff for
white. If one of main_color and accent_color is set, the
other is required as well.
main_color (str):
The main color of the ad in hexadecimal, e.g. #ffffff for
white. If one of main_color and accent_color is set, the
other is required as well.
call_to_action_text (str):
The call-to-action text for the ad.
logo_image (str):
The MediaFile resource name of the logo image
used in the ad.
square_logo_image (str):
The MediaFile resource name of the square
logo image used in the ad.
marketing_image (str):
The MediaFile resource name of the marketing
image used in the ad.
square_marketing_image (str):
The MediaFile resource name of the square
marketing image used in the ad.
format_setting (google.ads.googleads.v8.enums.types.DisplayAdFormatSettingEnum.DisplayAdFormatSetting):
Specifies which format the ad will be served in. Default is
ALL_FORMATS.
price_prefix (str):
Prefix before price. E.g. 'as low as'.
promo_text (str):
Promotion text used for dynamic formats of
responsive ads. For example 'Free two-day
shipping'.
"""
short_headline = proto.Field(
proto.STRING,
number=16,
optional=True,
)
long_headline = proto.Field(
proto.STRING,
number=17,
optional=True,
)
description = proto.Field(
proto.STRING,
number=18,
optional=True,
)
business_name = proto.Field(
proto.STRING,
number=19,
optional=True,
)
allow_flexible_color = proto.Field(
proto.BOOL,
number=20,
optional=True,
)
accent_color = proto.Field(
proto.STRING,
number=21,
optional=True,
)
main_color = proto.Field(
proto.STRING,
number=22,
optional=True,
)
call_to_action_text = proto.Field(
proto.STRING,
number=23,
optional=True,
)
logo_image = proto.Field(
proto.STRING,
number=24,
optional=True,
)
square_logo_image = proto.Field(
proto.STRING,
number=25,
optional=True,
)
marketing_image = proto.Field(
proto.STRING,
number=26,
optional=True,
)
square_marketing_image = proto.Field(
proto.STRING,
number=27,
optional=True,
)
format_setting = proto.Field(
proto.ENUM,
number=13,
enum=display_ad_format_setting.DisplayAdFormatSettingEnum.DisplayAdFormatSetting,
)
price_prefix = proto.Field(
proto.STRING,
number=28,
optional=True,
)
promo_text = proto.Field(
proto.STRING,
number=29,
optional=True,
)
class AppAdInfo(proto.Message):
r"""An app ad.
Attributes:
mandatory_ad_text (google.ads.googleads.v8.common.types.AdTextAsset):
Mandatory ad text.
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list.
images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
List of image assets that may be displayed
with the ad.
youtube_videos (Sequence[google.ads.googleads.v8.common.types.AdVideoAsset]):
List of YouTube video assets that may be
displayed with the ad.
html5_media_bundles (Sequence[google.ads.googleads.v8.common.types.AdMediaBundleAsset]):
List of media bundle assets that may be used
with the ad.
"""
mandatory_ad_text = proto.Field(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
headlines = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ad_asset.AdTextAsset,
)
images = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=ad_asset.AdImageAsset,
)
youtube_videos = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=ad_asset.AdVideoAsset,
)
html5_media_bundles = proto.RepeatedField(
proto.MESSAGE,
number=6,
message=ad_asset.AdMediaBundleAsset,
)
class AppEngagementAdInfo(proto.Message):
r"""App engagement ads allow you to write text encouraging a
specific action in the app, like checking in, making a purchase,
or booking a flight. They allow you to send users to a specific
part of your app where they can find what they're looking for
easier and faster.
Attributes:
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list.
images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
List of image assets that may be displayed
with the ad.
videos (Sequence[google.ads.googleads.v8.common.types.AdVideoAsset]):
List of video assets that may be displayed
with the ad.
"""
headlines = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
images = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ad_asset.AdImageAsset,
)
videos = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=ad_asset.AdVideoAsset,
)
class LegacyAppInstallAdInfo(proto.Message):
r"""A legacy app install ad that only can be used by a few select
customers.
Attributes:
app_id (str):
The id of the mobile app.
app_store (google.ads.googleads.v8.enums.types.LegacyAppInstallAdAppStoreEnum.LegacyAppInstallAdAppStore):
The app store the mobile app is available in.
headline (str):
The headline of the ad.
description1 (str):
The first description line of the ad.
description2 (str):
The second description line of the ad.
"""
app_id = proto.Field(
proto.STRING,
number=6,
optional=True,
)
app_store = proto.Field(
proto.ENUM,
number=2,
enum=legacy_app_install_ad_app_store.LegacyAppInstallAdAppStoreEnum.LegacyAppInstallAdAppStore,
)
headline = proto.Field(
proto.STRING,
number=7,
optional=True,
)
description1 = proto.Field(
proto.STRING,
number=8,
optional=True,
)
description2 = proto.Field(
proto.STRING,
number=9,
optional=True,
)
class ResponsiveDisplayAdInfo(proto.Message):
r"""A responsive display ad.
Attributes:
marketing_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
Marketing images to be used in the ad. Valid image types are
GIF, JPEG, and PNG. The minimum size is 600x314 and the
aspect ratio must be 1.91:1 (+-1%). At least one
marketing_image is required. Combined with
square_marketing_images the maximum is 15.
square_marketing_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
Square marketing images to be used in the ad. Valid image
types are GIF, JPEG, and PNG. The minimum size is 300x300
and the aspect ratio must be 1:1 (+-1%). At least one square
marketing_image is required. Combined with marketing_images
the maximum is 15.
logo_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
Logo images to be used in the ad. Valid image types are GIF,
JPEG, and PNG. The minimum size is 512x128 and the aspect
ratio must be 4:1 (+-1%). Combined with square_logo_images
the maximum is 5.
square_logo_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
Square logo images to be used in the ad. Valid image types
are GIF, JPEG, and PNG. The minimum size is 128x128 and the
aspect ratio must be 1:1 (+-1%). Combined with
square_logo_images the maximum is 5.
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
Short format headlines for the ad. The
maximum length is 30 characters. At least 1 and
max 5 headlines can be specified.
long_headline (google.ads.googleads.v8.common.types.AdTextAsset):
A required long format headline. The maximum
length is 90 characters.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
Descriptive texts for the ad. The maximum
length is 90 characters. At least 1 and max 5
headlines can be specified.
youtube_videos (Sequence[google.ads.googleads.v8.common.types.AdVideoAsset]):
Optional YouTube videos for the ad. A maximum
of 5 videos can be specified.
business_name (str):
The advertiser/brand name. Maximum display
width is 25.
main_color (str):
The main color of the ad in hexadecimal, e.g. #ffffff for
white. If one of main_color and accent_color is set, the
other is required as well.
accent_color (str):
The accent color of the ad in hexadecimal, e.g. #ffffff for
white. If one of main_color and accent_color is set, the
other is required as well.
allow_flexible_color (bool):
Advertiser's consent to allow flexible color. When true, the
ad may be served with different color if necessary. When
false, the ad will be served with the specified colors or a
neutral color. The default value is true. Must be true if
main_color and accent_color are not set.
call_to_action_text (str):
The call-to-action text for the ad. Maximum
display width is 30.
price_prefix (str):
Prefix before price. E.g. 'as low as'.
promo_text (str):
Promotion text used for dynamic formats of
responsive ads. For example 'Free two-day
shipping'.
format_setting (google.ads.googleads.v8.enums.types.DisplayAdFormatSettingEnum.DisplayAdFormatSetting):
Specifies which format the ad will be served in. Default is
ALL_FORMATS.
control_spec (google.ads.googleads.v8.common.types.ResponsiveDisplayAdControlSpec):
Specification for various creative controls.
"""
marketing_images = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdImageAsset,
)
square_marketing_images = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdImageAsset,
)
logo_images = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ad_asset.AdImageAsset,
)
square_logo_images = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=ad_asset.AdImageAsset,
)
headlines = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=ad_asset.AdTextAsset,
)
long_headline = proto.Field(
proto.MESSAGE,
number=6,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=7,
message=ad_asset.AdTextAsset,
)
youtube_videos = proto.RepeatedField(
proto.MESSAGE,
number=8,
message=ad_asset.AdVideoAsset,
)
business_name = proto.Field(
proto.STRING,
number=17,
optional=True,
)
main_color = proto.Field(
proto.STRING,
number=18,
optional=True,
)
accent_color = proto.Field(
proto.STRING,
number=19,
optional=True,
)
allow_flexible_color = proto.Field(
proto.BOOL,
number=20,
optional=True,
)
call_to_action_text = proto.Field(
proto.STRING,
number=21,
optional=True,
)
price_prefix = proto.Field(
proto.STRING,
number=22,
optional=True,
)
promo_text = proto.Field(
proto.STRING,
number=23,
optional=True,
)
format_setting = proto.Field(
proto.ENUM,
number=16,
enum=display_ad_format_setting.DisplayAdFormatSettingEnum.DisplayAdFormatSetting,
)
control_spec = proto.Field(
proto.MESSAGE,
number=24,
message='ResponsiveDisplayAdControlSpec',
)
class LocalAdInfo(proto.Message):
r"""A local ad.
Attributes:
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list. At least 1 and at most 5 headlines
must be specified.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list. At least 1 and at most 5
descriptions must be specified.
call_to_actions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for call-to-actions. When
the ad serves the call-to-actions will be
selected from this list. Call-to-actions are
optional and at most 5 can be specified.
marketing_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
List of marketing image assets that may be
displayed with the ad. The images must be
314x600 pixels or 320x320 pixels. At least 1 and
at most 20 image assets must be specified.
logo_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
List of logo image assets that may be
displayed with the ad. The images must be
128x128 pixels and not larger than 120KB. At
least 1 and at most 5 image assets must be
specified.
videos (Sequence[google.ads.googleads.v8.common.types.AdVideoAsset]):
List of YouTube video assets that may be
displayed with the ad. Videos are optional and
at most 20 can be specified.
path1 (str):
First part of optional text that may appear
appended to the url displayed in the ad.
path2 (str):
Second part of optional text that may appear
appended to the url displayed in the ad. This
field can only be set when path1 is also set.
"""
headlines = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
call_to_actions = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ad_asset.AdTextAsset,
)
marketing_images = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=ad_asset.AdImageAsset,
)
logo_images = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=ad_asset.AdImageAsset,
)
videos = proto.RepeatedField(
proto.MESSAGE,
number=6,
message=ad_asset.AdVideoAsset,
)
path1 = proto.Field(
proto.STRING,
number=9,
optional=True,
)
path2 = proto.Field(
proto.STRING,
number=10,
optional=True,
)
class DisplayUploadAdInfo(proto.Message):
r"""A generic type of display ad. The exact ad format is controlled by
the display_upload_product_type field, which determines what kinds
of data need to be included with the ad.
Attributes:
display_upload_product_type (google.ads.googleads.v8.enums.types.DisplayUploadProductTypeEnum.DisplayUploadProductType):
The product type of this ad. See comments on
the enum for details.
media_bundle (google.ads.googleads.v8.common.types.AdMediaBundleAsset):
A media bundle asset to be used in the ad. For information
about the media bundle for HTML5_UPLOAD_AD see
https://support.google.com/google-ads/answer/1722096 Media
bundles that are part of dynamic product types use a special
format that needs to be created through the Google Web
Designer. See
https://support.google.com/webdesigner/answer/7543898 for
more information.
"""
display_upload_product_type = proto.Field(
proto.ENUM,
number=1,
enum=gage_display_upload_product_type.DisplayUploadProductTypeEnum.DisplayUploadProductType,
)
media_bundle = proto.Field(
proto.MESSAGE,
number=2,
oneof='media_asset',
message=ad_asset.AdMediaBundleAsset,
)
class ResponsiveDisplayAdControlSpec(proto.Message):
r"""Specification for various creative controls for a responsive
display ad.
Attributes:
enable_asset_enhancements (bool):
Whether the advertiser has opted into the
asset enhancements feature.
enable_autogen_video (bool):
            Whether the advertiser has opted into
            auto-gen video feature.
"""
enable_asset_enhancements = proto.Field(
proto.BOOL,
number=1,
)
enable_autogen_video = proto.Field(
proto.BOOL,
number=2,
)
class SmartCampaignAdInfo(proto.Message):
r"""A Smart campaign ad.
Attributes:
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list. 3 headlines must be specified.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list. 2 descriptions must be
specified.
"""
headlines = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
class CallAdInfo(proto.Message):
r"""A call ad.
Attributes:
country_code (str):
The country code in the ad.
phone_number (str):
The phone number in the ad.
business_name (str):
The business name in the ad.
headline1 (str):
First headline in the ad.
headline2 (str):
Second headline in the ad.
description1 (str):
The first line of the ad's description.
description2 (str):
The second line of the ad's description.
call_tracked (bool):
Whether to enable call tracking for the
creative. Enabling call tracking also enables
call conversions.
disable_call_conversion (bool):
Whether to disable call conversion for the creative. If set
to ``true``, disables call conversions even when
``call_tracked`` is ``true``. If ``call_tracked`` is
``false``, this field is ignored.
phone_number_verification_url (str):
The URL to be used for phone number
verification.
conversion_action (str):
The conversion action to attribute a call conversion to. If
not set a default conversion action is used. This field only
has effect if call_tracked is set to true. Otherwise this
field is ignored.
conversion_reporting_state (google.ads.googleads.v8.enums.types.CallConversionReportingStateEnum.CallConversionReportingState):
The call conversion behavior of this call ad.
It can use its own call conversion setting,
inherit the account level setting, or be
disabled.
path1 (str):
First part of text that may appear appended
to the url displayed to in the ad. Optional.
path2 (str):
Second part of text that may appear appended
to the url displayed to in the ad. This field
can only be set when path1 is set. Optional.
"""
country_code = proto.Field(
proto.STRING,
number=1,
)
phone_number = proto.Field(
proto.STRING,
number=2,
)
business_name = proto.Field(
proto.STRING,
number=3,
)
headline1 = proto.Field(
proto.STRING,
number=11,
)
headline2 = proto.Field(
proto.STRING,
number=12,
)
description1 = proto.Field(
proto.STRING,
number=4,
)
description2 = proto.Field(
proto.STRING,
number=5,
)
call_tracked = proto.Field(
proto.BOOL,
number=6,
)
disable_call_conversion = proto.Field(
proto.BOOL,
number=7,
)
phone_number_verification_url = proto.Field(
proto.STRING,
number=8,
)
conversion_action = proto.Field(
proto.STRING,
number=9,
)
conversion_reporting_state = proto.Field(
proto.ENUM,
number=10,
enum=call_conversion_reporting_state.CallConversionReportingStateEnum.CallConversionReportingState,
)
path1 = proto.Field(
proto.STRING,
number=13,
)
path2 = proto.Field(
proto.STRING,
number=14,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 1,371,305,617,412,775,000 | 30.178258 | 135 | 0.60667 | false |
OndinaHQ/Tracker | plugins/s3.py | 1 | 3045 | # Copyright (C) 2012 Stefano Palazzo <stefano.palazzo@gmail.com>
# Copyright (C) 2012 Ondina, LLC. <http://ondina.co>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import hmac
import hashlib
import http.client
import urllib.parse
import base64
import collections
class S3Error (Exception):
def __init__(self, status, response):
self.status, self.response = status, response
def __str__(self):
return "{}: {}".format(self.status, self.response)
    def __repr__(self):
        return "S3Error({}, {})".format(repr(self.status), repr(self.response))
class S3 (object):
'''
Usage:
>>> s3 = S3(YOUR_ACCESS_KEY_ID, YOUR_SECRET_ACCESS_KEY)
>>> s3.upload("some-bucket", open("image.png", "rb").read(),
"image/png", "image3838838.png")
https://s3.amazonaws.com/some-bucket/image3838838.png
'''
def __init__(self, access_key, secret_key):
self.__access_key, self.__secret_key = access_key, secret_key
def __request(self, method, bucket, host, action, body, content_type, fn):
        # AWS expects an RFC 1123 date; "%c" is locale-dependent and breaks signing
        date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
headers = collections.OrderedDict((
("x-amz-acl", "public-read"),
("Content-Type", content_type),
("Content-Length", len(body)),
("Host", bucket + "." + host),
("Date", date),
))
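        # AWS Signature v2 string-to-sign layout (as built below):
        # VERB \n Content-MD5 \n Content-Type \n Date \n
        # canonicalized x-amz headers + canonicalized resource; MD5 is empty here.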
string_to_sign = (method + "\n" +
"\n" +
content_type + "\n" +
date + "\n" +
"x-amz-acl:public-read\n" +
"/" + bucket + "/" + fn)
signature = base64.b64encode(hmac.new(self.__secret_key.encode(),
string_to_sign.encode(), hashlib.sha1).digest()).decode()
authorization = "AWS " + self.__access_key + ":" + signature
headers.update({"Authorization": authorization})
connection = http.client.HTTPSConnection(bucket + "." + host)
connection.request(method, action, body, headers)
response = connection.getresponse()
if response.status != 200:
raise S3Error(response.status, response.read())
return "https://s3.amazonaws.com/{}/{}".format(bucket, fn)
def upload(self, bucket, data, content_type, filename):
return self.__request("PUT", bucket, "s3.amazonaws.com", "/" +
filename, data, content_type, filename)
| gpl-3.0 | 6,556,845,181,018,682,000 | 36.592593 | 79 | 0.611494 | false |
monikagrabowska/osf.io | osf/models/base.py | 1 | 26122 |
import logging
import random
from datetime import datetime
import bson
import modularodm.exceptions
import pytz
from django.contrib.contenttypes.fields import (GenericForeignKey,
GenericRelation)
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import MultipleObjectsReturned
from django.core.exceptions import ValidationError as DjangoValidationError
from django.db import models
from django.db.models import F
from django.db.models import ForeignKey
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from osf.utils.caching import cached_property
from osf.exceptions import ValidationError
from osf.modm_compat import to_django_query
from osf.utils.datetime_aware_jsonfield import (DateTimeAwareJSONField,
coerce_nonnaive_datetimes)
from osf.utils.fields import LowercaseCharField, NonNaiveDateTimeField
ALPHABET = '23456789abcdefghjkmnpqrstuvwxyz'
logger = logging.getLogger(__name__)
def generate_guid(length=5):
while True:
guid_id = ''.join(random.sample(ALPHABET, length))
try:
# is the guid in the blacklist
BlackListGuid.objects.get(guid=guid_id)
except BlackListGuid.DoesNotExist:
# it's not, check and see if it's already in the database
try:
Guid.objects.get(_id=guid_id)
except Guid.DoesNotExist:
# valid and unique guid
return guid_id
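# Illustrative usage (outputs are random; the values below are hypothetical):
#   generate_guid()          # e.g. 'x7k2m' -- 5 chars drawn from ALPHABET
#   generate_guid(length=6)  # e.g. '9mq3tz'
# The loop retries until the id is neither blacklisted nor already a Guid.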
def generate_object_id():
return str(bson.ObjectId())
class MODMCompatibilityQuerySet(models.QuerySet):
def __getitem__(self, k):
item = super(MODMCompatibilityQuerySet, self).__getitem__(k)
if hasattr(item, 'wrapped'):
return item.wrapped()
else:
return item
def __iter__(self):
items = super(MODMCompatibilityQuerySet, self).__iter__()
for item in items:
if hasattr(item, 'wrapped'):
yield item.wrapped()
else:
yield item
def sort(self, *fields):
# Fields are passed in as e.g. [('title', 1), ('date_created', -1)]
if isinstance(fields[0], list):
fields = fields[0]
def sort_key(item):
if isinstance(item, basestring):
return item
elif isinstance(item, tuple):
field_name, direction = item
prefix = '-' if direction == -1 else ''
return ''.join([prefix, field_name])
sort_keys = [sort_key(each) for each in fields]
return self.order_by(*sort_keys)
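    # Illustrative mapping from the modm-style sort spec to Django order_by:
    #   qs.sort(('title', 1), ('date_created', -1))
    #   -> qs.order_by('title', '-date_created')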
def limit(self, n):
return self[:n]
class BaseModel(models.Model):
"""Base model that acts makes subclasses mostly compatible with the
modular-odm ``StoredObject`` interface.
"""
migration_page_size = 50000
objects = MODMCompatibilityQuerySet.as_manager()
class Meta:
abstract = True
@classmethod
def load(cls, data):
try:
if issubclass(cls, GuidMixin):
return cls.objects.get(guids___id=data)
elif issubclass(cls, ObjectIDMixin):
return cls.objects.get(_id=data)
elif isinstance(data, basestring):
# Some models (CitationStyle) have an _id that is not a bson
# Looking up things by pk will never work with a basestring
return cls.objects.get(_id=data)
return cls.objects.get(pk=data)
except cls.DoesNotExist:
return None
@classmethod
def find_one(cls, query):
try:
return cls.objects.get(to_django_query(query, model_cls=cls))
except cls.DoesNotExist:
raise modularodm.exceptions.NoResultsFound()
except cls.MultipleObjectsReturned as e:
raise modularodm.exceptions.MultipleResultsFound(*e.args)
@classmethod
def find(cls, query=None):
if not query:
return cls.objects.all()
else:
return cls.objects.filter(to_django_query(query, model_cls=cls))
@classmethod
def remove(cls, query=None):
return cls.find(query).delete()
@classmethod
def remove_one(cls, obj):
if obj.pk:
return obj.delete()
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm object, make a django object with the same local fields.
This is a base method that may work for simple objects.
It should be customized in the child class if it doesn't work.
:param modm_obj:
:return:
"""
django_obj = cls()
local_django_fields = set([x.name for x in django_obj._meta.get_fields() if not x.is_relation])
intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
set(local_django_fields))
for field in intersecting_fields:
modm_value = getattr(modm_obj, field)
if modm_value is None:
continue
if isinstance(modm_value, datetime):
modm_value = pytz.utc.localize(modm_value)
# TODO Remove this after migration
if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField):
modm_value = coerce_nonnaive_datetimes(modm_value)
setattr(django_obj, field, modm_value)
return django_obj
@property
def _primary_name(self):
return '_id'
def reload(self):
return self.refresh_from_db()
def _natural_key(self):
return self.pk
def clone(self):
"""Create a new, unsaved copy of this object."""
copy = self.__class__.objects.get(pk=self.pk)
copy.id = None
# empty all the fks
fk_field_names = [f.name for f in self._meta.model._meta.get_fields() if isinstance(f, (ForeignKey, GenericForeignKey))]
for field_name in fk_field_names:
setattr(copy, field_name, None)
try:
copy._id = bson.ObjectId()
except AttributeError:
pass
return copy
def save(self, *args, **kwargs):
# Make Django validate on save (like modm)
if not kwargs.get('force_insert') and not kwargs.get('force_update'):
try:
self.full_clean()
except DjangoValidationError as err:
raise ValidationError(*err.args)
return super(BaseModel, self).save(*args, **kwargs)
# TODO: Rename to Identifier?
class Guid(BaseModel):
"""Stores either a short guid or long object_id for any model that inherits from BaseIDMixin.
Each ID field (e.g. 'guid', 'object_id') MUST have an accompanying method, named with
'initialize_<ID type>' (e.g. 'initialize_guid') that generates and sets the field.
"""
primary_identifier_name = '_id'
# TODO DELETE ME POST MIGRATION
modm_query = None
migration_page_size = 500000
# /TODO DELETE ME POST MIGRATION
id = models.AutoField(primary_key=True)
_id = LowercaseCharField(max_length=255, null=False, blank=False, default=generate_guid, db_index=True,
unique=True)
referent = GenericForeignKey()
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
    created = NonNaiveDateTimeField(db_index=True, default=timezone.now)  # replaces auto_now_add=True
# Override load in order to load by GUID
@classmethod
def load(cls, data):
try:
return cls.objects.get(_id=data)
except cls.DoesNotExist:
return None
def reload(self):
del self._referent_cache
return super(Guid, self).reload()
@classmethod
def migrate_from_modm(cls, modm_obj, object_id=None, content_type=None):
"""
Given a modm Guid make a django Guid
:param object_id:
:param content_type:
:param modm_obj:
:return:
"""
django_obj = cls()
if modm_obj._id != modm_obj.referent._id:
# if the object has a BSON id, get the created date from that
django_obj.created = bson.ObjectId(modm_obj.referent._id).generation_time
else:
# just make it now
django_obj.created = timezone.now()
django_obj._id = modm_obj._id
if object_id and content_type:
# if the referent was passed set the GFK to point to it
django_obj.content_type = content_type
django_obj.object_id = object_id
return django_obj
class Meta:
ordering = ['-created']
get_latest_by = 'created'
index_together = (
('content_type', 'object_id', 'created'),
)
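# Illustrative lookup (the guid value 'abc12' is hypothetical): Guid.load
# resolves a short id to its row, and .referent follows the generic FK.
#   guid = Guid.load('abc12')              # Guid instance or None
#   obj = guid.referent if guid else None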
class BlackListGuid(BaseModel):
# TODO DELETE ME POST MIGRATION
modm_model_path = 'framework.guid.model.BlacklistGuid'
primary_identifier_name = 'guid'
modm_query = None
migration_page_size = 500000
# /TODO DELETE ME POST MIGRATION
id = models.AutoField(primary_key=True)
guid = LowercaseCharField(max_length=255, unique=True, db_index=True)
@property
def _id(self):
return self.guid
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm BlacklistGuid make a django BlackListGuid
:param modm_obj:
:return:
"""
django_obj = cls()
django_obj.guid = modm_obj._id
return django_obj
def generate_guid_instance():
return Guid.objects.create().id
class PKIDStr(str):
def __new__(self, _id, pk):
return str.__new__(self, _id)
def __init__(self, _id, pk):
self.__pk = pk
def __int__(self):
return self.__pk
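# Illustrative (hypothetical values): PKIDStr compares as its string id while
# remembering the integer primary key.
#   s = PKIDStr('abcde', 42)
#   s == 'abcde'   # True -- plain string comparison
#   int(s)         # 42   -- recovers the pk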
class BaseIDMixin(models.Model):
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm object, make a django object with the same local fields.
This is a base method that may work for simple objects.
It should be customized in the child class if it doesn't work.
:param modm_obj:
:return:
"""
django_obj = cls()
local_django_fields = set([x.name for x in django_obj._meta.get_fields() if not x.is_relation])
intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
set(local_django_fields))
for field in intersecting_fields:
modm_value = getattr(modm_obj, field)
if modm_value is None:
continue
if isinstance(modm_value, datetime):
modm_value = pytz.utc.localize(modm_value)
# TODO Remove this after migration
if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField):
modm_value = coerce_nonnaive_datetimes(modm_value)
setattr(django_obj, field, modm_value)
return django_obj
class Meta:
abstract = True
class ObjectIDMixin(BaseIDMixin):
primary_identifier_name = '_id'
_id = models.CharField(max_length=24, default=generate_object_id, unique=True, db_index=True)
@classmethod
def load(cls, q):
try:
return cls.objects.get(_id=q)
except cls.DoesNotExist:
# modm doesn't throw exceptions when loading things that don't exist
return None
@classmethod
def migrate_from_modm(cls, modm_obj):
django_obj = super(ObjectIDMixin, cls).migrate_from_modm(modm_obj)
django_obj._id = str(modm_obj._id)
return django_obj
class Meta:
abstract = True
def _natural_key(self):
return self._id
class InvalidGuid(Exception):
pass
class OptionalGuidMixin(BaseIDMixin):
"""
This makes it so that things can **optionally** have guids. Think files.
Things that inherit from this must also inherit from ObjectIDMixin ... probably
"""
__guid_min_length__ = 5
guids = GenericRelation(Guid, related_name='referent', related_query_name='referents')
guid_string = ArrayField(models.CharField(max_length=255, null=True, blank=True), null=True, blank=True)
content_type_pk = models.PositiveIntegerField(null=True, blank=True)
def get_guid(self, create=False):
if create:
try:
guid, created = Guid.objects.get_or_create(
object_id=self.pk,
content_type_id=ContentType.objects.get_for_model(self).pk
)
except MultipleObjectsReturned:
                # more than one Guid already exists; fall through and return the newest below
pass
else:
return guid
return self.guids.order_by('-created').first()
@classmethod
def migrate_from_modm(cls, modm_obj):
instance = super(OptionalGuidMixin, cls).migrate_from_modm(modm_obj)
from website.models import Guid as MODMGuid
from modularodm import Q as MODMQ
if modm_obj.get_guid():
guids = MODMGuid.find(MODMQ('referent', 'eq', modm_obj._id))
setattr(instance, 'guid_string', [x.lower() for x in guids.get_keys()])
setattr(instance, 'content_type_pk', ContentType.objects.get_for_model(cls).pk)
return instance
class Meta:
abstract = True
class GuidMixinQuerySet(MODMCompatibilityQuerySet):
tables = ['osf_guid', 'django_content_type']
GUID_FIELDS = [
'guids__id',
'guids___id',
'guids__content_type_id',
'guids__object_id',
'guids__created'
]
def safe_table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.query.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
if alias in self.query.alias_refcount:
self.query.alias_refcount[alias] += 1
else:
self.query.alias_refcount[alias] = 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.query.alias_prefix, len(self.query.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.query.table_map[alias] = [alias]
self.query.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
def annotate_query_with_guids(self):
self._prefetch_related_lookups = ['guids']
for field in self.GUID_FIELDS:
self.query.add_annotation(
F(field), '_{}'.format(field), is_summary=False
)
for table in self.tables:
if table not in self.query.tables:
self.safe_table_alias(table)
def remove_guid_annotations(self):
        # iterate over a snapshot of the keys; entries are deleted as we go
        for k in list(self.query.annotations):
            if k[1:] in self.GUID_FIELDS:
                del self.query.annotations[k]
for table_name in ['osf_guid', 'django_content_type']:
if table_name in self.query.alias_map:
del self.query.alias_map[table_name]
if table_name in self.query.alias_refcount:
del self.query.alias_refcount[table_name]
if table_name in self.query.tables:
del self.query.tables[self.query.tables.index(table_name)]
def _clone(self, annotate=False, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
if annotate:
self.annotate_query_with_guids()
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
# this method was copied from the default django queryset except for the below two lines
if annotate:
clone.annotate_query_with_guids()
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
clone._known_related_objects = self._known_related_objects
clone._iterable_class = self._iterable_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def annotate(self, *args, **kwargs):
self.annotate_query_with_guids()
return super(GuidMixinQuerySet, self).annotate(*args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
'Cannot filter a query once a slice has been taken.'
clone = self._clone(annotate=True)
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def all(self):
return self._clone(annotate=True)
# does implicit filter
def get(self, *args, **kwargs):
# add this to make sure we don't get dupes
self.query.add_distinct_fields('id')
return super(GuidMixinQuerySet, self).get(*args, **kwargs)
# TODO: Below lines are commented out to ensure that
# the annotations are used after running .count()
# e.g.
# queryset.count()
# queryset[0]
# This is more efficient when doing chained operations
# on a queryset, but less efficient when only getting a count.
# Figure out a way to get the best of both worlds
# def count(self):
# self.remove_guid_annotations()
# return super(GuidMixinQuerySet, self).count()
def update(self, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).update(**kwargs)
def update_or_create(self, defaults=None, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).update_or_create(defaults=defaults, **kwargs)
def values(self, *fields):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).values(*fields)
def create(self, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).create(**kwargs)
def bulk_create(self, objs, batch_size=None):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).bulk_create(objs, batch_size)
def get_or_create(self, defaults=None, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).get_or_create(defaults, **kwargs)
def values_list(self, *fields, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).values_list(*fields, **kwargs)
def exists(self):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).exists()
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
if 'guids' in self._prefetch_related_lookups and self._result_cache and hasattr(self._result_cache[0], '_guids__id'):
# if guids is requested for prefetch and there are things in the result cache and the first one has
# the annotated guid fields then remove guids from prefetch_related_lookups
del self._prefetch_related_lookups[self._prefetch_related_lookups.index('guids')]
results = []
for result in self._result_cache:
# loop through the result cache
guid_dict = {}
for field in self.GUID_FIELDS:
# pull the fields off of the result object and put them in a dictionary without prefixed names
guid_dict[field] = getattr(result, '_{}'.format(field), None)
if None in guid_dict.values():
# if we get an invalid result field value, stop
logger.warning(
                            'Annotated guids came back with None values for {}, resorting to extra query'.format(result))
return
if not hasattr(result, '_prefetched_objects_cache'):
# initialize _prefetched_objects_cache
result._prefetched_objects_cache = {}
if 'guids' not in result._prefetched_objects_cache:
                        # initialize guids in _prefetched_objects_cache
result._prefetched_objects_cache['guids'] = []
# build a result dictionary of even more proper fields
result_dict = {key.replace('guids__', ''): value for key, value in guid_dict.iteritems()}
# make an unsaved guid instance
guid = Guid(**result_dict)
result._prefetched_objects_cache['guids'].append(guid)
results.append(result)
# replace the result cache with the new set of results
self._result_cache = results
self._prefetch_related_objects()
class GuidMixin(BaseIDMixin):
__guid_min_length__ = 5
primary_identifier_name = 'guid_string'
guids = GenericRelation(Guid, related_name='referent', related_query_name='referents')
guid_string = ArrayField(models.CharField(max_length=255, null=True, blank=True), null=True, blank=True)
content_type_pk = models.PositiveIntegerField(null=True, blank=True)
objects = GuidMixinQuerySet.as_manager()
# TODO: use pre-delete signal to disable delete cascade
def _natural_key(self):
return self.guid_string
@cached_property
def _id(self):
try:
guid = self.guids.all()[0]
except IndexError:
return None
if guid:
return guid._id
return None
@_id.setter
def _id(self, value):
# TODO do we really want to allow this?
guid, created = Guid.objects.get_or_create(_id=value)
if created:
guid.object_id = self.pk
guid.content_type = ContentType.objects.get_for_model(self)
guid.save()
elif guid.content_type == ContentType.objects.get_for_model(self) and guid.object_id == self.pk:
# TODO should this up the created for the guid until now so that it appears as the first guid
# for this object?
return
else:
raise InvalidGuid('Cannot indirectly repoint an existing guid, please use the Guid model')
_primary_key = _id
@classmethod
def load(cls, q):
try:
content_type = ContentType.objects.get_for_model(cls)
# if referent doesn't exist it will return None
return Guid.objects.get(_id=q, content_type=content_type).referent
except Guid.DoesNotExist:
# modm doesn't throw exceptions when loading things that don't exist
return None
@property
def deep_url(self):
return None
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm object, make a django object with the same local fields.
This is a base method that may work for simple objects.
It should be customized in the child class if it doesn't work.
:param modm_obj:
:return:
"""
django_obj = cls()
local_django_fields = set(
[x.name for x in django_obj._meta.get_fields() if not x.is_relation and x.name != '_id'])
intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
set(local_django_fields))
for field in intersecting_fields:
modm_value = getattr(modm_obj, field)
if modm_value is None:
continue
if isinstance(modm_value, datetime):
modm_value = pytz.utc.localize(modm_value)
# TODO Remove this after migration
if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField):
modm_value = coerce_nonnaive_datetimes(modm_value)
setattr(django_obj, field, modm_value)
from website.models import Guid as MODMGuid
from modularodm import Q as MODMQ
guids = MODMGuid.find(MODMQ('referent', 'eq', modm_obj._id))
setattr(django_obj, 'guid_string', list(set([x.lower() for x in guids.get_keys()])))
setattr(django_obj, 'content_type_pk', ContentType.objects.get_for_model(cls).pk)
return django_obj
class Meta:
abstract = True
@receiver(post_save)
def ensure_guid(sender, instance, created, **kwargs):
if not issubclass(sender, GuidMixin):
return False
existing_guids = Guid.objects.filter(object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance))
has_cached_guids = hasattr(instance, '_prefetched_objects_cache') and 'guids' in instance._prefetched_objects_cache
    # brand-new object with nothing to migrate: mint a fresh guid
    if not existing_guids.exists() and instance.guid_string is None:
# Clear query cache of instance.guids
if has_cached_guids:
del instance._prefetched_objects_cache['guids']
Guid.objects.create(object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance),
_id=generate_guid(instance.__guid_min_length__))
elif not existing_guids.exists() and instance.guid_string is not None:
# Clear query cache of instance.guids
if has_cached_guids:
del instance._prefetched_objects_cache['guids']
Guid.objects.create(object_id=instance.pk, content_type_id=instance.content_type_pk,
_id=instance.guid_string)
| apache-2.0 | 2,214,120,148,595,470,600 | 34.588556 | 129 | 0.607917 | false |
stscieisenhamer/glue | glue/app/qt/splash_screen.py | 1 | 1493 | import os
from qtpy import QtWidgets, QtGui
from qtpy.QtCore import Qt, QRect
__all__ = ['QtSplashScreen']
class QtSplashScreen(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(QtSplashScreen, self).__init__(*args, **kwargs)
self.resize(627, 310)
self.setStyleSheet("background-color:white;")
self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
self.center()
self.progress = QtWidgets.QProgressBar()
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addStretch()
self.layout.addWidget(self.progress)
pth = os.path.join(os.path.dirname(__file__), '..', '..', 'logo.png')
self.image = QtGui.QPixmap(pth)
def set_progress(self, value):
self.progress.setValue(value)
QtWidgets.qApp.processEvents() # update progress bar
def paintEvent(self, event):
painter = QtGui.QPainter(self)
painter.drawPixmap(QRect(20, 20, 587, 229), self.image)
def center(self):
# Adapted from StackOverflow
# https://stackoverflow.com/questions/20243637/pyqt4-center-window-on-active-screen
frameGm = self.frameGeometry()
screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
centerPoint = QtWidgets.QApplication.desktop().screenGeometry(screen).center()
frameGm.moveCenter(centerPoint)
self.move(frameGm.topLeft())
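# Illustrative usage (assumes a running QApplication):
#   splash = QtSplashScreen()
#   splash.show()
#   splash.set_progress(50)   # updates the bar and pumps the event loop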
| bsd-3-clause | 1,320,943,867,231,352,300 | 32.177778 | 111 | 0.663094 | false |
pyload/pyload | src/pyload/plugins/downloaders/ZDF.py | 1 | 2269 | # -*- coding: utf-8 -*-
import re
import json
import os
import pycurl
from ..base.downloader import BaseDownloader
# Based on zdfm by Roland Beermann (http://github.com/enkore/zdfm/)
class ZDF(BaseDownloader):
__name__ = "ZDF Mediathek"
__type__ = "downloader"
__version__ = "0.92"
__status__ = "testing"
__pattern__ = r"https://(?:www\.)?zdf\.de/(?P<ID>[/\w-]+)\.html"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """ZDF.de downloader plugin"""
__license__ = "GPLv3"
__authors__ = []
def process(self, pyfile):
self.data = self.load(pyfile.url)
try:
api_token = re.search(
r'window\.zdfsite\.player\.apiToken = "([\d\w]+)";', self.data
).group(1)
self.req.http.c.setopt(pycurl.HTTPHEADER, ["Api-Auth: Bearer " + api_token])
id = re.match(self.__pattern__, pyfile.url).group("ID")
            # the JSON document describing the video (not a file name)
            video_doc = json.loads(
                self.load(
                    "https://api.zdf.de/content/documents/zdf/" + id + ".json",
                    get={"profile": "player-3"},
                )
            )
            stream_list = video_doc["mainVideoContent"]["http://zdf.de/rels/target"][
                "streams"
            ]["default"]["extId"]
streams = json.loads(
self.load(
"https://api.zdf.de/tmd/2/ngplayer_2_4/vod/ptmd/mediathek/"
+ stream_list
)
)
download_name = streams["priorityList"][0]["formitaeten"][0]["qualities"][
0
]["audio"]["tracks"][0]["uri"]
self.pyfile.name = os.path.basename(id) + os.path.splitext(download_name)[1]
self.download(download_name)
except Exception as exc:
self.log_error(exc)
| agpl-3.0 | -6,561,568,899,936,143,000 | 32.865672 | 88 | 0.527545 | false |
daeilkim/refinery | refinery/refinery/data/models.py | 1 | 10950 | # models.py contains code for defining the user object and behavior which will be used throughout the site
from refinery import db, app
import datetime
from refinery.webapp.pubsub import msgServer
from collections import defaultdict
import random,os,re,codecs
import pickle
# Defines the User model used throughout the site: id, username, password, email, image.
class User(db.Model):
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(64), index = True, unique = True)
password = db.Column(db.String(64), index = True)
email = db.Column(db.String(120), index = True, unique = True)
image = db.Column(db.String(100))
#datasets = db.relationship('Dataset', backref = 'author', lazy = 'dynamic')
def __init__(self, username, password, email):
self.username = username
self.password = password
self.email = email
self.image = None
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def __repr__(self):
return '<User %r>' % (self.username)
def check_password(self, proposed_password):
if self.password != proposed_password:
return False
else:
return True
class Experiment(db.Model):
id = db.Column(db.Integer, primary_key = True)
extype = db.Column(db.String(100)) # name of the model i.e topic_model
status = db.Column(db.Text) # status (idle,start,inprogress,finish)
def __init__(self, owner_id, extype):
self.owner_id = owner_id
self.extype = extype
self.status = 'idle'
def getExInfo(self):
if(self.extype == "topicmodel"):
return TopicModelEx.query.filter_by(ex_id=self.id).first()
elif(self.extype == "summarize"):
return SummarizeEx.query.filter_by(ex_id=self.id).first()
else:
return None
class TopicModelEx(db.Model):
id = db.Column(db.Integer, primary_key = True)
ex_id = db.Column(db.Integer, db.ForeignKey('experiment.id'))
viz_data = db.Column(db.PickleType) #the top words and topic proportions
nTopics = db.Column(db.Integer)
stopwords = db.Column(db.PickleType)
def __init__(self, ex_id, nTopics):
self.ex_id = ex_id
self.viz_data = None
self.nTopics = nTopics
self.stopwords = []
class SummarizeEx(db.Model):
id = db.Column(db.Integer, primary_key = True)
ex_id = db.Column(db.Integer, db.ForeignKey('experiment.id'))
current_summary = db.Column(db.PickleType) # a list of sentences in the current summary
top_candidates = db.Column(db.PickleType) # a list of top ranked candidate sentences
sents = db.Column(db.PickleType)
running = db.Column(db.Integer)
def __init__(self, ex_id):
self.ex_id = ex_id
self.current_summary = []
self.top_candidates = []
self.running = 0
class DataDoc(db.Model):
id = db.Column(db.Integer, primary_key = True)
data_id = db.Column(db.Integer, db.ForeignKey('dataset.id'))
doc_id = db.Column(db.Integer, db.ForeignKey('document.id'))
def __init__(self, dset, doc):
self.data_id = dset
self.doc_id = doc
class Document(db.Model):
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(256)) #the name of this file
path = db.Column(db.String(256)) #the path to the raw file data
def __init__(self, name, path):
self.name = name
self.path = path
self.sents = []
def getStaticURL(self):
print "!!!!!!!","/" + os.path.relpath(self.path,"refinery")
return "/" + os.path.relpath(self.path,"refinery")
def getText(self):
lines = [line for line in codecs.open(self.path,"r","utf-8")]
return "\n".join(lines)
def tokenize_sentence(text):
''' Returns list of words found in String. Matches A-Za-z and \'s '''
wordPattern = "[A-Za-z]+[']*[A-Za-z]*"
wordlist = re.findall( wordPattern, text)
return wordlist
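# Illustrative: the pattern keeps letters and embedded apostrophes only, so
#   tokenize_sentence("Bob's code, v2")  ->  ["Bob's", 'code', 'v']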
class Folder(db.Model):
id = db.Column(db.Integer, primary_key = True)
dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id')) # the dataset that was used
docIDs = db.Column(db.PickleType) #hopefully, this can be a dictionary of included docIDs
name = db.Column(db.String)
tm_id = db.Column(db.Integer, db.ForeignKey('experiment.id'))
sum_id = db.Column(db.Integer, db.ForeignKey('experiment.id'))
vocabSize = db.Column(db.Integer)
dirty = db.Column(db.String(20))
def __init__(self, dataset_id, name, docIDs):
self.dataset_id = dataset_id
self.docIDs = docIDs
self.name = name
self.tm_id = None
self.sum_id = None
self.dirty = "dirty"
def numSents(self):
s = Experiment.query.get(self.sum_id).getExInfo()
if s.sents:
return sum([len(s.sents[ss]) for ss in s.sents])
return 0
def numTopics(self):
tm = Experiment.query.get(self.tm_id)
return tm.getExInfo().nTopics
def topicModelEx(self):
return Experiment.query.get(self.tm_id)
def sumModelEx(self):
return Experiment.query.get(self.sum_id)
def initialize(self):
ex1 = Experiment(self.id, "topicmodel")
db.session.add(ex1)
db.session.commit()
tm = TopicModelEx(ex1.id,10)
db.session.add(tm)
db.session.commit()
self.tm_id = ex1.id
ex2 = Experiment(self.id, "summarize")
db.session.add(ex2)
db.session.commit()
ei = SummarizeEx(ex2.id)
db.session.add(ei)
db.session.commit()
self.sum_id = ex2.id
db.session.commit()
def documents(self): # a generator for documents
dataset = Dataset.query.get(self.dataset_id)
for d in dataset.documents:
if d.id in self.docIDs:
yield d
def N(self):
dataset = Dataset.query.get(self.dataset_id)
tot = len(list(self.documents()))
return tot
def all_docs(self):
return sorted([Document.query.get(x.doc_id) for x in self.documents()],key=lambda x: x.id)
def preprocTM(self, username, min_doc, max_doc_percent):
#we need to add options, like to get rid of xml tags!
STOPWORDFILEPATH = 'refinery/static/assets/misc/stopwords.txt'
stopwords = set([x.strip() for x in open(STOPWORDFILEPATH)])
allD = self.all_docs()
nDocs = len(allD)
WC = defaultdict(int)
DWC = defaultdict( lambda: defaultdict(int) )
def addWord(f,w):
WC[w] += 1
DWC[f][w] += 1
c = 0.0
prev = 0
for d in allD:
filE = d.path
c += 1.0
pc = int(c / float(nDocs) * 100)
if pc > prev:
prev = pc
s = 'pprog,Step 1,' + str(self.id) + "," + str(pc)
msgServer.publish(username + 'Xmenus', "%s" % s)
[[addWord(filE,word) for word in tokenize_sentence(line) if word.lower() not in stopwords] for line in open(filE)]
# now remove words with bad appearace stats
to_remove = []
c = 0.0
oldpc = -1
for w in WC:
c += 1.0
pc = int(c/float(len(WC)) * 100)
if not oldpc == pc:
s = 'pprog,Step 2,' + str(self.id) + "," + str(pc)
#print s
msgServer.publish(username + 'Xmenus', "%s" % s)
oldpc = pc
has_w = [d for d,m in DWC.items() if w in m]
n_has_w = len(has_w)
doc_percent = float(n_has_w)/float(nDocs)
#print w,doc_percent,n_has_w
if n_has_w < min_doc or doc_percent > max_doc_percent:
[DWC[d].pop(w,None) for d in has_w]
to_remove.append(w)
[WC.pop(w,None) for w in to_remove]
vocab = [w for w in WC]
print "N VOCAB",len(vocab)
v_enum = defaultdict(int)
for w in vocab:
v_enum[w] = len(v_enum)
d_enum = defaultdict(int)
for f in allD:
d_enum[f.path] = len(d_enum)
outfile = open(self.wordcount_path(),'w')
for d in allD:
f = d.path
m = DWC[f]
fID = d_enum[f]
for w, c in m.items():
wID = v_enum[w]
outfile.write(str(fID) + ',' + str(wID) + ',' + str(c) + '\n')
outfile.close()
self.vocabSize = len(vocab)
outfile = open(self.vocab_path(),'w')
[outfile.write(x + "\n") for x in vocab]
outfile.close()
self.dirty = "clean"
db.session.commit()
def preproc_path(self):
dataset = Dataset.query.get(self.dataset_id)
return "refinery/static/users/" + User.query.get(dataset.owner_id).username + "/processed/"
def wordcount_path(self):
return self.preproc_path() + str(self.id) + "_word_count.txt"
def vocab_path(self):
return self.preproc_path() + str(self.id) + "_vocab.txt"
def unigram(self):
wcfile = self.wordcount_path()
lines = [x.strip().split(",") for x in open(wcfile,'r')]
unigram_dist = [0.0 for _ in xrange(self.vocabSize)]
for l in lines:
wID = int(l[1])
wC = int(l[2])
unigram_dist[wID] += wC
tot = sum(unigram_dist)
return [x / tot for x in unigram_dist]
#return unigram_dist
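    # Illustrative (hypothetical counts): totals [2, 1, 1] normalize to
    # [0.5, 0.25, 0.25]; the returned distribution sums to 1.0.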
def get_vocab_list(self):
vocabfile = self.vocab_path()
return [x.strip() for x in open(vocabfile,'r')]
class Dataset(db.Model):
id = db.Column(db.Integer, primary_key = True)
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
name = db.Column(db.String(100)) # name of the dataset
summary = db.Column(db.Text) # summary of the dataset (optional)
img = db.Column(db.String(100)) # path to dataset img
owner = db.relationship('User', backref = 'datasets')
folders = db.relationship('Folder', backref = 'dataset', lazy = 'dynamic')
documents = db.relationship('DataDoc', backref = 'docdataset', lazy = 'dynamic')
def get_folders(self):
return self.folders.order_by(Folder.id)
def __init__(self, owner, name, summary, img=None):
self.owner_id = owner
self.name = name
self.summary = summary
if img is None:
random_img = random.choice(os.listdir(app.config['RANDOM_IMG_DIRECTORY']))
self.img = os.path.join("assets/images/random", random_img)
else:
self.img = img
| mit | -3,804,350,228,446,922,000 | 30.285714 | 127 | 0.572603 | false |
mrrichardchou/FAST_EVD | DataIO/ismrmd/doc/source/conf.py | 1 | 8582 | # -*- coding: utf-8 -*-
#
# ISMRMRD documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 13 10:11:39 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
#import breathe
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
#'breathe'
]
#breathe_projects = { 'ISMRMRD': '/Users/naegelejd/src/github.com/ismrmrd/ismrmrd/build/doc/api/xml' }
#breathe_default_project = 'ISMRMRD'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ISMRMRD'
copyright = u'2014, ISMRMRD Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_links': [
('API Reference', "api/index.html", True)
]
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ISMRMRDdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ISMRMRD.tex', u'ISMRMRD Documentation',
u'ISMRMRD Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ismrmrd', u'ISMRMRD Documentation',
[u'ISMRMRD Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ISMRMRD', u'ISMRMRD Documentation',
u'ISMRMRD Developers', 'ISMRMRD', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-2-clause | 9,073,641,456,777,190,000 | 30.435897 | 102 | 0.706595 | false |
imclab/confer | server/auth.py | 1 | 12729 | import json, sys, re, hashlib, smtplib, base64, urllib, os
from django.http import *
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.core.context_processors import csrf
from django.core.validators import email_re
from django.db.utils import IntegrityError
from django.utils.http import urlquote_plus
from multiprocessing import Pool
from utils import *
from models import *
p = os.path.abspath(os.path.dirname(__file__))
if(os.path.abspath(p+"/..") not in sys.path):
sys.path.append(os.path.abspath(p+"/.."))
'''
@author: Anant Bhardwaj
@date: Feb 12, 2012
'''
kLogIn = "SESSION_LOGIN"
kConf = "SESSION_CONF"
kName = "SESSION_NAME"
kFName = "SESSION_F_NAME"
kLName = "SESSION_L_NAME"
# for async calls
pool = Pool(processes=1)
'''
LOGIN/REGISTER/RESET
'''
def login_required (f):
def wrap (request, *args, **kwargs):
if kLogIn not in request.session.keys():
if(len(args)>0):
redirect_url = urlquote_plus("/%s/%s" %(args[0], f.__name__))
else:
redirect_url = "/"
return HttpResponseRedirect("/login?redirect_url=%s" %(redirect_url))
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
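# Usage: decorate any view that needs a session, as settings() below does.
#   @login_required
#   def my_view(request, conf):   # 'my_view' and 'conf' are hypothetical
#       ...
# Anonymous requests are redirected to /login?redirect_url=/<conf>/my_view.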
def login_form (request, redirect_url='/', errors=[]):
c = {'redirect_url':redirect_url, 'errors':errors, 'values':request.REQUEST}
c.update(csrf(request))
return render_to_response('login.html', c)
def register_form (request, redirect_url='/', errors=[]):
c = {'redirect_url':redirect_url, 'errors':errors, 'values':request.REQUEST}
c.update(csrf(request))
return render_to_response('register.html', c)
def login (request):
redirect_url = '/'
if('redirect_url' in request.GET.keys()):
redirect_url = urllib.unquote_plus(request.GET['redirect_url'])
if not redirect_url or redirect_url == '':
redirect_url = '/'
if request.method == "POST":
errors = []
login_email = ''
if('redirect_url' in request.POST.keys()):
redirect_url = urllib.unquote_plus(request.POST['redirect_url'])
try:
login_email = request.POST["login_email"].lower()
login_password = hashlib.sha1(request.POST["login_password"]).hexdigest()
user = User.objects.get(email=login_email, password=login_password)
clear_session(request)
request.session[kLogIn] = user.email
request.session[kName] = user.f_name
request.session[kFName] = user.f_name
request.session[kLName] = user.l_name
return HttpResponseRedirect(redirect_url)
except User.DoesNotExist:
try:
User.objects.get(email=login_email)
errors.append(
'Wrong password. Please try again.<br /><br />'
'<a class="blue bold" href="/forgot?email=%s">Click Here</a> '
'to reset your password.' %(urllib.quote_plus(login_email)))
except User.DoesNotExist:
errors.append(
'Could not find any account associated with email address: '
'<a href="mailto:%s">%s</a>.<br /><br /><a class="blue bold" '
'href="/register?redirect_url=%s&email=%s">Click Here</a> '
'to create an account.' %(login_email, login_email,
urllib.quote_plus(redirect_url), urllib.quote_plus(login_email)))
return login_form(
request, redirect_url = urllib.quote_plus(redirect_url),
errors = errors)
except:
errors.append('Login failed.')
return login_form(
request, redirect_url = urllib.quote_plus(redirect_url),
errors = errors)
else:
return login_form(request, urllib.quote_plus(redirect_url))
def register (request):
redirect_url = '/'
if('redirect_url' in request.GET.keys()):
redirect_url = urllib.unquote_plus(request.GET['redirect_url'])
if request.method == "POST":
errors = []
email = ''
try:
error = False
if('redirect_url' in request.POST.keys()):
redirect_url = urllib.unquote_plus(request.POST['redirect_url'])
email = request.POST["email"].lower()
password = request.POST["password"]
f_name = request.POST["f_name"]
l_name = request.POST["l_name"]
      if(email_re.match(email.strip()) is None):
errors.append("Invalid Email.")
error = True
if(f_name.strip() == ""):
errors.append("Empty First Name.")
error = True
if(l_name.strip() == ""):
errors.append("Empty Last Name.")
error = True
if(password == ""):
errors.append("Empty Password.")
error = True
if(error):
return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
hashed_password = hashlib.sha1(password).hexdigest()
user = User(email=email, password=hashed_password, f_name=f_name, l_name=l_name)
user.save()
clear_session(request)
request.session[kLogIn] = user.email
request.session[kName] = user.f_name
request.session[kFName] = user.f_name
request.session[kLName] = user.l_name
encrypted_email = encrypt_text(user.email)
subject = "Welcome to Confer"
msg_body = '''
Dear %s,
Thanks for registering to Confer.
Please click the link below to start using Confer:
http://confer.csail.mit.edu/verify/%s
''' % (user.f_name + ' ' + user.l_name, encrypted_email)
pool.apply_async(send_email, [user.email, subject, msg_body])
return HttpResponseRedirect(redirect_url)
except IntegrityError:
errors.append(
'Account already exists. Please <a class="blue bold" href="/login?login_email=%s">Log In</a>.'
% (urllib.quote_plus(email)))
return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
except:
errors.append("Some error happened while trying to create an account. Please try again.")
return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
else:
return register_form(request, redirect_url = urllib.quote_plus(redirect_url))
def clear_session (request):
request.session.flush()
if kLogIn in request.session.keys():
del request.session[kLogIn]
if kName in request.session.keys():
del request.session[kName]
if kFName in request.session.keys():
del request.session[kFName]
if kLName in request.session.keys():
del request.session[kLName]
def logout (request):
clear_session(request)
c = {
'msg_title': 'Thank you for using Confer!',
    'msg_body': 'You have been logged out.<br /><br /><ul><li><a class= "blue bold" href="/home">Click Here</a> to browse confer as guest.<br/><br /></li><li><a class= "blue bold" href="/login">Click Here</a> to log in again.</li></ul>'
}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
def forgot (request):
if request.method == "POST":
errors = []
try:
user_email = request.POST["email"].lower()
User.objects.get(email=user_email)
encrypted_email = encrypt_text(user_email)
subject = "Confer Password Reset"
msg_body = '''
Dear %s,
Please click the link below to reset your confer password:
http://confer.csail.mit.edu/reset/%s
''' % (user_email, encrypted_email)
pool.apply_async(send_email, [user_email, subject, msg_body])
c = {
'msg_title': 'Confer Reset Password',
'msg_body': 'A link to reset your password has been sent to your email address.'
}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
except User.DoesNotExist:
errors.append(
"Invalid Email Address.")
except:
errors.append(
'Some unknown error happened.'
'Please try again or send an email to '
'<a href="mailto:confer@csail.mit.edu">confer@csail.mit.edu</a>.')
c = {'errors': errors, 'values': request.POST}
c.update(csrf(request))
return render_to_response('forgot.html', c)
else:
c = {'values': request.REQUEST}
c.update(csrf(request))
return render_to_response('forgot.html', c)
def verify (request, encrypted_email):
errors = []
c = {'msg_title': 'Confer Account Verification'}
try:
user_email = decrypt_text(encrypted_email)
user = User.objects.get(email=user_email)
c.update({
'msg_body': 'Thanks for verifying your email address! <a class= "blue bold" href="/home">Click Here</a> to start using Confer.'
})
clear_session(request)
request.session[kLogIn] = user.email
request.session[kName] = user.f_name
request.session[kFName] = user.f_name
request.session[kLName] = user.l_name
except:
errors.append(
'Wrong verify code in the URL. '
'Please try again or send an email to '
'<a href="mailto:confer@csail.mit.edu">confer@csail.mit.edu</a>')
c.update({'errors': errors})
c.update(csrf(request))
return render_to_response('confirmation.html', c)
def reset (request, encrypted_email):
errors = []
error = False
if request.method == "POST":
try:
user_email = request.POST["user_email"].lower()
password = request.POST["new_password"]
password2 = request.POST["new_password2"]
if password == "":
errors.append("Empty Password.")
error = True
if password2 != password:
errors.append("Password and Confirm Password don't match.")
error = True
if error:
c = {
'user_email': user_email,
'encrypted_email': encrypted_email,
'errors': errors
}
c.update(csrf(request))
return render_to_response('reset.html', c)
else:
hashed_password = hashlib.sha1(password).hexdigest()
user = User.objects.get(email=user_email)
user.password = hashed_password
user.save()
c = {
'msg_title': 'Confer Reset Password',
'msg_body': 'Your password has been changed successfully.'
}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
except:
errors.append(
'Some unknown error happened. '
'Please try again or send an email to '
'<a href="mailto:confer@csail.mit.edu">confer@csail.mit.edu</a>')
c = {'errors': errors}
c.update(csrf(request))
return render_to_response('reset.html', c)
else:
try:
user_email = decrypt_text(encrypted_email)
User.objects.get(email=user_email)
c = {
'user_email': user_email,
'encrypted_email': encrypted_email
}
c.update(csrf(request))
return render_to_response('reset.html', c)
except:
errors.append(
'Wrong reset code in the URL. '
'Please try again or send an email to '
'<a href="mailto:confer@csail.mit.edu">confer@csail.mit.edu</a>')
c = {'msg_title': 'Confer Reset Password', 'errors': errors}
c.update(csrf(request))
return render_to_response('confirmation.html', c)
@login_required
def settings (request):
errors = []
error = False
redirect_url = '/'
if('redirect_url' in request.GET.keys()):
redirect_url = request.GET['redirect_url']
if request.method == "POST":
try:
if('redirect_url' in request.POST.keys()):
redirect_url = request.POST['redirect_url']
user_email = request.POST["user_email"].lower()
meetups = request.POST["meetups_enabled"]
user = User.objects.get(email=user_email)
if meetups == 'enabled':
user.meetups_enabled = True
else:
user.meetups_enabled = False
user.save()
return HttpResponseRedirect(redirect_url)
except Exception, e:
errors.append(
'Some unknown error happened. '
'Please try again or send an email to '
'<a href="mailto:confer@csail.mit.edu">confer@csail.mit.edu</a>')
c = {'errors': errors}
c.update(csrf(request))
return render_to_response('settings.html', c)
else:
login = get_login(request)
user = User.objects.get(email=login[0])
meetups_enabled = user.meetups_enabled
c = {
'user_email': login[0],
'login_id': login[0],
'login_name': login[1],
'meetups_enabled': meetups_enabled,
'redirect_url': redirect_url}
c.update(csrf(request))
return render_to_response('settings.html', c)
def get_login(request):
login_id = None
login_name = ''
try:
login_id = request.session[kLogIn]
login_name = request.session[kName]
except:
pass
return [login_id, login_name]
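# Illustrative: views read the session via get_login to build page context.
#   login_id, login_name = get_login(request)
#   if login_id is None: ...   # guest (not logged in)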
| mit | -7,501,670,995,491,163,000 | 30.585608 | 237 | 0.623537 | false |
geertj/bluepass | bluepass/frontends/qt/passwordbutton.py | 1 | 10929 | #
# This file is part of Bluepass. Bluepass is Copyright (c) 2012-2013
# Geert Jansen.
#
# Bluepass is free software available under the GNU General Public License,
# version 3. See the file LICENSE distributed with this file for the exact
# licensing terms.
from __future__ import absolute_import, print_function
from PyQt4.QtCore import QTimer, Signal, Slot, Property, Qt, QPoint
from PyQt4.QtGui import (QPushButton, QStylePainter, QStyleOptionButton,
QStyle, QGridLayout, QWidget, QLabel, QSpinBox, QLineEdit, QFrame,
QApplication, QCheckBox, QFontMetrics)
class NoSelectSpinbox(QSpinBox):
"""This is a SpinBox that:
* Will not select the displayed text when the value changes.
* Does not accept keyboard input.
"""
def __init__(self, parent=None):
super(NoSelectSpinbox, self).__init__(parent)
self.setFocusPolicy(Qt.NoFocus)
def stepBy(self, amount):
super(NoSelectSpinbox, self).stepBy(amount)
self.lineEdit().deselect()
class StrengthIndicator(QLabel):
"""A password strength indicator.
This is a label that gives feedback on the strength of a password.
"""
Poor, Good, Excellent = range(3)
stylesheet = """
StrengthIndicator { border: 1px solid black; }
StrengthIndicator[strength="0"] { background-color: #ff2929; }
StrengthIndicator[strength="1"] { background-color: #4dd133; }
StrengthIndicator[strength="2"] { background-color: #4dd133; }
"""
def __init__(self, parent=None):
super(StrengthIndicator, self).__init__(parent)
self._strength = 0
self.setStyleSheet(self.stylesheet)
def getStrength(self):
return self._strength
def setStrength(self, strength):
self._strength = strength
if strength == self.Poor:
self.setText('Poor')
elif strength == self.Good:
self.setText('Good')
elif strength == self.Excellent:
self.setText('Excellent')
self.setStyleSheet(self.stylesheet)
strength = Property(int, getStrength, setStrength)
class PasswordConfiguration(QFrame):
"""Base class for password configuration popups.
A password popup is installed in a GeneratePasswordButton, and allows
the user to customize the parameters of password generation.
"""
def __init__(self, method, parent=None):
super(PasswordConfiguration, self).__init__(parent)
self.method = method
self.parameters = []
parametersChanged = Signal(str, list)
class DicewarePasswordConfiguration(PasswordConfiguration):
"""Configuration for Diceware password generation."""
stylesheet = """
PasswordConfiguration { border: 1px solid grey; }
"""
def __init__(self, parent=None):
super(DicewarePasswordConfiguration, self).__init__('diceware', parent)
self.parameters = [5]
self.addWidgets()
self.setFixedSize(self.sizeHint())
self.setStyleSheet(self.stylesheet)
def addWidgets(self):
grid = QGridLayout()
self.setLayout(grid)
grid.setColumnMinimumWidth(1, 10)
label = QLabel('Length', self)
grid.addWidget(label, 0, 0)
spinbox = NoSelectSpinbox(self)
spinbox.setSuffix(' words')
spinbox.setMinimum(4)
spinbox.setMaximum(8)
grid.addWidget(spinbox, 0, 2)
label = QLabel('Security', self)
grid.addWidget(label, 1, 0)
strength = StrengthIndicator(self)
grid.addWidget(strength, 1, 2)
self.strength = strength
spinbox.valueChanged.connect(self.setParameters)
spinbox.setValue(self.parameters[0])
@Slot(int)
def setParameters(self, words):
self.parameters[0] = words
self.updateStrength()
@Slot()
def updateStrength(self):
backend = QApplication.instance().backend()
strength = backend.password_strength(self.method, *self.parameters)
# We use Diceware only for locking our vaults. Because we know we
# do proper salting and key stretching, we add 20 extra bits.
strength += 20
if strength < 70:
strength = StrengthIndicator.Poor
elif strength < 94:
strength = StrengthIndicator.Good
else:
strength = StrengthIndicator.Excellent
self.strength.setStrength(strength)
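# Illustrative sketch (not part of the original module): the entropy estimate
# behind the thresholds above, assuming a standard 7776-word (6**5) Diceware
# list. The real figure comes from the backend's password_strength() call.
import math

def _diceware_entropy_bits(words, list_size=7776):
    """Entropy in bits of a passphrase of `words` Diceware words."""
    return words * math.log(list_size, 2)

# Example: 5 words ~= 64.6 bits; the +20 bit credit above brings that to
# ~84.6 bits, which lands in the "Good" band (70 <= strength < 94).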
class RandomPasswordConfiguration(PasswordConfiguration):
"""Configuration for random password generation."""
stylesheet = """
PasswordConfiguration { border: 1px solid grey; }
"""
def __init__(self, parent=None):
super(RandomPasswordConfiguration, self).__init__('random', parent)
self.parameters = [12, '[a-z][A-Z][0-9]']
self.addWidgets()
self.setFixedSize(self.sizeHint())
self.setStyleSheet(self.stylesheet)
def addWidgets(self):
grid = QGridLayout()
self.setLayout(grid)
grid.setColumnMinimumWidth(1, 10)
label = QLabel('Length', self)
grid.addWidget(label, 0, 0)
spinbox = NoSelectSpinbox(self)
spinbox.setSuffix(' characters')
spinbox.setMinimum(6)
spinbox.setMaximum(20)
grid.addWidget(spinbox, 0, 2, 1, 2)
label = QLabel('Characters')
grid.addWidget(label, 1, 0)
def updateInclude(s):
def stateChanged(state):
self.updateInclude(state, s)
return stateChanged
lower = QCheckBox('Lower')
grid.addWidget(lower, 1, 2)
lower.stateChanged.connect(updateInclude('[a-z]'))
upper = QCheckBox('Upper')
grid.addWidget(upper, 1, 3)
upper.stateChanged.connect(updateInclude('[A-Z]'))
digits = QCheckBox('Digits')
grid.addWidget(digits, 2, 2)
digits.stateChanged.connect(updateInclude('[0-9]'))
special = QCheckBox('Special')
grid.addWidget(special, 2, 3)
special.stateChanged.connect(updateInclude('[!-/]'))
label = QLabel('Security', self)
grid.addWidget(label, 3, 0)
strength = StrengthIndicator(self)
grid.addWidget(strength, 3, 2)
self.strength = strength
spinbox.valueChanged.connect(self.setLength)
spinbox.setValue(self.parameters[0])
lower.setChecked('[a-z]' in self.parameters[1])
upper.setChecked('[A-Z]' in self.parameters[1])
digits.setChecked('[0-9]' in self.parameters[1])
special.setChecked('[!-/]' in self.parameters[1])
@Slot(int)
def setLength(self, length):
self.parameters[0] = length
self.parametersChanged.emit(self.method, self.parameters)
self.updateStrength()
@Slot()
def updateInclude(self, enable, s):
if enable and s not in self.parameters[1]:
self.parameters[1] += s
elif not enable:
self.parameters[1] = self.parameters[1].replace(s, '')
self.parametersChanged.emit(self.method, self.parameters)
self.updateStrength()
@Slot()
def updateStrength(self):
backend = QApplication.instance().backend()
strength = backend.password_strength(self.method, *self.parameters)
# We do not know if the remote site does key stretching or salting.
# So we only give a Good rating if the entropy takes the password
# out of reach of the largest Rainbow tables.
if strength < 60:
strength = StrengthIndicator.Poor
elif strength < 84:
strength = StrengthIndicator.Good
else:
strength = StrengthIndicator.Excellent
self.strength.setStrength(strength)
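# Illustrative sketch (not part of the original module): the matching
# estimate for random passwords, where entropy is length * log2(pool size).
# The pool size used in the example is an assumption for illustration only.
import math

def _random_entropy_bits(length, pool_size):
    """Entropy in bits of `length` characters drawn from `pool_size` symbols."""
    return length * math.log(pool_size, 2)

# Example: 12 characters over the 62-symbol pool [a-z][A-Z][0-9] give
# ~71.4 bits, which lands in the "Good" band (60 <= strength < 84).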
class PopupButton(QPushButton):
"""A button with a popup.
The popup will be displayed just below the button after the user
keeps the button pressed for 500 msecs.
"""
def __init__(self, text, parent=None):
super(PopupButton, self).__init__(text, parent)
timer = QTimer()
timer.setSingleShot(True)
timer.setInterval(500)
timer.timeout.connect(self.showPopup)
self.timer = timer
self.popup = None
# I would have preferred to implement the menu indicator by overriding
# initStyleOption(), and nothing else, but it doesn't work. The C++
# ::paintEvent() and ::sizeHint() are not able to call into it. So we need
# to provide our own paintEvent() and sizeHint() too.
def initStyleOption(self, option):
super(PopupButton, self).initStyleOption(option)
option.features |= option.HasMenu
def paintEvent(self, event):
p = QStylePainter(self)
opts = QStyleOptionButton()
self.initStyleOption(opts)
p.drawControl(QStyle.CE_PushButton, opts)
def sizeHint(self):
size = super(PopupButton, self).sizeHint()
fm = QFontMetrics(QApplication.instance().font())
width = fm.width(self.text())
opts = QStyleOptionButton()
self.initStyleOption(opts)
style = self.style()
dw = style.pixelMetric(QStyle.PM_MenuButtonIndicator, opts, self)
size.setWidth(width + dw + 10)
return size
def mousePressEvent(self, event):
self.timer.start()
super(PopupButton, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
self.timer.stop()
super(PopupButton, self).mouseReleaseEvent(event)
def setPopup(self, popup):
popup.setParent(None)
popup.setWindowFlags(Qt.Popup)
popup.hide()
# Install a closeEvent() on the popup that raises the button.
def closeEvent(*args):
self.setDown(False)
popup.closeEvent = closeEvent
self.popup = popup
@Slot()
def showPopup(self):
if not self.popup:
return
pos = QPoint(self.width(), self.height())
pos = self.mapToGlobal(pos)
size = self.popup.size()
self.popup.move(pos.x() - size.width(), pos.y())
self.popup.show()
class GeneratePasswordButton(PopupButton):
"""A password generation button.
A password is generated each time the user clicks the button.
"""
def __init__(self, text, popup, parent=None):
super(GeneratePasswordButton, self).__init__(text, parent)
self.method = popup.method
self.parameters = popup.parameters
self.setPopup(popup)
popup.parametersChanged.connect(self.parametersChanged)
self.clicked.connect(self.generate)
@Slot(str, list)
def parametersChanged(self, method, parameters):
self.method = method
self.parameters = parameters
self.generate()
@Slot()
def generate(self):
backend = QApplication.instance().backend()
password = backend.generate_password(self.method, *self.parameters)
self.passwordGenerated.emit(password)
passwordGenerated = Signal(str)
| gpl-3.0 | -703,691,127,010,999,800 | 33.046729 | 79 | 0.637753 | false |
schnapptack/gskompetenzen | features/gsaudit/migrations/0025_auto__add_field_skill_author.py | 1 | 15198 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Skill.author'
db.add_column(u'gsaudit_skill', 'author',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Teacher'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Skill.author'
db.delete_column(u'gsaudit_skill', 'author_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'gsaudit.audit': {
'Meta': {'object_name': 'Audit'},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.TeachingAssignment']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'written_exam': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'gsaudit.auditskill': {
'Meta': {'object_name': 'AuditSkill'},
'audit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Audit']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Skill']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'gsaudit.grade': {
'Meta': {'object_name': 'Grade'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pupils': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['gsaudit.Pupil']", 'null': 'True', 'through': u"orm['gsaudit.GradeParticipant']", 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.School']"})
},
u'gsaudit.gradeparticipant': {
'Meta': {'object_name': 'GradeParticipant'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Grade']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Pupil']"})
},
u'gsaudit.pupil': {
'Meta': {'ordering': "('last_name', 'first_name')", 'object_name': 'Pupil'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.School']"})
},
u'gsaudit.pupilauditskill': {
'Meta': {'object_name': 'PupilAuditSkill'},
'audit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Audit']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diagnosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Pupil']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Skill']"}),
'written_exam': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'gsaudit.pupiltainfo': {
'Meta': {'unique_together': "(('pupil', 'teaching_assignment'),)", 'object_name': 'PupilTAInfo'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Pupil']"}),
'teaching_assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.TeachingAssignment']"}),
'written_exam_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'})
},
u'gsaudit.school': {
'Meta': {'object_name': 'School'},
'address': ('django.db.models.fields.TextField', [], {}),
'contact_person': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'gsaudit.skill': {
'Meta': {'object_name': 'Skill'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Teacher']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'max_skill_level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'min_skill_level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['gsaudit.Skill']"}),
'pupil_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'gsaudit.subject': {
'Meta': {'object_name': 'Subject'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'gsaudit.teacher': {
'Meta': {'object_name': 'Teacher'},
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.School']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
u'gsaudit.teachingassignment': {
'Meta': {'object_name': 'TeachingAssignment'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_skill_level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Grade']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Skill']"}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Subject']"}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gsaudit.Teacher']"})
}
}
complete_apps = ['gsaudit'] | agpl-3.0 | -2,912,736,705,076,302,300 | 75.762626 | 215 | 0.544085 | false |
kamijawa/libmpsse | src/examples/spiflash.py | 1 | 6528 | #!/usr/bin/env python
from mpsse import *
from time import sleep
class SPIFlash(object):
WCMD = "\x02" # Standard SPI flash write command (0x02)
RCMD = "\x03" # Standard SPI flash read command (0x03)
WECMD = "\x06" # Standard SPI flash write enable command (0x06)
CECMD = "\xc7" # Standard SPI flash chip erase command (0xC7)
IDCMD = "\x9f" # Standard SPI flash chip ID command (0x9F)
ID_LENGTH = 3 # Normal SPI chip ID length, in bytes
ADDRESS_LENGTH = 3 # Normal SPI flash address length (24 bits, aka, 3 bytes)
BLOCK_SIZE = 256 # SPI block size, writes must be done in multiples of this size
PP_PERIOD = .025 # Page program time, in seconds
def __init__(self, speed=FIFTEEN_MHZ):
# Sanity check on the specified clock speed
if not speed:
speed = FIFTEEN_MHZ
self.flash = MPSSE(SPI0, speed, MSB)
self.chip = self.flash.GetDescription()
self.speed = self.flash.GetClock()
self._init_gpio()
def _init_gpio(self):
# Set the GPIOL0 and GPIOL1 pins high for connection to SPI flash WP and HOLD pins.
self.flash.PinHigh(GPIOL0)
self.flash.PinHigh(GPIOL1)
def _addr2str(self, address):
addr_str = ""
for i in range(0, self.ADDRESS_LENGTH):
addr_str += chr((address >> (i*8)) & 0xFF)
return addr_str[::-1]
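    # Worked example (illustrative): _addr2str(0x123456) builds the address
    # LSB-first as "\x56\x34\x12", then reverses it, returning the big-endian
    # "\x12\x34\x56" byte order that SPI flash commands expect on the wire.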
def Read(self, count, address=0):
data = ''
self.flash.Start()
self.flash.Write(self.RCMD + self._addr2str(address))
data = self.flash.Read(count)
self.flash.Stop()
return data
def Write(self, data, address=0):
count = 0
while count < len(data):
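            # Each page program must be preceded by its own write-enable
            # command: standard SPI NOR flash clears its write-enable latch
            # after every program cycle completes.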
self.flash.Start()
self.flash.Write(self.WECMD)
self.flash.Stop()
self.flash.Start()
            self.flash.Write(self.WCMD + self._addr2str(address) + data[count:count+self.BLOCK_SIZE])
self.flash.Stop()
sleep(self.PP_PERIOD)
address += self.BLOCK_SIZE
count += self.BLOCK_SIZE
def Erase(self):
self.flash.Start()
self.flash.Write(self.WECMD)
self.flash.Stop()
self.flash.Start()
self.flash.Write(self.CECMD)
self.flash.Stop()
def ChipID(self):
self.flash.Start()
self.flash.Write(self.IDCMD)
        chipid = self.flash.Read(self.ID_LENGTH)
self.flash.Stop()
return chipid
def Close(self):
self.flash.Close()
if __name__ == "__main__":
import sys
from getopt import getopt as GetOpt, GetoptError
def pin_mappings():
print """
Common Pin Mappings for 8-pin SPI Flash Chips
--------------------------------------------------------------------
| Description | SPI Flash Pin | FTDI Pin | C232HM Cable Color Code |
--------------------------------------------------------------------
| CS | 1 | ADBUS3 | Brown |
| MISO | 2 | ADBUS2 | Green |
| WP | 3 | ADBUS4 | Grey |
| GND | 4 | N/A | Black |
| MOSI | 5 | ADBUS1 | Yellow |
| CLK | 6 | ADBUS0 | Orange |
| HOLD | 7 | ADBUS5 | Purple |
| Vcc | 8 | N/A | Red |
--------------------------------------------------------------------
"""
sys.exit(0)
def usage():
print ""
print "Usage: %s [OPTIONS]" % sys.argv[0]
print ""
print "\t-r, --read=<file> Read data from the chip to file"
print "\t-w, --write=<file> Write data from file to the chip"
print "\t-s, --size=<int> Set the size of data to read/write"
print "\t-a, --address=<int> Set the starting address for the read/write operation [0]"
print "\t-f, --frequency=<int> Set the SPI clock frequency, in hertz [15,000,000]"
print "\t-i, --id Read the chip ID"
print "\t-v, --verify Verify data that has been read/written"
print "\t-e, --erase Erase the entire chip"
print "\t-p, --pin-mappings Display a table of SPI flash to FTDI pin mappings"
print "\t-h, --help Show help"
print ""
sys.exit(1)
def main():
fname = None
freq = None
action = None
verify = False
address = 0
size = 0
data = ""
try:
opts, args = GetOpt(sys.argv[1:], "f:s:a:r:w:eipvh", ["frequency=", "size=", "address=", "read=", "write=", "id", "erase", "verify", "pin-mappings", "help"])
except GetoptError, e:
print e
usage()
for opt, arg in opts:
if opt in ('-f', '--frequency'):
freq = int(arg)
elif opt in ('-s', '--size'):
size = int(arg)
elif opt in ('-a', '--address'):
address = int(arg)
elif opt in ('-r', '--read'):
action = "read"
fname = arg
elif opt in ('-w', '--write'):
action = "write"
fname = arg
elif opt in ('-i', '--id'):
action = "id"
elif opt in ('-e', '--erase'):
action = "erase"
elif opt in ('-v', '--verify'):
verify = True
elif opt in ('-h', '--help'):
usage()
elif opt in ('-p', '--pin-mappings'):
pin_mappings()
if action is None:
print "Please specify an action!"
usage()
spi = SPIFlash(freq)
print "%s initialized at %d hertz" % (spi.chip, spi.speed)
if action == "read":
if fname is None or not size:
print "Please specify an output file and read size!"
usage()
sys.stdout.write("Reading %d bytes starting at address 0x%X..." % (size, address))
sys.stdout.flush()
data = spi.Read(size, address)
open(fname, 'wb').write(data)
print "saved to %s." % fname
elif action == "write":
if fname is None:
print "Please specify an input file!"
usage()
data = open(fname, 'rb').read()
if not size:
size = len(data)
sys.stdout.write("Writing %d bytes from %s to the chip starting at address 0x%X..." % (size, fname, address))
sys.stdout.flush()
spi.Write(data[0:size], address)
print "done."
elif action == "id":
for byte in spi.ChipID():
print ("%.2X" % ord(byte)),
print ""
elif action == "erase":
data = "\xFF" * size
sys.stdout.write("Erasing entire chip...")
sys.stdout.flush()
spi.Erase()
print "done."
if verify and data:
sys.stdout.write("Verifying...")
sys.stdout.flush()
vdata = spi.Read(size, address)
if vdata == data:
if data == ("\xFF" * size):
print "chip is blank."
elif data == ("\x00" * size):
print "read all 0x00's."
else:
print "reads are identical, verification successful."
else:
print "reads are not identical, verification failed."
spi.Close()
main()
| mit | 3,797,314,655,123,427,000 | 26.897436 | 160 | 0.557751 | false |
awong1900/Boss-Key_Wio-link | boss-key_wio-link.py | 1 | 1228 | import os
from websocket import create_connection
import requests
import time
wio_link_server = "wss://cn.iot.seeed.cc/v1/node/event"
wio_link_key = "efe19ae9752add26d614d87cacd97f45"
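# Open the Wio Link event stream and send the node's access token as the
# first frame; the server then pushes an event each time the sensor fires.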
ws = create_connection(wio_link_server)
ws.send(wio_link_key)
print "Linked to Pion One sensor."
requests.post("https://cn.iot.seeed.cc/v1/node/GroveServo/angle/90?access_token=efe19ae9752add26d614d87cacd97f45")
requests.post("https://cn.iot.seeed.cc/v1/node/GroveLedWs2812/clear/40/008000?access_token=efe19ae9752add26d614d87cacd97f45")
while True:
print "Receiving..."
result = ws.recv()
print "Received '%s'" % result
    print "Someone is coming..."
os.system("open /Applications/Mail.app")
requests.post("https://cn.iot.seeed.cc/v1/node/GroveServo/angle/180?access_token=efe19ae9752add26d614d87cacd97f45")
requests.post("https://cn.iot.seeed.cc/v1/node/GroveLedWs2812/clear/40/800000?access_token=efe19ae9752add26d614d87cacd97f45")
time.sleep(1)
requests.post("https://cn.iot.seeed.cc/v1/node/GroveServo/angle/90?access_token=efe19ae9752add26d614d87cacd97f45")
requests.post("https://cn.iot.seeed.cc/v1/node/GroveLedWs2812/clear/40/008000?access_token=efe19ae9752add26d614d87cacd97f45")
ws.close()
| apache-2.0 | 8,546,627,437,106,191,000 | 42.857143 | 129 | 0.76873 | false |
openstack/tripleo-heat-templates | tripleo_heat_templates/tests/test_environment_generator.py | 1 | 18885 | # Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import tempfile
from unittest import mock
from oslotest import base
import six
import testscenarios
from tripleo_heat_templates import environment_generator
load_tests = testscenarios.load_tests_apply_scenarios
basic_template = '''
parameters:
FooParam:
default: foo
description: Foo description
type: string
BarParam:
default: 42
description: Bar description
type: number
EndpointMap:
default: {}
description: Parameter that should not be included by default
type: json
resources:
# None
'''
basic_private_template = '''
parameters:
FooParam:
default: foo
description: Foo description
type: string
_BarParam:
default: 42
description: Bar description
type: number
resources:
# None
'''
mandatory_template = '''
parameters:
FooParam:
description: Mandatory param
type: string
resources:
# None
'''
index_template = '''
parameters:
FooParam:
description: Param with %index% as its default
type: string
default: '%index%'
resources:
# None
'''
multiline_template = '''
parameters:
FooParam:
description: |
Parameter with
multi-line description
type: string
default: ''
resources:
# None
'''
basic_role_param_template = '''
parameters:
RoleParam:
description: Role param description
type: string
default: ''
FooParam:
description: Foo description
default: foo
type: string
resources:
# None
'''
multiline_role_param_template = '''
parameters:
RoleParam:
description: |
Role Parameter with
multi-line description
type: string
default: ''
FooParam:
description: |
Parameter with
multi-line description
type: string
default: ''
resources:
# None
'''
class GeneratorTestCase(base.BaseTestCase):
content_scenarios = [
('basic',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Bar description
# Type: number
BarParam: 42
# Foo description
# Type: string
FooParam: foo
''',
}),
('basic-one-param',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters:
- FooParam
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Foo description
# Type: string
FooParam: foo
''',
}),
('basic-static-param',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
static:
- BarParam
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Foo description
# Type: string
FooParam: foo
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Bar description
# Type: number
BarParam: 42
# *********************
# End static parameters
# *********************
''',
}),
('basic-static-param-sample',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
static:
- BarParam
sample_values:
BarParam: 1
FooParam: ''
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Foo description
# Type: string
FooParam: ''
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Bar description
# Type: number
BarParam: 1
# *********************
# End static parameters
# *********************
''',
}),
('basic-private',
{'template': basic_private_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Foo description
# Type: string
FooParam: foo
''',
}),
('mandatory',
{'template': mandatory_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Mandatory param
# Mandatory. This parameter must be set by the user.
# Type: string
FooParam: <None>
''',
}),
('basic-sample',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
sample_values:
FooParam: baz
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Bar description
# Type: number
BarParam: 42
# Foo description
# Type: string
FooParam: baz
''',
}),
('basic-resource-registry',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
resource_registry:
OS::TripleO::FakeResource: fake-filename.yaml
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Bar description
# Type: number
BarParam: 42
# Foo description
# Type: string
FooParam: foo
resource_registry:
OS::TripleO::FakeResource: fake-filename.yaml
''',
}),
('basic-hidden',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
sample_values:
EndpointMap: |-2
foo: bar
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Bar description
# Type: number
BarParam: 42
# Parameter that should not be included by default
# Type: json
EndpointMap:
foo: bar
# Foo description
# Type: string
FooParam: foo
''',
}),
('missing-param',
{'template': basic_template,
'exception': RuntimeError,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters:
- SomethingNonexistent
''',
'expected_output': None,
}),
('percent-index',
{'template': index_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Param with %index% as its default
# Type: string
FooParam: '%index%'
''',
}),
('nested',
{'template': multiline_template,
'exception': None,
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
children:
- name: nested
title: Nested Environment
description: Nested description
sample_values:
FooParam: bar
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Parameter with
# multi-line description
# Type: string
FooParam: ''
''',
'nested_output': '''# title: Nested Environment
# description: |
# Nested description
parameter_defaults:
# Parameter with
# multi-line description
# Type: string
FooParam: bar
''',
}),
('multi-line-desc',
{'template': multiline_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Parameter with
# multi-line description
# Type: string
FooParam: ''
''',
}),
('basic_role_param',
{'template': basic_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic_role_param
title: Basic Role Parameters Environment
description: Basic description
files:
foo.yaml:
RoleParameters:
- RoleParam
''',
'expected_output': '''# title: Basic Role Parameters Environment
# description: |
# Basic description
parameter_defaults:
RoleParameters:
# Role param description
# Type: string
RoleParam: ''
''',
}),
('multiline_role_param',
{'template': multiline_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: multiline_role_param
title: Multiline Role Parameters Environment
description: Multiline description
files:
foo.yaml:
RoleParameters:
- RoleParam
''',
'expected_output': '''# title: Multiline Role Parameters Environment
# description: |
# Multiline description
parameter_defaults:
RoleParameters:
# Role Parameter with
# multi-line description
# Type: string
RoleParam: ''
''',
}),
('Basic mix params',
{'template': basic_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic_mix_params
title: Basic Mix Parameters Environment
description: Basic description
files:
foo.yaml:
parameters:
- FooParam
RoleParameters:
- RoleParam
''',
'expected_output': '''# title: Basic Mix Parameters Environment
# description: |
# Basic description
parameter_defaults:
# Foo description
# Type: string
FooParam: foo
RoleParameters:
# Role param description
# Type: string
RoleParam: ''
''',
}),
('Multiline mix params',
{'template': multiline_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: multiline_mix_params
title: Multiline mix params Environment
description: Multiline description
files:
foo.yaml:
parameters:
- FooParam
RoleParameters:
- RoleParam
''',
'expected_output': '''# title: Multiline mix params Environment
# description: |
# Multiline description
parameter_defaults:
# Parameter with
# multi-line description
# Type: string
FooParam: ''
RoleParameters:
# Role Parameter with
# multi-line description
# Type: string
RoleParam: ''
''',
}),
('Basic role static param',
{'template': basic_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic_role_static_param
title: Basic Role Static Prams Environment
description: Basic Role Static Prams description
files:
foo.yaml:
parameters:
- FooParam
RoleParameters:
- RoleParam
static:
- FooParam
- RoleParam
''',
'expected_output': '''# title: Basic Role Static Prams Environment
# description: |
# Basic Role Static Prams description
parameter_defaults:
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Foo description
# Type: string
FooParam: foo
# *********************
# End static parameters
# *********************
RoleParameters:
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Role param description
# Type: string
RoleParam: ''
# *********************
# End static parameters
# *********************
''',
}),
('Multiline role static param',
{'template': multiline_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: multline_role_static_param
title: Multiline Role Static Prams Environment
description: Multiline Role Static Prams description
files:
foo.yaml:
parameters:
- FooParam
RoleParameters:
- RoleParam
static:
- FooParam
- RoleParam
''',
'expected_output': '''# title: Multiline Role Static Prams Environment
# description: |
# Multiline Role Static Prams description
parameter_defaults:
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Parameter with
# multi-line description
# Type: string
FooParam: ''
# *********************
# End static parameters
# *********************
RoleParameters:
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Role Parameter with
# multi-line description
# Type: string
RoleParam: ''
# *********************
# End static parameters
# *********************
''',
}),
('no-files',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
resource_registry:
foo: bar
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
resource_registry:
foo: bar
''',
}),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(
cls.content_scenarios)
def test_generator(self):
fake_input = io.StringIO(six.text_type(self.input_file))
fake_template = io.StringIO(six.text_type(self.template))
_, fake_output_path = tempfile.mkstemp()
fake_output = open(fake_output_path, 'w')
with mock.patch('tripleo_heat_templates.environment_generator.open',
create=True) as mock_open:
mock_se = [fake_input, fake_template, fake_output]
if 'files:' not in self.input_file:
# No files were specified so that open call won't happen
mock_se.remove(fake_template)
if self.nested_output:
_, fake_nested_output_path = tempfile.mkstemp()
fake_nested_output = open(fake_nested_output_path, 'w')
fake_template2 = io.StringIO(six.text_type(self.template))
mock_se = [fake_input, fake_template, fake_output,
fake_template2, fake_nested_output]
mock_open.side_effect = mock_se
if not self.exception:
environment_generator.generate_environments('ignored.yaml',
'environments')
else:
self.assertRaises(self.exception,
environment_generator.generate_environments,
'ignored.yaml',
'environments')
return
expected = environment_generator._FILE_HEADER + self.expected_output
with open(fake_output_path) as f:
self.assertEqual(expected, f.read())
if self.nested_output:
with open(fake_nested_output_path) as f:
expected = (environment_generator._FILE_HEADER +
self.nested_output)
self.assertEqual(expected, f.read())
GeneratorTestCase.generate_scenarios()
| apache-2.0 | 992,682,378,127,931,500 | 23.654047 | 80 | 0.562775 | false |
mrakgr/futhark | examples/life/quadlife_alt.py | 1 | 55476 | import sys
import numpy as np
import ctypes as ct
import pyopencl as cl
import pyopencl.array
import time
import argparse
FUT_BLOCK_DIM = "16"
cl_group_size = np.int32(512)
synchronous = False
fut_opencl_src = """typedef char int8_t;
typedef short int16_t;
typedef int int32_t;
typedef long int64_t;
typedef uchar uint8_t;
typedef ushort uint16_t;
typedef uint uint32_t;
typedef ulong uint64_t;
static inline int8_t add8(int8_t x, int8_t y)
{
return x + y;
}
static inline int16_t add16(int16_t x, int16_t y)
{
return x + y;
}
static inline int32_t add32(int32_t x, int32_t y)
{
return x + y;
}
static inline int64_t add64(int64_t x, int64_t y)
{
return x + y;
}
static inline int8_t sub8(int8_t x, int8_t y)
{
return x - y;
}
static inline int16_t sub16(int16_t x, int16_t y)
{
return x - y;
}
static inline int32_t sub32(int32_t x, int32_t y)
{
return x - y;
}
static inline int64_t sub64(int64_t x, int64_t y)
{
return x - y;
}
static inline int8_t mul8(int8_t x, int8_t y)
{
return x * y;
}
static inline int16_t mul16(int16_t x, int16_t y)
{
return x * y;
}
static inline int32_t mul32(int32_t x, int32_t y)
{
return x * y;
}
static inline int64_t mul64(int64_t x, int64_t y)
{
return x * y;
}
static inline uint8_t udiv8(uint8_t x, uint8_t y)
{
return x / y;
}
static inline uint16_t udiv16(uint16_t x, uint16_t y)
{
return x / y;
}
static inline uint32_t udiv32(uint32_t x, uint32_t y)
{
return x / y;
}
static inline uint64_t udiv64(uint64_t x, uint64_t y)
{
return x / y;
}
static inline uint8_t umod8(uint8_t x, uint8_t y)
{
return x % y;
}
static inline uint16_t umod16(uint16_t x, uint16_t y)
{
return x % y;
}
static inline uint32_t umod32(uint32_t x, uint32_t y)
{
return x % y;
}
static inline uint64_t umod64(uint64_t x, uint64_t y)
{
return x % y;
}
static inline int8_t sdiv8(int8_t x, int8_t y)
{
int8_t q = x / y;
int8_t r = x % y;
return q - ((r != 0 && r < 0 != y < 0) ? 1 : 0);
}
static inline int16_t sdiv16(int16_t x, int16_t y)
{
int16_t q = x / y;
int16_t r = x % y;
return q - ((r != 0 && r < 0 != y < 0) ? 1 : 0);
}
static inline int32_t sdiv32(int32_t x, int32_t y)
{
int32_t q = x / y;
int32_t r = x % y;
return q - ((r != 0 && r < 0 != y < 0) ? 1 : 0);
}
static inline int64_t sdiv64(int64_t x, int64_t y)
{
int64_t q = x / y;
int64_t r = x % y;
return q - ((r != 0 && r < 0 != y < 0) ? 1 : 0);
}
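/* Note (illustrative): the sdivN helpers implement floored division, which
   rounds toward negative infinity, unlike C's '/' which truncates toward
   zero. Example: sdiv32(-7, 2) == -4, whereas -7 / 2 == -3. */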
static inline int8_t smod8(int8_t x, int8_t y)
{
int8_t r = x % y;
return r + (r == 0 || (x > 0 && y > 0) || (x < 0 && y < 0) ? 0 : y);
}
static inline int16_t smod16(int16_t x, int16_t y)
{
int16_t r = x % y;
return r + (r == 0 || (x > 0 && y > 0) || (x < 0 && y < 0) ? 0 : y);
}
static inline int32_t smod32(int32_t x, int32_t y)
{
int32_t r = x % y;
return r + (r == 0 || (x > 0 && y > 0) || (x < 0 && y < 0) ? 0 : y);
}
static inline int64_t smod64(int64_t x, int64_t y)
{
int64_t r = x % y;
return r + (r == 0 || (x > 0 && y > 0) || (x < 0 && y < 0) ? 0 : y);
}
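/* Note (illustrative): likewise the smodN helpers give the floored modulus,
   whose sign follows the divisor. Example: smod32(-7, 2) == 1, whereas
   C's -7 % 2 == -1. */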
static inline int8_t squot8(int8_t x, int8_t y)
{
return x / y;
}
static inline int16_t squot16(int16_t x, int16_t y)
{
return x / y;
}
static inline int32_t squot32(int32_t x, int32_t y)
{
return x / y;
}
static inline int64_t squot64(int64_t x, int64_t y)
{
return x / y;
}
static inline int8_t srem8(int8_t x, int8_t y)
{
return x % y;
}
static inline int16_t srem16(int16_t x, int16_t y)
{
return x % y;
}
static inline int32_t srem32(int32_t x, int32_t y)
{
return x % y;
}
static inline int64_t srem64(int64_t x, int64_t y)
{
return x % y;
}
static inline uint8_t shl8(uint8_t x, uint8_t y)
{
return x << y;
}
static inline uint16_t shl16(uint16_t x, uint16_t y)
{
return x << y;
}
static inline uint32_t shl32(uint32_t x, uint32_t y)
{
return x << y;
}
static inline uint64_t shl64(uint64_t x, uint64_t y)
{
return x << y;
}
static inline uint8_t lshr8(uint8_t x, uint8_t y)
{
return x >> y;
}
static inline uint16_t lshr16(uint16_t x, uint16_t y)
{
return x >> y;
}
static inline uint32_t lshr32(uint32_t x, uint32_t y)
{
return x >> y;
}
static inline uint64_t lshr64(uint64_t x, uint64_t y)
{
return x >> y;
}
static inline int8_t ashr8(int8_t x, int8_t y)
{
return x >> y;
}
static inline int16_t ashr16(int16_t x, int16_t y)
{
return x >> y;
}
static inline int32_t ashr32(int32_t x, int32_t y)
{
return x >> y;
}
static inline int64_t ashr64(int64_t x, int64_t y)
{
return x >> y;
}
static inline uint8_t and8(uint8_t x, uint8_t y)
{
return x & y;
}
static inline uint16_t and16(uint16_t x, uint16_t y)
{
return x & y;
}
static inline uint32_t and32(uint32_t x, uint32_t y)
{
return x & y;
}
static inline uint64_t and64(uint64_t x, uint64_t y)
{
return x & y;
}
static inline uint8_t or8(uint8_t x, uint8_t y)
{
return x | y;
}
static inline uint16_t or16(uint16_t x, uint16_t y)
{
return x | y;
}
static inline uint32_t or32(uint32_t x, uint32_t y)
{
return x | y;
}
static inline uint64_t or64(uint64_t x, uint64_t y)
{
return x | y;
}
static inline uint8_t xor8(uint8_t x, uint8_t y)
{
return x ^ y;
}
static inline uint16_t xor16(uint16_t x, uint16_t y)
{
return x ^ y;
}
static inline uint32_t xor32(uint32_t x, uint32_t y)
{
return x ^ y;
}
static inline uint64_t xor64(uint64_t x, uint64_t y)
{
return x ^ y;
}
static inline char ult8(uint8_t x, uint8_t y)
{
return x < y;
}
static inline char ult16(uint16_t x, uint16_t y)
{
return x < y;
}
static inline char ult32(uint32_t x, uint32_t y)
{
return x < y;
}
static inline char ult64(uint64_t x, uint64_t y)
{
return x < y;
}
static inline char ule8(uint8_t x, uint8_t y)
{
return x <= y;
}
static inline char ule16(uint16_t x, uint16_t y)
{
return x <= y;
}
static inline char ule32(uint32_t x, uint32_t y)
{
return x <= y;
}
static inline char ule64(uint64_t x, uint64_t y)
{
return x <= y;
}
static inline char slt8(int8_t x, int8_t y)
{
return x < y;
}
static inline char slt16(int16_t x, int16_t y)
{
return x < y;
}
static inline char slt32(int32_t x, int32_t y)
{
return x < y;
}
static inline char slt64(int64_t x, int64_t y)
{
return x < y;
}
static inline char sle8(int8_t x, int8_t y)
{
return x <= y;
}
static inline char sle16(int16_t x, int16_t y)
{
return x <= y;
}
static inline char sle32(int32_t x, int32_t y)
{
return x <= y;
}
static inline char sle64(int64_t x, int64_t y)
{
return x <= y;
}
static inline int8_t pow8(int8_t x, int8_t y)
{
int8_t res = 1, rem = y;
while (rem != 0) {
if (rem & 1)
res *= x;
rem >>= 1;
x *= x;
}
return res;
}
static inline int16_t pow16(int16_t x, int16_t y)
{
int16_t res = 1, rem = y;
while (rem != 0) {
if (rem & 1)
res *= x;
rem >>= 1;
x *= x;
}
return res;
}
static inline int32_t pow32(int32_t x, int32_t y)
{
int32_t res = 1, rem = y;
while (rem != 0) {
if (rem & 1)
res *= x;
rem >>= 1;
x *= x;
}
return res;
}
static inline int64_t pow64(int64_t x, int64_t y)
{
int64_t res = 1, rem = y;
while (rem != 0) {
if (rem & 1)
res *= x;
rem >>= 1;
x *= x;
}
return res;
}
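/* Note (illustrative): the powN helpers use binary (square-and-multiply)
   exponentiation: each iteration consumes one exponent bit, so pow32(3, 10)
   takes 4 iterations rather than 9 repeated multiplications. */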
static inline int8_t sext_i8_i8(int8_t x)
{
return x;
}
static inline int16_t sext_i8_i16(int8_t x)
{
return x;
}
static inline int32_t sext_i8_i32(int8_t x)
{
return x;
}
static inline int64_t sext_i8_i64(int8_t x)
{
return x;
}
static inline int8_t sext_i16_i8(int16_t x)
{
return x;
}
static inline int16_t sext_i16_i16(int16_t x)
{
return x;
}
static inline int32_t sext_i16_i32(int16_t x)
{
return x;
}
static inline int64_t sext_i16_i64(int16_t x)
{
return x;
}
static inline int8_t sext_i32_i8(int32_t x)
{
return x;
}
static inline int16_t sext_i32_i16(int32_t x)
{
return x;
}
static inline int32_t sext_i32_i32(int32_t x)
{
return x;
}
static inline int64_t sext_i32_i64(int32_t x)
{
return x;
}
static inline int8_t sext_i64_i8(int64_t x)
{
return x;
}
static inline int16_t sext_i64_i16(int64_t x)
{
return x;
}
static inline int32_t sext_i64_i32(int64_t x)
{
return x;
}
static inline int64_t sext_i64_i64(int64_t x)
{
return x;
}
static inline uint8_t zext_i8_i8(uint8_t x)
{
return x;
}
static inline uint16_t zext_i8_i16(uint8_t x)
{
return x;
}
static inline uint32_t zext_i8_i32(uint8_t x)
{
return x;
}
static inline uint64_t zext_i8_i64(uint8_t x)
{
return x;
}
static inline uint8_t zext_i16_i8(uint16_t x)
{
return x;
}
static inline uint16_t zext_i16_i16(uint16_t x)
{
return x;
}
static inline uint32_t zext_i16_i32(uint16_t x)
{
return x;
}
static inline uint64_t zext_i16_i64(uint16_t x)
{
return x;
}
static inline uint8_t zext_i32_i8(uint32_t x)
{
return x;
}
static inline uint16_t zext_i32_i16(uint32_t x)
{
return x;
}
static inline uint32_t zext_i32_i32(uint32_t x)
{
return x;
}
static inline uint64_t zext_i32_i64(uint32_t x)
{
return x;
}
static inline uint8_t zext_i64_i8(uint64_t x)
{
return x;
}
static inline uint16_t zext_i64_i16(uint64_t x)
{
return x;
}
static inline uint32_t zext_i64_i32(uint64_t x)
{
return x;
}
static inline uint64_t zext_i64_i64(uint64_t x)
{
return x;
}
static inline float fdiv32(float x, float y)
{
return x / y;
}
static inline float fadd32(float x, float y)
{
return x + y;
}
static inline float fsub32(float x, float y)
{
return x - y;
}
static inline float fmul32(float x, float y)
{
return x * y;
}
static inline float fpow32(float x, float y)
{
return pow(x, y);
}
static inline char cmplt32(float x, float y)
{
return x < y;
}
static inline char cmple32(float x, float y)
{
return x <= y;
}
static inline float sitofp_i8_f32(int8_t x)
{
return x;
}
static inline float sitofp_i16_f32(int16_t x)
{
return x;
}
static inline float sitofp_i32_f32(int32_t x)
{
return x;
}
static inline float sitofp_i64_f32(int64_t x)
{
return x;
}
static inline float uitofp_i8_f32(uint8_t x)
{
return x;
}
static inline float uitofp_i16_f32(uint16_t x)
{
return x;
}
static inline float uitofp_i32_f32(uint32_t x)
{
return x;
}
static inline float uitofp_i64_f32(uint64_t x)
{
return x;
}
static inline int8_t fptosi_f32_i8(float x)
{
return x;
}
static inline int16_t fptosi_f32_i16(float x)
{
return x;
}
static inline int32_t fptosi_f32_i32(float x)
{
return x;
}
static inline int64_t fptosi_f32_i64(float x)
{
return x;
}
static inline uint8_t fptoui_f32_i8(float x)
{
return x;
}
static inline uint16_t fptoui_f32_i16(float x)
{
return x;
}
static inline uint32_t fptoui_f32_i32(float x)
{
return x;
}
static inline uint64_t fptoui_f32_i64(float x)
{
return x;
}
__kernel void map_kernel_1022(int32_t m_880, __global
unsigned char *world_mem_1109, int32_t n_879,
__global unsigned char *mem_1112)
{
const uint kernel_thread_index_1022 = get_global_id(0);
if (kernel_thread_index_1022 >= n_879 * m_880)
return;
int32_t i_1023;
int32_t i_1024;
char b_1025;
// compute thread index
{
i_1023 = squot32(kernel_thread_index_1022, m_880);
i_1024 = kernel_thread_index_1022 - squot32(kernel_thread_index_1022,
m_880) * m_880;
}
// read kernel parameters
{
b_1025 = *(__global char *) &world_mem_1109[i_1023 * m_880 + i_1024];
}
int8_t res_1026;
if (b_1025) {
res_1026 = 1;
} else {
res_1026 = 0;
}
// write kernel result
{
*(__global int8_t *) &mem_1112[i_1023 * m_880 + i_1024] = res_1026;
}
}
__kernel void map_kernel_1176(int32_t m_880, __global unsigned char *mem_1114)
{
const uint global_thread_index_1176 = get_global_id(0);
if (global_thread_index_1176 >= m_880)
return;
int32_t i_1177;
// compute thread index
{
i_1177 = global_thread_index_1176;
}
// read kernel parameters
{ }
// write kernel result
{
*(__global int32_t *) &mem_1114[i_1177 * 4] = 0;
}
}
__kernel void map_kernel_1180(int32_t m_880, __global unsigned char *mem_1114,
int32_t n_879, __global unsigned char *mem_1117)
{
const uint global_thread_index_1180 = get_global_id(0);
if (global_thread_index_1180 >= n_879 * m_880)
return;
int32_t i_1181;
int32_t j_1182;
int32_t input_1183;
// compute thread index
{
i_1181 = squot32(global_thread_index_1180, m_880);
j_1182 = global_thread_index_1180 - squot32(global_thread_index_1180,
m_880) * m_880;
}
// read kernel parameters
{
input_1183 = *(__global int32_t *) &mem_1114[j_1182 * 4];
}
// write kernel result
{
*(__global int32_t *) &mem_1117[(i_1181 * m_880 + j_1182) * 4] =
input_1183;
}
}
__kernel void map_kernel_1048(int32_t n_889, int32_t m_890, __global
unsigned char *mem_1130, __global
unsigned char *all_history_mem_1119, __global
unsigned char *mem_1133, __global
unsigned char *mem_1137)
{
const uint kernel_thread_index_1048 = get_global_id(0);
if (kernel_thread_index_1048 >= n_889 * m_890)
return;
int32_t i_1049;
int32_t i_1050;
int32_t not_curried_1051;
// compute thread index
{
i_1049 = squot32(kernel_thread_index_1048, m_890);
i_1050 = kernel_thread_index_1048 - squot32(kernel_thread_index_1048,
m_890) * m_890;
}
// read kernel parameters
{
not_curried_1051 = *(__global int32_t *) &all_history_mem_1119[(i_1049 *
m_890 +
i_1050) *
4];
}
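    /* Illustrative reading of the generated code: the packed history word
       holds the cell state in its low two bits and the cell's age in the
       remaining bits; the age is clamped to 255 before being used as a
       display intensity below. */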
int32_t res_1052 = not_curried_1051 & 3;
int32_t arg_1053 = ashr32(not_curried_1051, 2);
char cond_1054 = slt32(255, arg_1053);
int32_t res_1055;
if (cond_1054) {
res_1055 = 255;
} else {
res_1055 = arg_1053;
}
int8_t y_1057 = sext_i32_i8(res_1055);
// write kernel result
{
*(__global int8_t *) &mem_1133[i_1049 * m_890 + i_1050] = y_1057;
for (int i_1188 = 0; i_1188 < 3; i_1188++) {
*(__global int8_t *) &mem_1137[3 * (m_890 * i_1049) + (m_890 *
i_1188 +
i_1050)] =
*(__global int8_t *) &mem_1130[3 * res_1052 + i_1188];
}
}
}
__kernel void map_kernel_1037(__global unsigned char *mem_1137, int32_t n_889,
__global unsigned char *mem_1133, int32_t m_890,
__global unsigned char *mem_1141)
{
const uint kernel_thread_index_1037 = get_global_id(0);
if (kernel_thread_index_1037 >= n_889 * m_890 * 3)
return;
int32_t i_1038;
int32_t i_1039;
int32_t i_1040;
int8_t y_1041;
int8_t binop_param_noncurried_1042;
// compute thread index
{
i_1038 = squot32(kernel_thread_index_1037, m_890 * 3);
i_1039 = squot32(kernel_thread_index_1037 -
squot32(kernel_thread_index_1037, m_890 * 3) * (m_890 *
3), 3);
i_1040 = kernel_thread_index_1037 - squot32(kernel_thread_index_1037,
m_890 * 3) * (m_890 * 3) -
squot32(kernel_thread_index_1037 - squot32(kernel_thread_index_1037,
m_890 * 3) * (m_890 * 3),
3) * 3;
}
// read kernel parameters
{
y_1041 = *(__global int8_t *) &mem_1133[i_1038 * m_890 + i_1039];
binop_param_noncurried_1042 = *(__global int8_t *) &mem_1137[i_1038 *
(3 *
m_890) +
i_1040 *
m_890 +
i_1039];
}
int8_t res_1043 = binop_param_noncurried_1042 - y_1041;
// write kernel result
{
*(__global int8_t *) &mem_1141[i_1038 * (m_890 * 3) + i_1039 * 3 +
i_1040] = res_1043;
}
}
__kernel void map_kernel_1100(int32_t n_910, __global unsigned char *mem_1149,
__global unsigned char *mem_1151)
{
const uint kernel_thread_index_1100 = get_global_id(0);
if (kernel_thread_index_1100 >= n_910)
return;
int32_t i_1101;
// compute thread index
{
i_1101 = kernel_thread_index_1100;
}
// read kernel parameters
{ }
int32_t x_1103 = i_1101 - 1;
int32_t res_1104 = smod32(x_1103, n_910);
int32_t x_1105 = i_1101 + 1;
int32_t res_1106 = smod32(x_1105, n_910);
// write kernel result
{
*(__global int32_t *) &mem_1149[i_1101 * 4] = res_1106;
*(__global int32_t *) &mem_1151[i_1101 * 4] = res_1104;
}
}
__kernel void map_kernel_1064(__global unsigned char *mem_1149, __global
unsigned char *world_mem_1153, int32_t n_910,
__global unsigned char *mem_1151, int32_t m_911,
__global unsigned char *mem_1147, __global
unsigned char *history_mem_1155, __global
unsigned char *mem_1158, __global
unsigned char *mem_1161)
{
const uint kernel_thread_index_1064 = get_global_id(0);
if (kernel_thread_index_1064 >= n_910 * m_911)
return;
int32_t i_1065;
int32_t i_1066;
int32_t res_1068;
int32_t res_1069;
int32_t x_1070;
// compute thread index
{
i_1065 = squot32(kernel_thread_index_1064, m_911);
i_1066 = kernel_thread_index_1064 - squot32(kernel_thread_index_1064,
m_911) * m_911;
}
// read kernel parameters
{
res_1068 = *(__global int32_t *) &mem_1149[i_1065 * 4];
res_1069 = *(__global int32_t *) &mem_1151[i_1065 * 4];
x_1070 = *(__global int32_t *) &history_mem_1155[(i_1065 * m_911 +
i_1066) * 4];
}
int32_t x_1072 = i_1066 + 1;
int32_t res_1073 = smod32(x_1072, m_911);
int32_t x_1074 = i_1066 - 1;
int32_t res_1075 = smod32(x_1074, m_911);
int8_t x_1076 = *(__global int8_t *) &world_mem_1153[res_1069 * m_911 +
i_1066];
int8_t y_1077 = *(__global int8_t *) &world_mem_1153[i_1065 * m_911 +
res_1075];
int8_t x_1078 = x_1076 + y_1077;
int8_t y_1079 = *(__global int8_t *) &world_mem_1153[i_1065 * m_911 +
i_1066];
int8_t x_1080 = x_1078 + y_1079;
int8_t y_1081 = *(__global int8_t *) &world_mem_1153[i_1065 * m_911 +
res_1073];
int8_t x_1082 = x_1080 + y_1081;
int8_t y_1083 = *(__global int8_t *) &world_mem_1153[res_1068 * m_911 +
i_1066];
int8_t res_1084 = x_1082 + y_1083;
int32_t i_1085 = sext_i8_i32(res_1084);
int8_t res_1086 = *(__global int8_t *) &mem_1147[i_1085];
int32_t res_1087 = x_1070 & 3;
int32_t arg_1088 = ashr32(x_1070, 2);
char cond_1089 = slt32(128, arg_1088);
int32_t res_1090;
if (cond_1089) {
res_1090 = 128;
} else {
res_1090 = arg_1088;
}
int8_t y_1091 = sext_i32_i8(res_1087);
char cond_1092 = res_1086 == y_1091;
int32_t x_1093 = res_1090 + 1;
int32_t x_1094 = x_1093 << 2;
int32_t y_1095 = sext_i8_i32(res_1086);
int32_t res_1096 = x_1094 | y_1095;
int32_t res_1097;
if (cond_1092) {
res_1097 = res_1096;
} else {
res_1097 = y_1095;
}
// write kernel result
{
*(__global int32_t *) &mem_1158[(i_1065 * m_911 + i_1066) * 4] =
res_1097;
*(__global int8_t *) &mem_1161[i_1065 * m_911 + i_1066] = res_1086;
}
}
"""
# Hacky parser/reader for values written in Futhark syntax. Used for
# reading stdin when compiling standalone programs with the Python
# code generator.
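# For reference, the reader below accepts values such as:
#   42i32              (integer, optional 'i<bits>' trailer)
#   -1.5e2f64          (float, optional 'f<bits>' trailer)
#   True / False       (booleans)
#   [[1, 2], [3, 4]]   (nested, comma-separated arrays)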
lookahead_buffer = []
def reset_lookahead():
global lookahead_buffer
lookahead_buffer = []
def get_char(f):
global lookahead_buffer
if len(lookahead_buffer) == 0:
return f.read(1)
else:
c = lookahead_buffer[0]
lookahead_buffer = lookahead_buffer[1:]
return c
def unget_char(f, c):
global lookahead_buffer
lookahead_buffer = [c] + lookahead_buffer
def peek_char(f):
c = get_char(f)
if c:
unget_char(f, c)
return c
def skip_spaces(f):
c = get_char(f)
while c != None:
if c.isspace():
c = get_char(f)
elif c == '-':
# May be line comment.
if peek_char(f) == '-':
# Yes, line comment. Skip to end of line.
while (c != '\n' and c != None):
c = get_char(f)
else:
break
else:
break
if c:
unget_char(f, c)
def parse_specific_char(f, expected):
got = get_char(f)
if got != expected:
unget_char(f, got)
raise ValueError
return True
def parse_specific_string(f, s):
for c in s:
parse_specific_char(f, c)
return True
def optional(p, *args):
try:
return p(*args)
except ValueError:
return None
def sepBy(p, sep, *args):
elems = []
x = optional(p, *args)
if x != None:
elems += [x]
while optional(sep, *args) != None:
x = p(*args)
elems += [x]
return elems
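# Usage sketch (hypothetical stream f): sepBy(read_int, read_comma, f)
# parses "1, 2, 3" into [1, 2, 3]; optional(p, *args) turns a failing
# parse (ValueError) into None instead of propagating the exception.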
def parse_int(f):
s = ''
c = get_char(f)
while c != None:
if c.isdigit():
s += c
c = get_char(f)
else:
unget_char(f, c)
break
optional(read_int_trailer, f)
return s
def parse_int_signed(f):
s = ''
c = get_char(f)
if c == '-' and peek_char(f).isdigit():
s = c + parse_int(f)
else:
unget_char(f, c)
s = parse_int(f)
return s
def read_int_trailer(f):
parse_specific_char(f, 'i')
while peek_char(f).isdigit():
get_char(f)
def read_comma(f):
skip_spaces(f)
parse_specific_char(f, ',')
return ','
def read_int(f):
skip_spaces(f)
return int(parse_int_signed(f))
def read_char(f):
skip_spaces(f)
parse_specific_char(f, '\'')
c = get_char(f)
parse_specific_char(f, '\'')
return c
def read_double(f):
skip_spaces(f)
c = get_char(f)
if (c == '-'):
sign = '-'
else:
unget_char(f,c)
sign = ''
bef = optional(parse_int, f)
if bef == None:
bef = '0'
parse_specific_char(f, '.')
aft = parse_int(f)
elif optional(parse_specific_char, f, '.'):
aft = parse_int(f)
else:
aft = '0'
if (optional(parse_specific_char, f, 'E') or
optional(parse_specific_char, f, 'e')):
expt = parse_int_signed(f)
else:
expt = '0'
optional(read_float_trailer, f)
return float(sign + bef + '.' + aft + 'E' + expt)
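# read_double accepts forms like "1.5", "-2.25e3" and ".5"; a trailing
# "f32"/"f64" suffix, if present, is consumed by read_float_trailer below.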
def read_float(f):
return read_double(f)
def read_float_trailer(f):
parse_specific_char(f, 'f')
while peek_char(f).isdigit():
get_char(f)
def read_bool(f):
skip_spaces(f)
if peek_char(f) == 'T':
parse_specific_string(f, 'True')
return True
elif peek_char(f) == 'F':
parse_specific_string(f, 'False')
return False
else:
raise ValueError
def read_array_elems(f, elem_reader):
skip_spaces(f)
parse_specific_char(f, '[')
xs = sepBy(elem_reader, read_comma, f)
skip_spaces(f)
parse_specific_char(f, ']')
return xs
def read_array_helper(f, elem_reader, rank):
def nested_row_reader(_):
return read_array_helper(f, elem_reader, rank-1)
if rank == 1:
row_reader = elem_reader
else:
row_reader = nested_row_reader
return read_array_elems(f, row_reader)
def expected_array_dims(l, rank):
if rank > 1:
n = len(l)
if n == 0:
elem = []
else:
elem = l[0]
return [n] + expected_array_dims(elem, rank-1)
else:
return [len(l)]
def verify_array_dims(l, dims):
if dims[0] != len(l):
raise ValueError
if len(dims) > 1:
for x in l:
verify_array_dims(x, dims[1:])
def read_double_signed(f):
skip_spaces(f)
c = get_char(f)
if c == '-' and peek_char(f).isdigit():
v = -1 * read_double(f)
else:
unget_char(f, c)
v = read_double(f)
return v
def read_array(f, elem_reader, rank, bt):
elems = read_array_helper(f, elem_reader, rank)
dims = expected_array_dims(elems, rank)
verify_array_dims(elems, dims)
return np.array(elems, dtype=bt)
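# Example (hypothetical file-like object f):
#   read_array(f, read_int, 2, np.int32) parses "[[1, 2], [3, 4]]" into a
#   (2, 2) int32 ndarray; rows whose lengths disagree raise ValueError.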
# Scalar functions.
import numpy as np
def signed(x):
if type(x) == np.uint8:
return np.int8(x)
elif type(x) == np.uint16:
return np.int16(x)
elif type(x) == np.uint32:
return np.int32(x)
else:
return np.int64(x)
def unsigned(x):
if type(x) == np.int8:
return np.uint8(x)
elif type(x) == np.int16:
return np.uint16(x)
elif type(x) == np.int32:
return np.uint32(x)
else:
return np.uint64(x)
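# These reinterpret the bit pattern at the same width rather than convert the
# value; e.g. unsigned(np.int8(-1)) == np.uint8(255) and, on the NumPy
# versions this generated code targets, signed(np.uint8(255)) == np.int8(-1).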
def shlN(x,y):
return x << y
def ashrN(x,y):
return x >> y
def sdivN(x,y):
return x / y
def smodN(x,y):
return x % y
def udivN(x,y):
return signed(unsigned(x) / unsigned(y))
def umodN(x,y):
return signed(unsigned(x) % unsigned(y))
def squotN(x,y):
return np.int32(float(x) / float(y))
def sremN(x,y):
return np.fmod(x,y)
def powN(x,y):
return x ** y
def fpowN(x,y):
return x ** y
def sleN(x,y):
return x <= y
def sltN(x,y):
return x < y
def uleN(x,y):
return unsigned(x) <= unsigned(y)
def ultN(x,y):
return unsigned(x) < unsigned(y)
def lshr8(x,y):
return np.int8(np.uint8(x) >> np.uint8(y))
def lshr16(x,y):
return np.int16(np.uint16(x) >> np.uint16(y))
def lshr32(x,y):
return np.int32(np.uint32(x) >> np.uint32(y))
def lshr64(x,y):
return np.int64(np.uint64(x) >> np.uint64(y))
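# ashrN is an arithmetic shift (it keeps the sign bit); the lshrN variants
# reinterpret the value as unsigned first, so e.g. ashr32(-8, 1) == -4
# while lshr32(-8, 1) == 2**31 - 4.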
def sext_T_i8(x):
return np.int8(x)
def sext_T_i16(x):
return np.int16(x)
def sext_T_i32(x):
return np.int32(x)
def sext_T_i64(x):
  return np.int64(x)
def zext_i8_i8(x):
return np.int8(np.uint8(x))
def zext_i8_i16(x):
return np.int16(np.uint8(x))
def zext_i8_i32(x):
return np.int32(np.uint8(x))
def zext_i8_i64(x):
return np.int64(np.uint8(x))
def zext_i16_i8(x):
return np.int8(np.uint16(x))
def zext_i16_i16(x):
return np.int16(np.uint16(x))
def zext_i16_i32(x):
return np.int32(np.uint16(x))
def zext_i16_i64(x):
return np.int64(np.uint16(x))
def zext_i32_i8(x):
return np.int8(np.uint32(x))
def zext_i32_i16(x):
return np.int16(np.uint32(x))
def zext_i32_i32(x):
return np.int32(np.uint32(x))
def zext_i32_i64(x):
return np.int64(np.uint32(x))
def zext_i64_i8(x):
return np.int8(np.uint64(x))
def zext_i64_i16(x):
return np.int16(np.uint64(x))
def zext_i64_i32(x):
return np.int32(np.uint64(x))
def zext_i64_i64(x):
return np.int64(np.uint64(x))
shl8 = shl16 = shl32 = shl64 = shlN
ashr8 = ashr16 = ashr32 = ashr64 = ashrN
sdiv8 = sdiv16 = sdiv32 = sdiv64 = sdivN
smod8 = smod16 = smod32 = smod64 = smodN
udiv8 = udiv16 = udiv32 = udiv64 = udivN
umod8 = umod16 = umod32 = umod64 = umodN
squot8 = squot16 = squot32 = squot64 = squotN
srem8 = srem16 = srem32 = srem64 = sremN
pow8 = pow16 = pow32 = pow64 = powN
fpow32 = fpow64 = fpowN
sle8 = sle16 = sle32 = sle64 = sleN
slt8 = slt16 = slt32 = slt64 = sltN
ule8 = ule16 = ule32 = ule64 = uleN
ult8 = ult16 = ult32 = ult64 = ultN
sext_i8_i8 = sext_i16_i8 = sext_i32_i8 = sext_i64_i8 = sext_T_i8
sext_i8_i16 = sext_i16_i16 = sext_i32_i16 = sext_i64_i16 = sext_T_i16
sext_i8_i32 = sext_i16_i32 = sext_i32_i32 = sext_i64_i32 = sext_T_i32
sext_i8_i64 = sext_i16_i64 = sext_i32_i64 = sext_i64_i64 = sext_T_i64
def ssignum(x):
return np.sign(x)
def usignum(x):
if x < 0:
return ssignum(-x)
else:
return ssignum(x)
def sitofp_T_f32(x):
return np.float32(x)
sitofp_i8_f32 = sitofp_i16_f32 = sitofp_i32_f32 = sitofp_i64_f32 = sitofp_T_f32
def sitofp_T_f64(x):
return np.float64(x)
sitofp_i8_f64 = sitofp_i16_f64 = sitofp_i32_f64 = sitofp_i64_f64 = sitofp_T_f64
def uitofp_T_f32(x):
return np.float32(unsigned(x))
uitofp_i8_f32 = uitofp_i16_f32 = uitofp_i32_f32 = uitofp_i64_f32 = uitofp_T_f32
def uitofp_T_f64(x):
return np.float64(unsigned(x))
uitofp_i8_f64 = uitofp_i16_f64 = uitofp_i32_f64 = uitofp_i64_f64 = uitofp_T_f64
def fptosi_T_i8(x):
return np.int8(np.trunc(x))
fptosi_f32_i8 = fptosi_f64_i8 = fptosi_T_i8
def fptosi_T_i16(x):
return np.int16(np.trunc(x))
fptosi_f32_i16 = fptosi_f64_i16 = fptosi_T_i16
def fptosi_T_i32(x):
return np.int32(np.trunc(x))
fptosi_f32_i32 = fptosi_f64_i32 = fptosi_T_i32
def fptosi_T_i64(x):
return np.int64(np.trunc(x))
fptosi_f32_i64 = fptosi_f64_i64 = fptosi_T_i64
def fptoui_T_i8(x):
return np.uint8(np.trunc(x))
fptoui_f32_i8 = fptoui_f64_i8 = fptoui_T_i8
def fptoui_T_i16(x):
return np.uint16(np.trunc(x))
fptoui_f32_i16 = fptoui_f64_i16 = fptoui_T_i16
def fptoui_T_i32(x):
return np.uint32(np.trunc(x))
fptoui_f32_i32 = fptoui_f64_i32 = fptoui_T_i32
def fptoui_T_i64(x):
return np.uint64(np.trunc(x))
fptoui_f32_i64 = fptoui_f64_i64 = fptoui_T_i64
def fpconv_f32_f64(x):
return np.float64(x)
def fpconv_f64_f32(x):
return np.float32(x)
def futhark_log64(x):
return np.float64(np.log(x))
def futhark_sqrt64(x):
return np.sqrt(x)
def futhark_exp64(x):
return np.exp(x)
def futhark_cos64(x):
return np.cos(x)
def futhark_sin64(x):
return np.sin(x)
def futhark_atan2_64(x, y):
return np.arctan2(x, y)
def futhark_isnan64(x):
return np.isnan(x)
def futhark_isinf64(x):
return np.isinf(x)
def futhark_log32(x):
return np.float32(np.log(x))
def futhark_sqrt32(x):
return np.float32(np.sqrt(x))
def futhark_exp32(x):
return np.exp(x)
def futhark_cos32(x):
return np.cos(x)
def futhark_sin32(x):
return np.sin(x)
def futhark_atan2_32(x, y):
return np.arctan2(x, y)
def futhark_isnan32(x):
return np.isnan(x)
def futhark_isinf32(x):
return np.isinf(x)
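# A minimal usage sketch for the generated class below (hypothetical names;
# assumes PyOpenCL finds a device and that `synchronous` and FUT_BLOCK_DIM
# are defined earlier in this module):
#   q = quadlife_alt()
#   world, history = q.init(initial_world)         # initial_world: (n, m) int8 array
#   world, history = q.steps(world, history, 100)  # advance 100 simulation steps
#   frame = q.render_frame(history)                # (n, m, 3) int8 frame for display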
class quadlife_alt:
def __init__(self):
self.ctx = cl.create_some_context(interactive=False)
self.queue = cl.CommandQueue(self.ctx)
# XXX: Assuming just a single device here.
platform_name = self.ctx.get_info(cl.context_info.DEVICES)[0].platform.name
device_type = self.ctx.get_info(cl.context_info.DEVICES)[0].type
lockstep_width = 1
if ((platform_name == "NVIDIA CUDA") and (device_type == cl.device_type.GPU)):
lockstep_width = np.int32(32)
if ((platform_name == "AMD Accelerated Parallel Processing") and (device_type == cl.device_type.GPU)):
lockstep_width = np.int32(64)
if (len(fut_opencl_src) >= 0):
program = cl.Program(self.ctx, fut_opencl_src).build(["-DFUT_BLOCK_DIM={}".format(FUT_BLOCK_DIM), "-DLOCKSTEP_WIDTH={}".format(lockstep_width)])
self.map_kernel_1022_var = program.map_kernel_1022
self.map_kernel_1176_var = program.map_kernel_1176
self.map_kernel_1180_var = program.map_kernel_1180
self.map_kernel_1048_var = program.map_kernel_1048
self.map_kernel_1037_var = program.map_kernel_1037
self.map_kernel_1100_var = program.map_kernel_1100
self.map_kernel_1064_var = program.map_kernel_1064
def futhark_init(self, world_mem_size_1108, world_mem_1109, n_879, m_880):
nesting_size_1020 = (m_880 * n_879)
bytes_1110 = (n_879 * m_880)
mem_1112 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1110) if (bytes_1110 > np.int32(0)) else np.int32(1)))
group_size_1174 = np.int32(512)
num_groups_1175 = squot32((((n_879 * m_880) + group_size_1174) - np.int32(1)),
group_size_1174)
if ((np.int32(1) * (num_groups_1175 * group_size_1174)) != np.int32(0)):
self.map_kernel_1022_var.set_args(np.int32(m_880), world_mem_1109,
np.int32(n_879), mem_1112)
cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_1022_var,
(long((num_groups_1175 * group_size_1174)),),
(long(group_size_1174),))
if synchronous:
self.queue.finish()
bytes_1113 = (np.int32(4) * m_880)
mem_1114 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1113) if (bytes_1113 > np.int32(0)) else np.int32(1)))
group_size_1178 = np.int32(512)
num_groups_1179 = squot32(((m_880 + group_size_1178) - np.int32(1)),
group_size_1178)
if ((np.int32(1) * (num_groups_1179 * group_size_1178)) != np.int32(0)):
self.map_kernel_1176_var.set_args(np.int32(m_880), mem_1114)
cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_1176_var,
(long((num_groups_1179 * group_size_1178)),),
(long(group_size_1178),))
if synchronous:
self.queue.finish()
x_1116 = (np.int32(4) * n_879)
bytes_1115 = (x_1116 * m_880)
mem_1117 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1115) if (bytes_1115 > np.int32(0)) else np.int32(1)))
group_size_1184 = np.int32(512)
num_groups_1185 = squot32((((n_879 * m_880) + group_size_1184) - np.int32(1)),
group_size_1184)
if ((np.int32(1) * (num_groups_1185 * group_size_1184)) != np.int32(0)):
self.map_kernel_1180_var.set_args(np.int32(m_880), mem_1114,
np.int32(n_879), mem_1117)
cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_1180_var,
(long((num_groups_1185 * group_size_1184)),),
(long(group_size_1184),))
if synchronous:
self.queue.finish()
out_mem_1170 = mem_1112
out_memsize_1171 = bytes_1110
out_mem_1172 = mem_1117
out_memsize_1173 = bytes_1115
return (out_memsize_1171, out_mem_1170, out_memsize_1173, out_mem_1172)
def futhark_render_frame(self, all_history_mem_size_1118,
all_history_mem_1119, n_889, m_890):
mem_1121 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(np.int32(3)) if (np.int32(3) > np.int32(0)) else np.int32(1)))
cl.enqueue_copy(self.queue, mem_1121, np.array(np.int8(0), dtype=ct.c_int8),
device_offset=long(np.int32(0)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1121, np.array(np.int8(0), dtype=ct.c_int8),
device_offset=long(np.int32(1)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1121, np.array(np.int8(-1),
dtype=ct.c_int8),
device_offset=long(np.int32(2)), is_blocking=synchronous)
mem_1123 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(np.int32(3)) if (np.int32(3) > np.int32(0)) else np.int32(1)))
cl.enqueue_copy(self.queue, mem_1123, np.array(np.int8(0), dtype=ct.c_int8),
device_offset=long(np.int32(0)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1123, np.array(np.int8(-1),
dtype=ct.c_int8),
device_offset=long(np.int32(1)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1123, np.array(np.int8(0), dtype=ct.c_int8),
device_offset=long(np.int32(2)), is_blocking=synchronous)
mem_1125 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(np.int32(3)) if (np.int32(3) > np.int32(0)) else np.int32(1)))
cl.enqueue_copy(self.queue, mem_1125, np.array(np.int8(-1),
dtype=ct.c_int8),
device_offset=long(np.int32(0)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1125, np.array(np.int8(0), dtype=ct.c_int8),
device_offset=long(np.int32(1)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1125, np.array(np.int8(0), dtype=ct.c_int8),
device_offset=long(np.int32(2)), is_blocking=synchronous)
mem_1127 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(np.int32(3)) if (np.int32(3) > np.int32(0)) else np.int32(1)))
cl.enqueue_copy(self.queue, mem_1127, np.array(np.int8(-1),
dtype=ct.c_int8),
device_offset=long(np.int32(0)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1127, np.array(np.int8(-1),
dtype=ct.c_int8),
device_offset=long(np.int32(1)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1127, np.array(np.int8(0), dtype=ct.c_int8),
device_offset=long(np.int32(2)), is_blocking=synchronous)
mem_1130 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(np.int32(12)) if (np.int32(12) > np.int32(0)) else np.int32(1)))
if ((np.int32(3) * np.int32(1)) != np.int32(0)):
cl.enqueue_copy(self.queue, mem_1130, mem_1121,
dest_offset=long(np.int32(0)),
src_offset=long(np.int32(0)),
byte_count=long((np.int32(3) * np.int32(1))))
if synchronous:
self.queue.finish()
if ((np.int32(3) * np.int32(1)) != np.int32(0)):
cl.enqueue_copy(self.queue, mem_1130, mem_1123,
dest_offset=long(np.int32(3)),
src_offset=long(np.int32(0)),
byte_count=long((np.int32(3) * np.int32(1))))
if synchronous:
self.queue.finish()
if ((np.int32(3) * np.int32(1)) != np.int32(0)):
cl.enqueue_copy(self.queue, mem_1130, mem_1125,
dest_offset=long((np.int32(3) * np.int32(2))),
src_offset=long(np.int32(0)),
byte_count=long((np.int32(3) * np.int32(1))))
if synchronous:
self.queue.finish()
if ((np.int32(3) * np.int32(1)) != np.int32(0)):
cl.enqueue_copy(self.queue, mem_1130, mem_1127,
dest_offset=long((np.int32(3) * np.int32(3))),
src_offset=long(np.int32(0)),
byte_count=long((np.int32(3) * np.int32(1))))
if synchronous:
self.queue.finish()
nesting_size_1046 = (m_890 * n_889)
bytes_1131 = (n_889 * m_890)
mem_1133 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1131) if (bytes_1131 > np.int32(0)) else np.int32(1)))
x_1136 = (n_889 * np.int32(3))
bytes_1134 = (x_1136 * m_890)
mem_1137 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1134) if (bytes_1134 > np.int32(0)) else np.int32(1)))
group_size_1189 = np.int32(512)
num_groups_1190 = squot32((((n_889 * m_890) + group_size_1189) - np.int32(1)),
group_size_1189)
if ((np.int32(1) * (num_groups_1190 * group_size_1189)) != np.int32(0)):
self.map_kernel_1048_var.set_args(np.int32(n_889), np.int32(m_890),
mem_1130, all_history_mem_1119,
mem_1133, mem_1137)
cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_1048_var,
(long((num_groups_1190 * group_size_1189)),),
(long(group_size_1189),))
if synchronous:
self.queue.finish()
nesting_size_1033 = (np.int32(3) * m_890)
nesting_size_1035 = (nesting_size_1033 * n_889)
bytes_1138 = (bytes_1131 * np.int32(3))
mem_1141 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1138) if (bytes_1138 > np.int32(0)) else np.int32(1)))
group_size_1191 = np.int32(512)
num_groups_1192 = squot32(((((n_889 * m_890) * np.int32(3)) + group_size_1191) - np.int32(1)),
group_size_1191)
if ((np.int32(1) * (num_groups_1192 * group_size_1191)) != np.int32(0)):
self.map_kernel_1037_var.set_args(mem_1137, np.int32(n_889), mem_1133,
np.int32(m_890), mem_1141)
cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_1037_var,
(long((num_groups_1192 * group_size_1191)),),
(long(group_size_1191),))
if synchronous:
self.queue.finish()
out_mem_1186 = mem_1141
out_memsize_1187 = bytes_1138
return (out_memsize_1187, out_mem_1186)
def futhark_steps(self, world_mem_size_1142, history_mem_size_1144,
world_mem_1143, history_mem_1145, n_910, m_911, steps_914):
mem_1147 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(np.int32(16)) if (np.int32(16) > np.int32(0)) else np.int32(1)))
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(0), dtype=ct.c_int8),
device_offset=long(np.int32(0)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(1), dtype=ct.c_int8),
device_offset=long(np.int32(1)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(1), dtype=ct.c_int8),
device_offset=long(np.int32(2)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(0), dtype=ct.c_int8),
device_offset=long(np.int32(3)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(0), dtype=ct.c_int8),
device_offset=long(np.int32(4)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(1), dtype=ct.c_int8),
device_offset=long(np.int32(5)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(1), dtype=ct.c_int8),
device_offset=long(np.int32(6)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(1), dtype=ct.c_int8),
device_offset=long(np.int32(7)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(2), dtype=ct.c_int8),
device_offset=long(np.int32(8)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(2), dtype=ct.c_int8),
device_offset=long(np.int32(9)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(2), dtype=ct.c_int8),
device_offset=long(np.int32(10)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(3), dtype=ct.c_int8),
device_offset=long(np.int32(11)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(3), dtype=ct.c_int8),
device_offset=long(np.int32(12)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(2), dtype=ct.c_int8),
device_offset=long(np.int32(13)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(2), dtype=ct.c_int8),
device_offset=long(np.int32(14)), is_blocking=synchronous)
cl.enqueue_copy(self.queue, mem_1147, np.array(np.int8(3), dtype=ct.c_int8),
device_offset=long(np.int32(15)), is_blocking=synchronous)
bytes_1148 = (np.int32(4) * n_910)
mem_1149 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1148) if (bytes_1148 > np.int32(0)) else np.int32(1)))
mem_1151 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1148) if (bytes_1148 > np.int32(0)) else np.int32(1)))
group_size_1197 = np.int32(512)
num_groups_1198 = squot32(((n_910 + group_size_1197) - np.int32(1)),
group_size_1197)
if ((np.int32(1) * (num_groups_1198 * group_size_1197)) != np.int32(0)):
self.map_kernel_1100_var.set_args(np.int32(n_910), mem_1149, mem_1151)
cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_1100_var,
(long((num_groups_1198 * group_size_1197)),),
(long(group_size_1197),))
if synchronous:
self.queue.finish()
nesting_size_1062 = (m_911 * n_910)
bytes_1156 = (bytes_1148 * m_911)
bytes_1159 = (n_910 * m_911)
double_buffer_mem_1166 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1159) if (bytes_1159 > np.int32(0)) else np.int32(1)))
double_buffer_mem_1167 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1156) if (bytes_1156 > np.int32(0)) else np.int32(1)))
mem_1158 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1156) if (bytes_1156 > np.int32(0)) else np.int32(1)))
mem_1161 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(bytes_1159) if (bytes_1159 > np.int32(0)) else np.int32(1)))
world_mem_size_1152 = world_mem_size_1142
history_mem_size_1154 = history_mem_size_1144
world_mem_1153 = world_mem_1143
history_mem_1155 = history_mem_1145
i_920 = np.int32(0)
one_1208 = np.int32(1)
for counter_1207 in range(steps_914):
group_size_1205 = np.int32(512)
num_groups_1206 = squot32((((n_910 * m_911) + group_size_1205) - np.int32(1)),
group_size_1205)
if ((np.int32(1) * (num_groups_1206 * group_size_1205)) != np.int32(0)):
self.map_kernel_1064_var.set_args(mem_1149, world_mem_1153,
np.int32(n_910), mem_1151,
np.int32(m_911), mem_1147,
history_mem_1155, mem_1158, mem_1161)
cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_1064_var,
(long((num_groups_1206 * group_size_1205)),),
(long(group_size_1205),))
if synchronous:
self.queue.finish()
if (((n_910 * m_911) * np.int32(1)) != np.int32(0)):
cl.enqueue_copy(self.queue, double_buffer_mem_1166, mem_1161,
dest_offset=long(np.int32(0)),
src_offset=long(np.int32(0)),
byte_count=long(((n_910 * m_911) * np.int32(1))))
if synchronous:
self.queue.finish()
if (((n_910 * m_911) * np.int32(4)) != np.int32(0)):
cl.enqueue_copy(self.queue, double_buffer_mem_1167, mem_1158,
dest_offset=long(np.int32(0)),
src_offset=long(np.int32(0)),
byte_count=long(((n_910 * m_911) * np.int32(4))))
if synchronous:
self.queue.finish()
world_mem_size_tmp_1199 = bytes_1159
history_mem_size_tmp_1200 = bytes_1156
world_mem_tmp_1201 = double_buffer_mem_1166
history_mem_tmp_1202 = double_buffer_mem_1167
world_mem_size_1152 = world_mem_size_tmp_1199
history_mem_size_1154 = history_mem_size_tmp_1200
world_mem_1153 = world_mem_tmp_1201
history_mem_1155 = history_mem_tmp_1202
i_920 += one_1208
world_mem_1163 = world_mem_1153
world_mem_size_1162 = world_mem_size_1152
history_mem_1165 = history_mem_1155
history_mem_size_1164 = history_mem_size_1154
out_mem_1193 = world_mem_1163
out_memsize_1194 = world_mem_size_1162
out_mem_1195 = history_mem_1165
out_memsize_1196 = history_mem_size_1164
return (out_memsize_1194, out_mem_1193, out_memsize_1196, out_mem_1195)
def init(self, world_mem_1109_ext):
n_879 = np.int32(world_mem_1109_ext.shape[np.int32(0)])
m_880 = np.int32(world_mem_1109_ext.shape[np.int32(1)])
world_mem_size_1108 = np.int32(world_mem_1109_ext.nbytes)
if (type(world_mem_1109_ext) == cl.array.Array):
world_mem_1109 = world_mem_1109_ext.data
else:
world_mem_1109 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(world_mem_size_1108) if (world_mem_size_1108 > np.int32(0)) else np.int32(1)))
if (world_mem_size_1108 != np.int32(0)):
cl.enqueue_copy(self.queue, world_mem_1109, world_mem_1109_ext,
is_blocking=synchronous)
(out_memsize_1171, out_mem_1170, out_memsize_1173,
out_mem_1172) = self.futhark_init(world_mem_size_1108, world_mem_1109,
n_879, m_880)
return (cl.array.Array(self.queue, (n_879, m_880), ct.c_int8,
data=out_mem_1170), cl.array.Array(self.queue,
(n_879, m_880),
ct.c_int32,
data=out_mem_1172))
def render_frame(self, all_history_mem_1119_ext):
n_889 = np.int32(all_history_mem_1119_ext.shape[np.int32(0)])
m_890 = np.int32(all_history_mem_1119_ext.shape[np.int32(1)])
all_history_mem_size_1118 = np.int32(all_history_mem_1119_ext.nbytes)
if (type(all_history_mem_1119_ext) == cl.array.Array):
all_history_mem_1119 = all_history_mem_1119_ext.data
else:
all_history_mem_1119 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(all_history_mem_size_1118) if (all_history_mem_size_1118 > np.int32(0)) else np.int32(1)))
if (all_history_mem_size_1118 != np.int32(0)):
cl.enqueue_copy(self.queue, all_history_mem_1119,
all_history_mem_1119_ext, is_blocking=synchronous)
(out_memsize_1187,
out_mem_1186) = self.futhark_render_frame(all_history_mem_size_1118,
all_history_mem_1119, n_889,
m_890)
return cl.array.Array(self.queue, (n_889, m_890, np.int32(3)), ct.c_int8,
data=out_mem_1186)
def steps(self, world_mem_1143_ext, history_mem_1145_ext, steps_914_ext):
n_910 = np.int32(world_mem_1143_ext.shape[np.int32(0)])
m_911 = np.int32(world_mem_1143_ext.shape[np.int32(1)])
world_mem_size_1142 = np.int32(world_mem_1143_ext.nbytes)
if (type(world_mem_1143_ext) == cl.array.Array):
world_mem_1143 = world_mem_1143_ext.data
else:
world_mem_1143 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(world_mem_size_1142) if (world_mem_size_1142 > np.int32(0)) else np.int32(1)))
if (world_mem_size_1142 != np.int32(0)):
cl.enqueue_copy(self.queue, world_mem_1143, world_mem_1143_ext,
is_blocking=synchronous)
n_910 = np.int32(history_mem_1145_ext.shape[np.int32(0)])
m_911 = np.int32(history_mem_1145_ext.shape[np.int32(1)])
history_mem_size_1144 = np.int32(history_mem_1145_ext.nbytes)
if (type(history_mem_1145_ext) == cl.array.Array):
history_mem_1145 = history_mem_1145_ext.data
else:
history_mem_1145 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
long(long(history_mem_size_1144) if (history_mem_size_1144 > np.int32(0)) else np.int32(1)))
if (history_mem_size_1144 != np.int32(0)):
cl.enqueue_copy(self.queue, history_mem_1145, history_mem_1145_ext,
is_blocking=synchronous)
steps_914 = np.int32(steps_914_ext)
(out_memsize_1194, out_mem_1193, out_memsize_1196,
out_mem_1195) = self.futhark_steps(world_mem_size_1142,
history_mem_size_1144, world_mem_1143,
history_mem_1145, n_910, m_911,
steps_914)
return (cl.array.Array(self.queue, (n_910, m_911), ct.c_int8,
data=out_mem_1193), cl.array.Array(self.queue,
(n_910, m_911),
ct.c_int32,
data=out_mem_1195)) | bsd-3-clause | 4,486,038,976,242,857,500 | 29.420159 | 150 | 0.537404 | false |
mdaif/olympia | apps/discovery/tests/test_views.py | 1 | 26765 | import json
from django import test
from django.core.cache import cache
from django.test.utils import override_settings
import mock
import waffle
from jingo.helpers import datetime as datetime_filter
from nose.tools import eq_
from pyquery import PyQuery as pq
from tower import strip_whitespace
import amo
import amo.tests
from amo.tests import addon_factory
import addons.signals
from amo.urlresolvers import reverse
from addons.models import (Addon, AddonDependency, CompatOverride,
CompatOverrideRange, Preview)
from applications.models import AppVersion
from bandwagon.models import MonthlyPick, SyncedCollection
from bandwagon.tests.test_models import TestRecommendations as Recs
from discovery import views
from discovery.forms import DiscoveryModuleForm
from discovery.models import DiscoveryModule
from discovery.modules import registry
from files.models import File
from versions.models import Version, ApplicationsVersions
class TestRecs(amo.tests.TestCase):
fixtures = ['base/appversion', 'base/addon_3615',
'base/addon-recs', 'base/addon_5299_gcal', 'base/category',
'base/featured', 'addons/featured']
@classmethod
def setUpClass(cls):
super(TestRecs, cls).setUpClass()
test.Client().get('/')
def setUp(self):
super(TestRecs, self).setUp()
self.url = reverse('discovery.recs', args=['3.6', 'Darwin'])
self.guids = ('bettergcal@ginatrapani.org',
'foxyproxy@eric.h.jung',
'isreaditlater@ideashower.com',
'not-a-real-guid',)
self.ids = Recs.ids
self.guids = [a.guid or 'bad-guid'
for a in Addon.objects.filter(id__in=self.ids)]
self.json = json.dumps({'guids': self.guids})
# The view is limited to returning 9 add-ons.
self.expected_recs = Recs.expected_recs()[:9]
versions = AppVersion.objects.filter(application=amo.FIREFOX.id)
self.min_id = versions.order_by('version_int')[0].id
self.max_id = versions.order_by('-version_int')[0].id
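        # Give every add-on a public file spanning the full Firefox version
        # range so that app-support filtering keeps them all.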
for addon in Addon.objects.all():
v = Version.objects.create(addon=addon)
File.objects.create(version=v, status=amo.STATUS_PUBLIC)
ApplicationsVersions.objects.create(
version=v, application=amo.FIREFOX.id,
min_id=self.min_id, max_id=self.max_id)
addon.update(_current_version=v)
addons.signals.version_changed.send(sender=addon)
Addon.objects.update(status=amo.STATUS_PUBLIC, disabled_by_user=False)
def test_get(self):
"""GET should find method not allowed."""
response = self.client.get(self.url)
eq_(response.status_code, 405)
def test_empty_post_data(self):
response = self.client.post(self.url)
eq_(response.status_code, 400)
def test_bad_post_data(self):
response = self.client.post(self.url, '{]{',
content_type='application/json')
eq_(response.status_code, 400)
def test_no_guids(self):
response = self.client.post(self.url, '{}',
content_type='application/json')
eq_(response.status_code, 400)
def test_get_addon_ids(self):
ids = set(views.get_addon_ids(self.guids))
eq_(ids, set(self.ids))
def test_success(self):
response = self.client.post(self.url, self.json,
content_type='application/json')
eq_(response.status_code, 200)
eq_(response['Content-type'], 'application/json')
data = json.loads(response.content)
eq_(set(data.keys()), set(['token2', 'addons']))
eq_(len(data['addons']), 9)
ids = [a['id'] for a in data['addons']]
eq_(ids, self.expected_recs)
def test_only_show_public(self):
# Mark one add-on as non-public.
unpublic = self.expected_recs[0]
Addon.objects.filter(id=unpublic).update(status=amo.STATUS_LITE)
response = self.client.post(self.url, self.json,
content_type='application/json')
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(len(data['addons']), 9)
ids = [a['id'] for a in data['addons']]
eq_(ids, Recs.expected_recs()[1:10])
assert unpublic not in ids
def test_app_support_filter(self):
# The fixture doesn't contain valid add-ons for the provided URL args.
url = reverse('discovery.recs', args=['5.0', 'Darwin'])
response = self.client.post(url, self.json,
content_type='application/json')
eq_(response.status_code, 200)
eq_(response['Content-type'], 'application/json')
data = json.loads(response.content)
eq_(len(data['addons']), 0)
def test_app_support_filter_ignore(self):
# The fixture doesn't contain valid add-ons for the provided URL
# args, but with compat_mode=ignore, it should still find them.
url = reverse('discovery.recs', args=['5.0', 'Darwin', 'ignore'])
response = self.client.post(url, self.json,
content_type='application/json')
eq_(response.status_code, 200)
eq_(response['Content-type'], 'application/json')
data = json.loads(response.content)
eq_(len(data['addons']), 9)
ids = [a['id'] for a in data['addons']]
eq_(ids, self.expected_recs)
def test_recs_bad_token(self):
post_data = json.dumps(dict(guids=self.guids, token='fake'))
response = self.client.post(self.url, post_data,
content_type='application/json')
data = json.loads(response.content)
ids = [a['id'] for a in data['addons']]
eq_(ids, self.expected_recs)
def test_update_same_index(self):
response = self.client.post(self.url, self.json,
content_type='application/json')
one = json.loads(response.content)
post_data = json.dumps(dict(guids=self.guids, token2=one['token2']))
response = self.client.post(self.url, post_data,
content_type='application/json')
eq_(response.status_code, 200)
two = json.loads(response.content)
# We sent our existing token and the same ids, so the
# responses should be identical.
eq_(one, two)
def test_update_new_index(self):
waffle.models.Sample.objects.create(
name='disco-pane-store-collections', percent='100.0')
response = self.client.post(self.url, self.json,
content_type='application/json')
one = json.loads(response.content)
post_data = json.dumps(dict(guids=self.guids[:1],
token2=one['token2']))
response = self.client.post(self.url, post_data,
content_type='application/json')
eq_(response.status_code, 200)
two = json.loads(response.content)
# Tokens are based on guid list, so these should be different.
assert one['token2'] != two['token2']
assert one['addons'] != two['addons']
eq_(SyncedCollection.objects.filter(addon_index=one['token2']).count(),
1)
eq_(SyncedCollection.objects.filter(addon_index=two['token2']).count(),
1)
class TestModuleAdmin(amo.tests.TestCase):
def test_sync_db_and_registry(self):
def check():
views._sync_db_and_registry(qs, 1)
eq_(qs.count(), len(registry))
modules = qs.values_list('module', flat=True)
eq_(set(modules), set(registry.keys()))
qs = DiscoveryModule.objects.no_cache().filter(app=1)
eq_(qs.count(), 0)
# All our modules get added.
check()
# The deleted module is removed.
with mock.patch.dict(registry):
registry.popitem()
check()
def test_discovery_module_form_bad_locale(self):
d = dict(app=1, module='xx', locales='fake')
form = DiscoveryModuleForm(d)
assert form.errors['locales']
def test_discovery_module_form_dedupe(self):
d = dict(app=amo.FIREFOX.id, module='xx', locales='en-US he he fa fa')
form = DiscoveryModuleForm(d)
assert form.is_valid()
cleaned_locales = form.cleaned_data['locales'].split()
eq_(sorted(cleaned_locales), ['en-US', 'fa', 'he'])
class TestUrls(amo.tests.TestCase):
fixtures = ['base/users', 'base/featured', 'addons/featured',
'base/addon_3615']
def test_reverse(self):
eq_('/en-US/firefox/discovery/pane/10.0/WINNT',
reverse('discovery.pane', kwargs=dict(version='10.0',
platform='WINNT')))
eq_('/en-US/firefox/discovery/pane/10.0/WINNT/strict',
reverse('discovery.pane', args=('10.0', 'WINNT', 'strict')))
def test_resolve_addon_view(self):
r = self.client.get('/en-US/firefox/discovery/addon/3615', follow=True)
url = reverse('discovery.addons.detail', args=['a3615'])
self.assert3xx(r, url, 301)
def test_resolve_disco_pane(self):
# Redirect to default 'strict' if version < 10.
r = self.client.get('/en-US/firefox/discovery/4.0/Darwin', follow=True)
url = reverse('discovery.pane', args=['4.0', 'Darwin', 'strict'])
self.assert3xx(r, url, 302)
# Redirect to default 'ignore' if version >= 10.
r = self.client.get('/en-US/firefox/discovery/10.0/Darwin',
follow=True)
url = reverse('discovery.pane', args=['10.0', 'Darwin', 'ignore'])
self.assert3xx(r, url, 302)
def test_no_compat_mode(self):
r = self.client.head('/en-US/firefox/discovery/pane/10.0/WINNT')
eq_(r.status_code, 200)
def test_with_compat_mode(self):
r = self.client.head('/en-US/firefox/discovery/pane/10.0/WINNT/strict')
eq_(r.status_code, 200)
r = self.client.head('/en-US/firefox/discovery/pane/10.0/WINNT/normal')
eq_(r.status_code, 200)
r = self.client.head('/en-US/firefox/discovery/pane/10.0/WINNT/ignore')
eq_(r.status_code, 200)
r = self.client.head('/en-US/firefox/discovery/pane/10.0/WINNT/blargh')
eq_(r.status_code, 404)
class TestPromos(amo.tests.TestCase):
fixtures = ['base/users', 'discovery/discoverymodules']
def get_disco_url(self, platform, version):
return reverse('discovery.pane.promos', args=[platform, version])
def get_home_url(self):
return reverse('addons.homepage_promos')
def test_no_params(self):
r = self.client.get(self.get_home_url())
eq_(r.status_code, 404)
def test_mac(self):
# Ensure that we get the same thing for the homepage promos.
r_mac = self.client.get(self.get_home_url(),
{'version': '10.0', 'platform': 'mac'})
r_darwin = self.client.get(self.get_disco_url('10.0', 'Darwin'))
eq_(r_mac.status_code, 200)
eq_(r_darwin.status_code, 200)
eq_(r_mac.content, r_darwin.content)
def test_win(self):
r_win = self.client.get(self.get_home_url(),
{'version': '10.0', 'platform': 'win'})
r_winnt = self.client.get(self.get_disco_url('10.0', 'WINNT'))
eq_(r_win.status_code, 200)
eq_(r_winnt.status_code, 200)
eq_(r_win.content, r_winnt.content)
def test_hidden(self):
DiscoveryModule.objects.all().delete()
r = self.client.get(self.get_disco_url('10.0', 'Darwin'))
eq_(r.status_code, 200)
eq_(r.content, '')
class TestPane(amo.tests.TestCase):
fixtures = ['addons/featured', 'base/addon_3615', 'base/collections',
'base/featured', 'base/users',
'bandwagon/featured_collections']
def setUp(self):
super(TestPane, self).setUp()
self.url = reverse('discovery.pane', args=['3.7a1pre', 'Darwin'])
def test_my_account(self):
self.client.login(username='regular@mozilla.com', password='password')
r = self.client.get(reverse('discovery.pane.account'))
eq_(r.status_code, 200)
doc = pq(r.content)
s = doc('#my-account')
assert s
a = s.find('a').eq(0)
eq_(a.attr('href'), reverse('users.profile', args=['regularuser']))
eq_(a.text(), 'My Profile')
a = s.find('a').eq(1)
eq_(a.attr('href'), reverse('collections.detail',
args=['regularuser', 'favorites']))
eq_(a.text(), 'My Favorites')
a = s.find('a').eq(2)
eq_(a.attr('href'), reverse('collections.user', args=['regularuser']))
eq_(a.text(), 'My Collections')
def test_mission(self):
r = self.client.get(reverse('discovery.pane.account'))
assert pq(r.content)('#mission')
def test_featured_addons_section(self):
r = self.client.get(self.url)
eq_(pq(r.content)('#featured-addons h2').text(), 'Featured Add-ons')
def test_featured_addons(self):
r = self.client.get(self.url)
p = pq(r.content)('#featured-addons')
addon = Addon.objects.get(id=7661)
li = p.find('li[data-guid="%s"]' % addon.guid)
a = li.find('a.addon-title')
url = reverse('discovery.addons.detail', args=[7661])
assert a.attr('href').endswith(url + '?src=discovery-featured'), (
'Unexpected add-on details URL')
eq_(li.find('h3').text(), unicode(addon.name))
eq_(li.find('img').attr('src'), addon.icon_url)
addon = Addon.objects.get(id=2464)
li = p.find('li[data-guid="%s"]' % addon.guid)
eq_(li.attr('data-guid'), addon.guid)
a = li.find('a.addon-title')
url = reverse('discovery.addons.detail', args=[2464])
assert a.attr('href').endswith(url + '?src=discovery-featured'), (
'Unexpected add-on details URL')
eq_(li.find('h3').text(), unicode(addon.name))
eq_(li.find('img').attr('src'), addon.icon_url)
def test_featured_personas_section(self):
r = self.client.get(self.url)
h2 = pq(r.content)('#featured-themes h2')
eq_(h2.text(), 'See all Featured Themes')
eq_(h2.find('a.all').attr('href'), reverse('browse.personas'))
@override_settings(MEDIA_URL='/media/', STATIC_URL='/static/')
def test_featured_personas(self):
addon = Addon.objects.get(id=15679)
r = self.client.get(self.url)
doc = pq(r.content)
featured = doc('#featured-themes')
eq_(featured.length, 1)
# Look for all images that are not icon uploads.
imgs = doc('img:not([src*="/media/"])')
imgs_ok = (pq(img).attr('src').startswith('/static/')
for img in imgs)
        assert all(imgs_ok), 'Images must be prefixed with STATIC_URL!'
featured = doc('#featured-themes')
eq_(featured.length, 1)
a = featured.find('a[data-browsertheme]')
url = reverse('discovery.addons.detail', args=[15679])
assert a.attr('href').endswith(url + '?src=discovery-featured'), (
'Unexpected add-on details URL')
eq_(a.attr('target'), '_self')
eq_(featured.find('.addon-title').text(), unicode(addon.name))
class TestDetails(amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/addon_592']
def setUp(self):
super(TestDetails, self).setUp()
self.addon = self.get_addon()
self.detail_url = reverse('discovery.addons.detail',
args=[self.addon.slug])
self.eula_url = reverse('discovery.addons.eula',
args=[self.addon.slug])
def get_addon(self):
return Addon.objects.get(id=3615)
def test_no_restart(self):
f = self.addon.current_version.all_files[0]
eq_(f.no_restart, False)
r = self.client.get(self.detail_url)
eq_(pq(r.content)('#no-restart').length, 0)
f.update(no_restart=True)
r = self.client.get(self.detail_url)
eq_(pq(r.content)('#no-restart').length, 1)
def test_install_button_eula(self):
doc = pq(self.client.get(self.detail_url).content)
eq_(doc('#install .install-button').text(), 'Download Now')
eq_(doc('#install .eula').text(), 'View End-User License Agreement')
doc = pq(self.client.get(self.eula_url).content)
eq_(doc('#install .install-button').text(), 'Download Now')
def test_install_button_no_eula(self):
self.addon.update(eula=None)
doc = pq(self.client.get(self.detail_url).content)
eq_(doc('#install .install-button').text(), 'Download Now')
r = self.client.get(self.eula_url)
self.assert3xx(r, self.detail_url, 302)
def test_perf_warning(self):
eq_(self.addon.ts_slowness, None)
doc = pq(self.client.get(self.detail_url).content)
eq_(doc('.performance-note').length, 0)
self.addon.update(ts_slowness=100)
doc = pq(self.client.get(self.detail_url).content)
eq_(doc('.performance-note').length, 1)
def test_dependencies(self):
doc = pq(self.client.get(self.detail_url).content)
eq_(doc('.dependencies').length, 0)
req = Addon.objects.get(id=592)
AddonDependency.objects.create(addon=self.addon, dependent_addon=req)
eq_(self.addon.all_dependencies, [req])
cache.clear()
d = pq(self.client.get(self.detail_url).content)('.dependencies')
eq_(d.length, 1)
a = d.find('ul a')
eq_(a.text(), unicode(req.name))
eq_(a.attr('href').endswith('?src=discovery-dependencies'), True)
class TestPersonaDetails(amo.tests.TestCase):
fixtures = ['addons/persona', 'base/users']
def setUp(self):
super(TestPersonaDetails, self).setUp()
self.addon = Addon.objects.get(id=15663)
self.url = reverse('discovery.addons.detail', args=[self.addon.slug])
def test_page(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
def test_by(self):
"""Test that the `by ... <authors>` section works."""
r = self.client.get(self.url)
assert pq(r.content)('h2.author').text().startswith(
'by persona_author')
def test_no_version(self):
"""Don't display a version number for themes."""
r = self.client.get(self.url)
eq_(pq(r.content)('h1 .version'), [])
def test_created_not_updated(self):
"""Don't display the updated date but the created date for themes."""
r = self.client.get(self.url)
doc = pq(r.content)
details = doc('.addon-info li')
# There's no "Last Updated" entry.
assert not any('Last Updated' in node.text_content()
for node in details)
# But there's a "Created" entry.
for detail in details:
if detail.find('h3').text_content() == 'Created':
created = detail.find('p').text_content()
eq_(created,
strip_whitespace(datetime_filter(self.addon.created)))
                break  # Needed, or we fall through to the "else" clause.
else:
assert False, 'No "Created" entry found.'
class TestDownloadSources(amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/users',
'base/collections', 'base/featured', 'addons/featured',
'discovery/discoverymodules']
def setUp(self):
super(TestDownloadSources, self).setUp()
self.url = reverse('discovery.pane', args=['3.7a1pre', 'Darwin'])
def test_detail(self):
url = reverse('discovery.addons.detail', args=['a3615'])
r = self.client.get(url)
doc = pq(r.content)
assert doc('#install a.download').attr('href').endswith(
'?src=discovery-details')
assert doc('#install li:eq(1)').find('a').attr('href').endswith(
'?src=discovery-learnmore')
assert doc('#install li:eq(2)').find('a').attr('href').endswith(
'?src=discovery-learnmore')
def test_detail_trickle(self):
url = (reverse('discovery.addons.detail', args=['a3615']) +
'?src=discovery-featured')
r = self.client.get(url)
doc = pq(r.content)
assert doc('#install a.download').attr('href').endswith(
'?src=discovery-featured')
def test_eula(self):
url = reverse('discovery.addons.eula', args=['a3615'])
r = self.client.get(url)
doc = pq(r.content)
assert doc('#install a.download').attr('href').endswith(
'?src=discovery-details')
assert doc('#install li:eq(1)').find('a').attr('href').endswith(
'?src=discovery-details')
def test_eula_trickle(self):
url = (reverse('discovery.addons.eula', args=['a3615']) +
'?src=discovery-upandcoming')
r = self.client.get(url)
doc = pq(r.content)
assert doc('#install a.download').attr('href').endswith(
'?src=discovery-upandcoming')
assert doc('#install li:eq(1)').find('a').attr('href').endswith(
'?src=discovery-upandcoming')
class TestMonthlyPick(amo.tests.TestCase):
fixtures = ['base/users', 'base/addon_3615',
'discovery/discoverymodules']
def setUp(self):
super(TestMonthlyPick, self).setUp()
self.url = reverse('discovery.pane.promos', args=['Darwin', '10.0'])
self.addon = Addon.objects.get(id=3615)
DiscoveryModule.objects.create(
app=amo.FIREFOX.id, ordering=4,
module='Monthly Pick')
def test_monthlypick(self):
mp = MonthlyPick.objects.create(addon=self.addon, blurb='BOOP',
image='http://mozilla.com')
r = self.client.get(self.url)
eq_(r.content, '')
mp.update(locale='')
r = self.client.get(self.url)
pick = pq(r.content)('#monthly')
eq_(pick.length, 1)
eq_(pick.parents('.panel').attr('data-addonguid'), self.addon.guid)
a = pick.find('h3 a')
url = reverse('discovery.addons.detail', args=['a3615'])
assert a.attr('href').endswith(url + '?src=discovery-promo'), (
'Unexpected add-on details URL: %s' % url)
eq_(a.attr('target'), '_self')
eq_(a.text(), unicode(self.addon.name))
eq_(pick.find('img').attr('src'), 'http://mozilla.com')
eq_(pick.find('.wrap > div > div > p').text(), 'BOOP')
eq_(pick.find('p.install-button a').attr('href')
.endswith('?src=discovery-promo'), True)
def test_monthlypick_no_image(self):
MonthlyPick.objects.create(addon=self.addon, blurb='BOOP', locale='',
image='')
        # No image should be shown when no screenshot is set.
r = self.client.get(self.url)
pick = pq(r.content)('#monthly')
eq_(pick.length, 1)
eq_(pick.find('img').length, 0)
        # The screenshot image should be used when one is set.
Preview.objects.create(addon=self.addon)
r = self.client.get(self.url)
pick = pq(r.content)('#monthly')
eq_(pick.length, 1)
eq_(pick.find('img').attr('src'), self.addon.all_previews[0].image_url)
def test_no_monthlypick(self):
r = self.client.get(self.url)
eq_(r.content, '')
class TestPaneMoreAddons(amo.tests.TestCase):
fixtures = ['base/appversion']
def setUp(self):
super(TestPaneMoreAddons, self).setUp()
self.addon1 = addon_factory(hotness=99,
version_kw=dict(max_app_version='5.0'))
self.addon2 = addon_factory(hotness=0,
version_kw=dict(max_app_version='6.0'))
def _url(self, **kwargs):
default = dict(
section='up-and-coming',
version='5.0',
platform='Darwin')
default.update(kwargs)
return reverse('discovery.pane.more_addons', kwargs=default)
def test_hotness_strict(self):
# Defaults to strict compat mode, both are within range.
res = self.client.get(self._url())
eq_(res.status_code, 200)
eq_(pq(res.content)('.featured-addons').length, 2)
def test_hotness_strict_filtered(self):
# Defaults to strict compat mode, one is within range.
res = self.client.get(self._url(version='6.0'))
eq_(res.status_code, 200)
eq_(pq(res.content)('.featured-addons').length, 1)
self.assertContains(res, self.addon2.name)
def test_hotness_ignore(self):
# Defaults to ignore compat mode for Fx v10, both are compatible.
res = self.client.get(self._url(version='10.0'))
eq_(res.status_code, 200)
eq_(pq(res.content)('.featured-addons').length, 2)
def test_hotness_normal_strict_opt_in(self):
# Add a 3rd add-on that should get filtered out b/c of compatibility.
addon_factory(hotness=50, version_kw=dict(max_app_version='7.0'),
file_kw=dict(strict_compatibility=True))
res = self.client.get(self._url(version='12.0', compat_mode='normal'))
eq_(res.status_code, 200)
eq_(pq(res.content)('.featured-addons').length, 2)
def test_hotness_normal_binary_components(self):
# Add a 3rd add-on that should get filtered out b/c of compatibility.
addon_factory(hotness=50, version_kw=dict(max_app_version='7.0'),
file_kw=dict(binary_components=True))
res = self.client.get(self._url(version='12.0', compat_mode='normal'))
eq_(res.status_code, 200)
eq_(pq(res.content)('.featured-addons').length, 2)
def test_hotness_normal_compat_override(self):
# Add a 3rd add-on that should get filtered out b/c of compatibility.
addon3 = addon_factory(hotness=50,
version_kw=dict(max_app_version='7.0'))
# Add override for this add-on.
compat = CompatOverride.objects.create(guid='three', addon=addon3)
CompatOverrideRange.objects.create(
compat=compat, app=1,
min_version=addon3.current_version.version, max_version='*')
res = self.client.get(self._url(version='12.0', compat_mode='normal'))
eq_(res.status_code, 200)
eq_(pq(res.content)('.featured-addons').length, 2)
| bsd-3-clause | -1,679,456,730,680,537,000 | 39.067365 | 79 | 0.586027 | false |
platinhom/DailyTools | scripts/ESES_ElementArea.py | 1 | 3679 | #! /usr/bin/env python
# -*- coding: utf8 -*-
# Author: Platinhom; Last Updated: 2015-09-10
# Calculate the surface area of each element with MS_Intersection, and append the per-atom area results to the pqr file.
# Usage: python ESES_ElementArea.py file.pqr
#
# Need: MS_Intersection (partition version)
# Note: Only for PQR format input.
# Custom: ESES parameters.
import os,sys
# Modify the ESES program parameter here.
# You can modify to command line input parameter as you like
probe=1.4
grid=0.2
buffer=4.0
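# Example run (assumes the MS_Intersection binary sits next to this script):
#   python ESES_ElementArea.py protein.pqr
# writes protein.xyzr, reads the generated partition_area.txt and produces
# protein_area.pqra with per-atom areas appended to each atom line.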
if (__name__ == '__main__'):
fname=sys.argv[1]
fnamelist=os.path.splitext(fname)
fxyzr=open(fnamelist[0]+".xyzr",'w')
fr=open(fname)
inlines=fr.readlines();
fr.close();
# All elements/types of input atoms, used in element area summary.
atomtypes=[];
# Write out the corresponding xyzr file.
for line in inlines:
# Each atom
if (line[:4]=="ATOM" or line[:6]=="HETATM"):
			# Atom element (taken from the last whitespace-separated column)
tmp=line.split();
element=tmp[-1].upper();
atomtypes.append(element);
# Extract x, y, z, r from pqr to xyzr file
radius="%10.5f" % float(line[62:70].strip());
xcoor="%10.5f" % float(line[30:38].strip());
ycoor="%10.5f" % float(line[38:46].strip());
zcoor="%10.5f" % float(line[46:54].strip());
xyzrstr=xcoor+ycoor+zcoor+radius+"\n";
fxyzr.write(xyzrstr);
fxyzr.close()
	# Use the external ESES program to generate the surface and calculate atom areas
	## The ESES program therefore has to be in the same directory as this script
	# It outputs a "partition_area.txt" file containing the per-atom areas
#os.system('./MS_Intersection_Area '+fnamelist[0]+".xyzr "+str(probe)+" "+str(grid)+" "+str(buffer));
p=os.popen('./MS_Intersection '+fnamelist[0]+".xyzr "+str(probe)+" "+str(grid)+" "+str(buffer),'r')
totalArea="0"
totalVolume="0"
while 1:
line=p.readline();
if "area:" in line: totalArea=line.split(':')[1].split()[0]
if "volume:" in line: totalVolume=line.split(':')[1].split()[0]
if not line:break
# Analyze output atom area file
fa=open("partition_area.txt")
	atomareas=[];  # temporary list of per-atom areas, indexed by atom number
typedefault=["H","C","N","O","F","S","P","CL","BR","I"];
typeareas={"H":0.0,"C":0.0,"N":0.0,"O":0.0,"F":0.0,"S":0.0,"P":0.0,"CL":0.0,"BR":0.0,"I":0.0};
atomnum=0;
for line in fa:
tmp=line.split();
atomarea="%12.6f" % float(tmp[1]);
atomareas.append(atomarea);
atype=atomtypes[atomnum];
typeareas[atype]=typeareas.setdefault(atype,0.0)+float(tmp[1]);
atomnum=atomnum+1;
fa.close()
	# Write out the .pqra file with per-atom areas appended
fwname=fnamelist[0]+"_area.pqra"
fw=open(fwname,'w')
# Write the total area for each element.
	## Note that only the default elements are written out here.
	## If you want all elements, iterate over "typeused" instead.
typeused=["H","C","N","O","F","S","P","CL","BR","I"];
for i in typeareas.iterkeys():
if i not in typeused:typeused.append(i);
	# Build the atom area summary line for printing
outputelearea=fnamelist[0]+" Areas: "+totalArea+" Volumes: "+totalVolume+" ";
fw.write("REMARK AREAS "+totalArea+"\n");
fw.write("REMARK VOLUMES "+totalVolume+"\n");
for element in typedefault:
		# If you want all elements, comment out the line above and uncomment the next one.
#for element in typeused:
fw.write("REMARK AREA "+"%2s"%element+" "+"%20.6f"%typeareas.get(element,0.0)+"\n");
outputelearea=outputelearea+element+": "+str(typeareas[element])+" ";
print outputelearea
fr=open(fname)
atomnum=0;
for line in fr:
if (line[:4]=="ATOM" or line[:6]=="HETATM"):
tmp=line.split();
element=tmp[-1].upper();
newline=line.strip('\n')+atomareas[atomnum]+"\n";
fw.write(newline);
atomnum=atomnum+1;
else:
fw.write(line);
fr.close();
fw.close()
#end main
| gpl-2.0 | -1,598,104,058,107,988,000 | 32.144144 | 112 | 0.65969 | false |
sunbingfengPI/OpenSFM_Test | opensfm/commands/match_features.py | 1 | 9080 | import logging
from itertools import combinations
from timeit import default_timer as timer
import numpy as np
import scipy.spatial as spatial
from opensfm import dataset
from opensfm import geo
from opensfm import io
from opensfm import log
from opensfm import matching
from opensfm.context import parallel_map
logger = logging.getLogger(__name__)
class Command:
name = 'match_features'
help = 'Match features between image pairs'
def add_arguments(self, parser):
parser.add_argument('dataset', help='dataset to process')
def run(self, args):
data = dataset.DataSet(args.dataset)
images = data.images()
exifs = {im: data.load_exif(im) for im in images}
pairs, preport = match_candidates_from_metadata(images, exifs, data)
num_pairs = sum(len(c) for c in pairs.values())
logger.info('Matching {} image pairs'.format(num_pairs))
ctx = Context()
ctx.data = data
ctx.cameras = ctx.data.load_camera_models()
ctx.exifs = exifs
ctx.p_pre, ctx.f_pre = load_preemptive_features(data)
args = list(match_arguments(pairs, ctx))
start = timer()
processes = ctx.data.config['processes']
parallel_map(match, args, processes)
end = timer()
with open(ctx.data.profile_log(), 'a') as fout:
fout.write('match_features: {0}\n'.format(end - start))
self.write_report(data, preport, pairs, end - start)
def write_report(self, data, preport, pairs, wall_time):
pair_list = []
for im1, others in pairs.items():
for im2 in others:
pair_list.append((im1, im2))
report = {
"wall_time": wall_time,
"num_pairs": len(pair_list),
"pairs": pair_list,
}
report.update(preport)
data.save_report(io.json_dumps(report), 'matches.json')
class Context:
pass
def load_preemptive_features(data):
p, f = {}, {}
if data.config['preemptive_threshold'] > 0:
logger.debug('Loading preemptive data')
for image in data.images():
try:
p[image], f[image] = \
data.load_preemtive_features(image)
            except IOError:
                # Fall back to the full features; load into fresh names so the
                # accumulator dicts p and f are not shadowed.
                p_im, f_im, c_im = data.load_features(image)
                p[image], f[image] = p_im, f_im
preemptive_max = min(data.config['preemptive_max'],
p[image].shape[0])
p[image] = p[image][:preemptive_max, :]
f[image] = f[image][:preemptive_max, :]
return p, f
def has_gps_info(exif):
return (exif and
'gps' in exif and
'latitude' in exif['gps'] and
'longitude' in exif['gps'])
def match_candidates_by_distance(images, exifs, reference, max_neighbors, max_distance):
"""Find candidate matching pairs by GPS distance."""
if max_neighbors <= 0 and max_distance <= 0:
return set()
max_neighbors = max_neighbors or 99999999
max_distance = max_distance or 99999999.
k = min(len(images), max_neighbors + 1)
points = np.zeros((len(images), 3))
for i, image in enumerate(images):
gps = exifs[image]['gps']
alt = gps.get('altitude', 2.0)
points[i] = geo.topocentric_from_lla(
gps['latitude'], gps['longitude'], alt,
reference['latitude'], reference['longitude'], reference['altitude'])
tree = spatial.cKDTree(points)
pairs = set()
for i, image in enumerate(images):
distances, neighbors = tree.query(
points[i], k=k, distance_upper_bound=max_distance)
for j in neighbors:
if i != j and j < len(images):
pairs.add(tuple(sorted((images[i], images[j]))))
return pairs
def match_candidates_by_time(images, exifs, max_neighbors):
"""Find candidate matching pairs by time difference."""
if max_neighbors <= 0:
return set()
k = min(len(images), max_neighbors + 1)
times = np.zeros((len(images), 1))
for i, image in enumerate(images):
times[i] = exifs[image]['capture_time']
tree = spatial.cKDTree(times)
pairs = set()
for i, image in enumerate(images):
distances, neighbors = tree.query(times[i], k=k)
for j in neighbors:
if i != j and j < len(images):
pairs.add(tuple(sorted((images[i], images[j]))))
return pairs
def match_candidates_by_order(images, max_neighbors):
"""Find candidate matching pairs by sequence order."""
if max_neighbors <= 0:
return set()
n = (max_neighbors + 1) // 2
pairs = set()
for i, image in enumerate(images):
a = max(0, i - n)
b = min(len(images), i + n)
for j in range(a, b):
if i != j:
pairs.add(tuple(sorted((images[i], images[j]))))
return pairs
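# Illustrative sketch (not part of the original module): with max_neighbors=2
# each image is paired with its adjacent sequence neighbor, e.g.
#
#     match_candidates_by_order(['a', 'b', 'c'], 2)
#     # -> {('a', 'b'), ('b', 'c')}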
def match_candidates_from_metadata(images, exifs, data):
"""Compute candidate matching pairs"""
max_distance = data.config['matching_gps_distance']
gps_neighbors = data.config['matching_gps_neighbors']
time_neighbors = data.config['matching_time_neighbors']
order_neighbors = data.config['matching_order_neighbors']
if not data.reference_lla_exists():
data.invent_reference_lla()
reference = data.load_reference_lla()
if not all(map(has_gps_info, exifs.values())):
if gps_neighbors != 0:
            logger.warning("Not all images have GPS info. "
                           "Disabling matching_gps_neighbors.")
gps_neighbors = 0
max_distance = 0
images.sort()
if max_distance == gps_neighbors == time_neighbors == order_neighbors == 0:
# All pair selection strategies deactivated so we match all pairs
d = set()
t = set()
o = set()
pairs = combinations(images, 2)
else:
d = match_candidates_by_distance(images, exifs, reference,
gps_neighbors, max_distance)
t = match_candidates_by_time(images, exifs, time_neighbors)
o = match_candidates_by_order(images, order_neighbors)
pairs = d | t | o
res = {im: [] for im in images}
for im1, im2 in pairs:
res[im1].append(im2)
report = {
"num_pairs_distance": len(d),
"num_pairs_time": len(t),
"num_pairs_order": len(o)
}
return res, report
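# Illustrative sketch (an assumption about typical output, not taken from the
# original code): for three sequential images the returned mapping and report
# might look like
#
#     res = {'im1.jpg': ['im2.jpg'], 'im2.jpg': ['im3.jpg'], 'im3.jpg': []}
#     report = {'num_pairs_distance': 2, 'num_pairs_time': 2,
#               'num_pairs_order': 0}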
def match_arguments(pairs, ctx):
for i, (im, candidates) in enumerate(pairs.items()):
yield im, candidates, i, len(pairs), ctx
def match(args):
"""Compute all matches for a single image"""
log.setup()
im1, candidates, i, n, ctx = args
logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))
config = ctx.data.config
robust_matching_min_match = config['robust_matching_min_match']
preemptive_threshold = config['preemptive_threshold']
lowes_ratio = config['lowes_ratio']
preemptive_lowes_ratio = config['preemptive_lowes_ratio']
im1_matches = {}
for im2 in candidates:
# preemptive matching
if preemptive_threshold > 0:
t = timer()
config['lowes_ratio'] = preemptive_lowes_ratio
matches_pre = matching.match_lowe_bf(
ctx.f_pre[im1], ctx.f_pre[im2], config)
config['lowes_ratio'] = lowes_ratio
logger.debug("Preemptive matching {0}, time: {1}s".format(
len(matches_pre), timer() - t))
if len(matches_pre) < preemptive_threshold:
logger.debug(
"Discarding based of preemptive matches {0} < {1}".format(
len(matches_pre), preemptive_threshold))
continue
# symmetric matching
t = timer()
p1, f1, c1 = ctx.data.load_features(im1)
p2, f2, c2 = ctx.data.load_features(im2)
if config['matcher_type'] == 'FLANN':
i1 = ctx.data.load_feature_index(im1, f1)
i2 = ctx.data.load_feature_index(im2, f2)
else:
i1 = None
i2 = None
matches = matching.match_symmetric(f1, i1, f2, i2, config)
logger.debug('{} - {} has {} candidate matches'.format(
im1, im2, len(matches)))
if len(matches) < robust_matching_min_match:
im1_matches[im2] = []
continue
# robust matching
t_robust_matching = timer()
camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
camera2 = ctx.cameras[ctx.exifs[im2]['camera']]
rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
config)
if len(rmatches) < robust_matching_min_match:
im1_matches[im2] = []
continue
im1_matches[im2] = rmatches
logger.debug('Robust matching time : {0}s'.format(
timer() - t_robust_matching))
logger.debug("Full matching {0} / {1}, time: {2}s".format(
len(rmatches), len(matches), timer() - t))
ctx.data.save_matches(im1, im1_matches)
| bsd-2-clause | 8,142,427,845,005,928,000 | 31.898551 | 88 | 0.576982 | false |
alphapigger/igetui | igetui/google/protobuf/message_factory.py | 1 | 4235 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a factory class for generating dynamic messages."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
from . import descriptor_database
from . import descriptor_pool
from . import message
from . import reflection
class MessageFactory(object):
"""Factory for creating Proto2 messages from descriptors in a pool."""
def __init__(self):
"""Initializes a new factory."""
self._classes = {}
def GetPrototype(self, descriptor):
"""Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
if descriptor.full_name not in self._classes:
result_class = reflection.GeneratedProtocolMessageType(
descriptor.name.encode('ascii', 'ignore'),
(message.Message,),
{'DESCRIPTOR': descriptor})
self._classes[descriptor.full_name] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
return self._classes[descriptor.full_name]
_DB = descriptor_database.DescriptorDatabase()
_POOL = descriptor_pool.DescriptorPool(_DB)
_FACTORY = MessageFactory()
def GetMessages(file_protos):
"""Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: A sequence of file protos to build messages out of.
Returns:
A dictionary containing all the message types in the files mapping the
fully qualified name to a Message subclass for the descriptor.
"""
result = {}
for file_proto in file_protos:
_DB.Add(file_proto)
for file_proto in file_protos:
for desc in _GetAllDescriptors(file_proto.message_type, file_proto.package):
result[desc.full_name] = _FACTORY.GetPrototype(desc)
return result
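# Illustrative usage sketch (not part of the original module; the package and
# message names are hypothetical):
#
#     from google.protobuf import descriptor_pb2
#     file_proto = descriptor_pb2.FileDescriptorProto()
#     file_proto.ParseFromString(serialized_file_descriptor)  # built elsewhere
#     messages = GetMessages([file_proto])
#     MyMessage = messages['mypackage.MyMessage']
#     msg = MyMessage()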
def _GetAllDescriptors(desc_protos, package):
"""Gets all levels of nested message types as a flattened list of descriptors.
Args:
desc_protos: The descriptor protos to process.
package: The package where the protos are defined.
Yields:
Each message descriptor for each nested type.
"""
for desc_proto in desc_protos:
name = '.'.join((package, desc_proto.name))
yield _POOL.FindMessageTypeByName(name)
for nested_desc in _GetAllDescriptors(desc_proto.nested_type, name):
yield nested_desc
| mit | -3,856,056,638,776,542,700 | 35.477876 | 80 | 0.716883 | false |
openstack/rally | rally/verification/manager.py | 1 | 16784 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import inspect
import io
import os
import re
import shutil
import sys
import pkg_resources
from rally.common.io import subunit_v2
from rally.common import logging
from rally.common.plugin import plugin
from rally import exceptions
from rally.verification import context
from rally.verification import utils
LOG = logging.getLogger(__name__)
URL_RE = re.compile(
r"^(?:(?:http|ftp)s?|ssh)://" # http:// or https://
r"(?:(?:[A-Z0-9](?:[A-Z0-9@-]{0,61}[A-Z0-9])?\.)+" # domain
r"(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain
r"localhost|" # localhost
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # IP
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$", re.IGNORECASE)
class VerifierSetupFailure(exceptions.RallyException):
error_code = 224
msg_fmt = "Failed to set up verifier '%(verifier)s': %(message)s"
def configure(name, platform="default", default_repo=None,
default_version=None, context=None):
"""Decorator to configure plugin's attributes.
:param name: Plugin name that is used for searching purpose
:param platform: Plugin platform
:param default_repo: Default repository to clone
:param default_version: Default version to checkout
:param context: List of contexts that should be executed for verification
"""
def decorator(plugin_inst):
plugin_inst = plugin.configure(name, platform=platform)(plugin_inst)
plugin_inst._meta_set("default_repo", default_repo)
plugin_inst._meta_set("default_version", default_version)
plugin_inst._meta_set("context", context or {})
return plugin_inst
return decorator
@plugin.base()
class VerifierManager(plugin.Plugin, metaclass=abc.ABCMeta):
"""Verifier base class.
This class provides an interface for operating specific tool.
"""
# These dicts will be used for building docs. PS: we should find a better
# place for them
RUN_ARGS = {"pattern": "a regular expression of tests to launch.",
"concurrency": "Number of processes to be used for launching "
"tests. In case of 0 value, number of processes"
" will be equal to number of CPU cores.",
"load_list": "a list of tests to launch.",
"skip_list": "a list of tests to skip (actually, it is a dict "
"where keys are names of tests, values are "
"reasons).",
"xfail_list": "a list of tests that are expected to fail "
"(actually, it is a dict where keys are names "
"of tests, values are reasons)."}
@classmethod
def _get_doc(cls):
run_args = {}
for parent in inspect.getmro(cls):
if hasattr(parent, "RUN_ARGS"):
for k, v in parent.RUN_ARGS.items():
run_args.setdefault(k, v)
doc = cls.__doc__ or ""
doc += "\n**Running arguments**:\n\n%s" % "\n".join(
sorted(["* *%s*: %s" % (k, v) for k, v in run_args.items()]))
doc += "\n\n**Installation arguments**:\n\n"
doc += ("* *system_wide*: Whether or not to use the system-wide "
"environment for verifier instead of a virtual environment. "
"Defaults to False.\n"
"* *source*: Path or URL to the repo to clone verifier from."
" Defaults to %(default_source)s\n"
"* *version*: Branch, tag or commit ID to checkout before "
"verifier installation. Defaults to '%(default_version)s'.\n"
% {"default_source": cls._meta_get("default_repo"),
"default_version": cls._meta_get(
"default_version") or "master"})
return doc
def __init__(self, verifier):
"""Init a verifier manager.
:param verifier: `rally.common.objects.verifier.Verifier` instance
"""
self.verifier = verifier
@property
def base_dir(self):
return os.path.expanduser(
"~/.rally/verification/verifier-%s" % self.verifier.uuid)
@property
def home_dir(self):
return os.path.join(self.base_dir, "for-deployment-%s"
% self.verifier.deployment["uuid"])
@property
def repo_dir(self):
return os.path.join(self.base_dir, "repo")
@property
def venv_dir(self):
return os.path.join(self.base_dir, ".venv")
@property
def environ(self):
env = os.environ.copy()
if not self.verifier.system_wide:
# activate virtual environment
env["VIRTUAL_ENV"] = self.venv_dir
env["PATH"] = "%s:%s" % (
os.path.join(self.venv_dir, "bin"), env["PATH"])
return env
def validate_args(self, args):
"""Validate given arguments to be used for running verification.
:param args: A dict of arguments with values
"""
# NOTE(andreykurilin): By default we do not use jsonschema here.
# So it cannot be extended by inheritors => requires duplication.
if "pattern" in args:
if not isinstance(args["pattern"], str):
raise exceptions.ValidationError(
"'pattern' argument should be a string.")
if "concurrency" in args:
if (not isinstance(args["concurrency"], int)
or args["concurrency"] < 0):
raise exceptions.ValidationError(
"'concurrency' argument should be a positive integer or "
"zero.")
if "load_list" in args:
if not isinstance(args["load_list"], list):
raise exceptions.ValidationError(
"'load_list' argument should be a list of tests.")
if "skip_list" in args:
if not isinstance(args["skip_list"], dict):
raise exceptions.ValidationError(
"'skip_list' argument should be a dict of tests "
"where keys are test names and values are reasons.")
if "xfail_list" in args:
if not isinstance(args["xfail_list"], dict):
raise exceptions.ValidationError(
"'xfail_list' argument should be a dict of tests "
"where keys are test names and values are reasons.")
def validate(self, run_args):
"""Validate a verifier context and run arguments."""
context.ContextManager.validate(self._meta_get("context"))
self.validate_args(run_args)
def _clone(self):
"""Clone a repo and switch to a certain version."""
source = self.verifier.source or self._meta_get("default_repo")
if not source or (
not URL_RE.match(source) and not os.path.exists(source)):
raise exceptions.RallyException("Source path '%s' is not valid."
% source)
if logging.is_debug():
LOG.debug("Cloning verifier repo from %s into %s."
% (source, self.repo_dir))
else:
LOG.info("Cloning verifier repo from %s." % source)
cmd = ["git", "clone", source, self.repo_dir]
default_version = self._meta_get("default_version")
if default_version and default_version != "master":
cmd.extend(["-b", default_version])
utils.check_output(cmd)
version = self.verifier.version
if version:
LOG.info("Switching verifier repo to the '%s' version." % version)
utils.check_output(["git", "checkout", version], cwd=self.repo_dir)
else:
output = utils.check_output(["git", "describe", "--all"],
cwd=self.repo_dir).strip()
if output.startswith("heads/"): # it is a branch
version = output[6:]
else:
head = utils.check_output(["git", "rev-parse", "HEAD"],
cwd=self.repo_dir).strip()
if output.endswith(head[:7]): # it is a commit ID
version = head
else: # it is a tag
version = output
self.verifier.update_properties(version=version)
def install(self):
"""Clone and install a verifier."""
utils.create_dir(self.base_dir)
self._clone()
if self.verifier.system_wide:
self.check_system_wide()
else:
self.install_venv()
def uninstall(self, full=False):
"""Uninstall a verifier.
:param full: If False (default behaviour), only deployment-specific
data will be removed
"""
path = self.base_dir if full else self.home_dir
if os.path.exists(path):
shutil.rmtree(path)
def install_venv(self):
"""Install a virtual environment for a verifier."""
if os.path.exists(self.venv_dir):
# NOTE(andreykurilin): It is necessary to remove the old env while
# performing update action.
LOG.info("Deleting old virtual environment.")
shutil.rmtree(self.venv_dir)
LOG.info("Creating virtual environment. It may take a few minutes.")
LOG.debug("Initializing virtual environment in %s directory."
% self.venv_dir)
utils.check_output(["virtualenv", "-p", sys.executable, self.venv_dir],
cwd=self.repo_dir,
msg_on_err="Failed to initialize virtual env "
"in %s directory." % self.venv_dir)
LOG.debug("Installing verifier in virtual environment.")
# NOTE(ylobankov): Use 'develop mode' installation to provide an
# ability to advanced users to change tests or
# develop new ones in verifier repo on the fly.
utils.check_output(["pip", "install", "-e", "./"],
cwd=self.repo_dir, env=self.environ)
def check_system_wide(self, reqs_file_path=None):
"""Check that all required verifier packages are installed."""
LOG.debug("Checking system-wide packages for verifier.")
reqs_file_path = reqs_file_path or os.path.join(self.repo_dir,
"requirements.txt")
with open(reqs_file_path) as f:
required_packages = [
p for p in f.read().split("\n")
if p.strip() and not p.startswith("#")
]
try:
pkg_resources.require(required_packages)
except (pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict) as e:
raise VerifierSetupFailure(e.report(), verifier=self.verifier.name)
def checkout(self, version):
"""Switch a verifier repo."""
LOG.info("Switching verifier repo to the '%s' version." % version)
utils.check_output(["git", "checkout", "master"], cwd=self.repo_dir)
utils.check_output(["git", "remote", "update"], cwd=self.repo_dir)
utils.check_output(["git", "pull"], cwd=self.repo_dir)
utils.check_output(["git", "checkout", version], cwd=self.repo_dir)
def configure(self, extra_options=None):
"""Configure a verifier.
:param extra_options: a dictionary with external verifier specific
options for configuration.
:raises NotImplementedError: This feature is verifier-specific, so you
should override this method in your plugin if it supports
configuration
"""
raise NotImplementedError(
"'%s' verifiers don't support configuration at all."
% self.get_name())
def is_configured(self):
"""Check whether a verifier is configured or not."""
return True
def get_configuration(self):
"""Get verifier configuration (e.g., the config file content)."""
return ""
def override_configuration(self, new_configuration):
"""Override verifier configuration.
:param new_configuration: Content which should be used while overriding
existing configuration
:raises NotImplementedError: This feature is verifier-specific, so you
should override this method in your plugin if it supports
configuration
"""
raise NotImplementedError(
"'%s' verifiers don't support configuration at all."
% self.get_name())
def extend_configuration(self, extra_options):
"""Extend verifier configuration with new options.
:param extra_options: Options to be used for extending configuration
:raises NotImplementedError: This feature is verifier-specific, so you
should override this method in your plugin if it supports
configuration
"""
raise NotImplementedError(
"'%s' verifiers don't support configuration at all."
% self.get_name())
def install_extension(self, source, version=None, extra_settings=None):
"""Install a verifier extension.
:param source: Path or URL to the repo to clone verifier extension from
:param version: Branch, tag or commit ID to checkout before verifier
extension installation
:param extra_settings: Extra installation settings for verifier
extension
:raises NotImplementedError: This feature is verifier-specific, so you
should override this method in your plugin if it supports
extensions
"""
raise NotImplementedError(
"'%s' verifiers don't support extensions." % self.get_name())
def list_extensions(self):
"""List all verifier extensions.
Every extension is a dict object which contains
name and entry_point keys. example:
{
"name": p.name,
"entry_point": p.entry_point_target
}
"""
return []
def uninstall_extension(self, name):
"""Uninstall a verifier extension.
:param name: Name of extension to uninstall
:raises NotImplementedError: This feature is verifier-specific, so you
should override this method in your plugin if it supports
extensions
"""
raise NotImplementedError(
"'%s' verifiers don't support extensions." % self.get_name())
@abc.abstractmethod
def list_tests(self, pattern=""):
"""List all verifier tests.
:param pattern: Filter tests by given pattern
"""
def parse_results(self, results_data):
"""Parse subunit results data of a test run."""
# TODO(andreykurilin): Support more formats.
return subunit_v2.parse(io.StringIO(results_data))
@abc.abstractmethod
def run(self, context):
"""Run verifier tests.
Verification Component API expects that this method should return an
object. There is no special class, you do it as you want, but it should
have the following properties:
.. code-block:: none
<object>.totals = {
"tests_count": <total tests count>,
"tests_duration": <total tests duration>,
"failures": <total count of failed tests>,
"skipped": <total count of skipped tests>,
"success": <total count of successful tests>,
"unexpected_success":
<total count of unexpected successful tests>,
"expected_failures": <total count of expected failed tests>
}
<object>.tests = {
<test_id>: {
"status": <test status>,
"name": <test name>,
"duration": <test duration>,
"reason": <reason>, # optional
"traceback": <traceback> # optional
},
...
}
"""
| apache-2.0 | 5,558,008,805,250,458,000 | 38.214953 | 79 | 0.575608 | false |
thomasrotter/sublimetext-cfml | cfml_plugin.py | 1 | 3757 | import sublime
import sublime_plugin
from HTML.html_completions import HtmlTagCompletions
from .src import command_list, completions, events, utils, _plugin_loaded
for command in command_list:
globals()[command.__name__] = command
def plugin_loaded():
_plugin_loaded()
class CfmlEventListener(sublime_plugin.EventListener):
def on_load_async(self, view):
events.trigger("on_load_async", view)
def on_close(self, view):
events.trigger("on_close", view)
def on_modified_async(self, view):
events.trigger("on_modified_async", view)
def on_post_save_async(self, view):
if not view.file_name():
print(
"CFML: file was saved and closed - it is not possible to determine the file path."
)
return
events.trigger("on_post_save_async", view)
def on_post_text_command(self, view, command_name, args):
if command_name == "commit_completion":
pos = view.sel()[0].begin()
if view.match_selector(
pos,
"meta.tag.cfml -source.cfml.script, meta.tag.script.cfml, meta.tag.script.cf.cfml, meta.class.declaration.cfml -meta.class.inheritance.cfml",
):
if view.substr(pos - 1) in [" ", '"', "'", "="]:
view.run_command("auto_complete", {"api_completions_only": True})
elif view.substr(pos) == '"':
# an attribute completion was most likely just inserted
# advance cursor past double quote character
view.run_command("move", {"by": "characters", "forward": True})
if view.substr(pos - 1) == ":" and view.match_selector(
pos - 1, "meta.tag.custom.cfml -source.cfml.script"
):
view.run_command("auto_complete", {"api_completions_only": True})
if view.substr(pos - 1) == "." and view.match_selector(
pos - 1,
"meta.function-call.support.createcomponent.cfml string.quoted, entity.other.inherited-class.cfml, meta.instance.constructor.cfml",
):
view.run_command("auto_complete", {"api_completions_only": True})
def on_post_window_command(self, window, command_name, args):
events.trigger("on_post_window_command", window, command_name, args)
def on_query_completions(self, view, prefix, locations):
if not view.match_selector(locations[0], "embedding.cfml"):
return None
return completions.get_completions(view, locations[0], prefix)
def on_hover(self, view, point, hover_zone):
if hover_zone != sublime.HOVER_TEXT:
return
if not view.match_selector(point, "embedding.cfml"):
return
view.run_command(
"cfml_inline_documentation", {"pt": point, "doc_type": "hover_doc"}
)
class CustomHtmlTagCompletions(HtmlTagCompletions):
"""
There is no text.html scope in <cffunction> bodies, so this
allows the HTML completions to still function there
"""
def on_query_completions(self, view, prefix, locations):
if not utils.get_setting("html_completions_in_tag_components"):
return None
# Only trigger within CFML tag component functions
selector = "meta.class.body.tag.cfml meta.function.body.tag.cfml -source.cfml.script -source.sql"
if not view.match_selector(locations[0], selector):
return None
# check if we are inside a tag
is_inside_tag = view.match_selector(
locations[0], "meta.tag - punctuation.definition.tag.begin"
)
return self.get_completions(view, prefix, locations, is_inside_tag)
| mit | 3,923,026,118,441,820,000 | 36.949495 | 157 | 0.607932 | false |
redeyser/IceCash2 | clientEgais.py | 1 | 39275 | #!/usr/bin/python
# -*- coding: utf-8
import httplib, urllib,time
import requests
import xml.etree.ElementTree as etree
import re
from icelog import *
from my import curdate2my
from datetime import datetime
import dbIceCash as db
ns={\
"c":"http://fsrar.ru/WEGAIS/Common",\
"wbr":"http://fsrar.ru/WEGAIS/TTNInformF2Reg",\
"pref":"http://fsrar.ru/WEGAIS/ProductRef_v2",\
"oref":"http://fsrar.ru/WEGAIS/ClientRef_v2",\
"rc":"http://fsrar.ru/WEGAIS/ReplyClient_v2",\
"ns":"http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01",\
"wb":"http://fsrar.ru/WEGAIS/TTNSingle_v2",\
"xsi":"http://www.w3.org/2001/XMLSchema-instance",\
"wt":"http://fsrar.ru/WEGAIS/ConfirmTicket",
"qp":"http://fsrar.ru/WEGAIS/QueryParameters",\
'tc':"http://fsrar.ru/WEGAIS/Ticket",\
"rst":"http://fsrar.ru/WEGAIS/ReplyRests_v2",\
'wa':"http://fsrar.ru/WEGAIS/ActTTNSingle_v2",\
'ttn':"http://fsrar.ru/WEGAIS/ReplyNoAnswerTTN",\
'qp':"http://fsrar.ru/WEGAIS/InfoVersionTTN"
}
XML_VERSION=u"""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<ns:Documents Version=\"1.0\"
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"
xmlns:ns=\"http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01\"
xmlns:qp=\"http://fsrar.ru/WEGAIS/InfoVersionTTN\">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:InfoVersionTTN>
<qp:ClientId>%fsrar_id%</qp:ClientId>
<qp:WBTypeUsed>%VERSION%</qp:WBTypeUsed>
</ns:InfoVersionTTN>
</ns:Document>
</ns:Documents>
"""
XML_GET_CLIENTS=u"""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<ns:Documents Version=\"1.0\"
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"
xmlns:ns=\"http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01\"
xmlns:oref=\"http://fsrar.ru/WEGAIS/ClientRef_v2\"
xmlns:qp=\"http://fsrar.ru/WEGAIS/QueryParameters\">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:QueryClients_v2>
<qp:Parameters>
<qp:Parameter>
<qp:Name>ИНН</qp:Name>
<qp:Value>%INN%</qp:Value>
</qp:Parameter>
</qp:Parameters>
</ns:QueryClients_v2>
</ns:Document>
</ns:Documents>
"""
XML_SEND_WAYBILL_HEAD="""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<ns:Documents Version="1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ns= "http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01"
xmlns:c="http://fsrar.ru/WEGAIS/Common"
xmlns:oref="http://fsrar.ru/WEGAIS/ClientRef_v2"
xmlns:pref="http://fsrar.ru/WEGAIS/ProductRef_v2"
xmlns:wb="http://fsrar.ru/WEGAIS/TTNSingle_v2">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:WayBill_v2>
<wb:Identity>%identity%</wb:Identity>
<wb:Header>
<wb:NUMBER>%number%</wb:NUMBER>
<wb:Date>%dt%</wb:Date>
<wb:ShippingDate>%dt%</wb:ShippingDate>
<wb:Type>%type%</wb:Type>
<wb:Shipper>
<oref:UL>
<oref:INN>%inn%</oref:INN><oref:KPP>%kpp%</oref:KPP><oref:ClientRegId>%regid%</oref:ClientRegId>
<oref:ShortName>%name%</oref:ShortName><oref:FullName>%name%</oref:FullName>
<oref:address>
<oref:Country>643</oref:Country><oref:RegionCode>42</oref:RegionCode>
<oref:description></oref:description>
</oref:address>
</oref:UL>
</wb:Shipper>
<wb:Consignee>
<oref:UL>
<oref:INN>%send_inn%</oref:INN><oref:KPP>%send_kpp%</oref:KPP><oref:ClientRegId>%send_regid%</oref:ClientRegId>
<oref:ShortName>%send_name%</oref:ShortName><oref:FullName>%send_name%</oref:FullName>
<oref:address>
<oref:Country>643</oref:Country><oref:RegionCode>42</oref:RegionCode>
<oref:description></oref:description>
</oref:address>
</oref:UL>
</wb:Consignee>
<wb:Transport>
<wb:TRAN_TYPE></wb:TRAN_TYPE>
<wb:TRAN_COMPANY></wb:TRAN_COMPANY>
<wb:TRAN_TRAILER></wb:TRAN_TRAILER>
<wb:TRAN_CAR></wb:TRAN_CAR>
<wb:TRAN_CUSTOMER></wb:TRAN_CUSTOMER>
<wb:TRAN_DRIVER></wb:TRAN_DRIVER>
<wb:TRAN_LOADPOINT></wb:TRAN_LOADPOINT>
<wb:TRAN_UNLOADPOINT></wb:TRAN_UNLOADPOINT>
<wb:TRAN_FORWARDER></wb:TRAN_FORWARDER>
<wb:TRAN_REDIRECT></wb:TRAN_REDIRECT>
</wb:Transport>
<wb:Base>waybill doc</wb:Base>
<wb:Note>NOTE</wb:Note>
</wb:Header>
<wb:Content>
%content%
</wb:Content>
</ns:WayBill_v2>
</ns:Document>
</ns:Documents>
"""
XML_SEND_WAYBILL_CONTENT="""
<wb:Position>
<wb:Quantity>%quantity%</wb:Quantity><wb:Price>%price%</wb:Price><wb:Identity>%identity%</wb:Identity>
<wb:InformF1><pref:RegId>%inform_a%</pref:RegId></wb:InformF1>
<wb:InformF2><pref:InformF2Item><pref:F2RegId>%inform_b%</pref:F2RegId></pref:InformF2Item></wb:InformF2>
<wb:Product>
<pref:Type>%pref_type%</pref:Type><pref:FullName>%shortname%</pref:FullName>
<pref:ShortName>%shortname%</pref:ShortName>
<pref:AlcCode>%alccode%</pref:AlcCode>
<pref:Capacity>%capacity%</pref:Capacity>
<pref:AlcVolume>%alcvolume%</pref:AlcVolume>
<pref:ProductVCode>%productvcode%</pref:ProductVCode>
<pref:UnitType>%packet%</pref:UnitType>
<pref:Producer>
<oref:UL>
<oref:INN>%inn%</oref:INN><oref:KPP>%kpp%</oref:KPP>
<oref:ClientRegId>%regid%</oref:ClientRegId><oref:ShortName>%oref_shortname%</oref:ShortName>
<oref:FullName>%oref_shortname%</oref:FullName>
<oref:address>
<oref:Country>643</oref:Country><oref:RegionCode>42</oref:RegionCode><oref:description></oref:description>
</oref:address>
</oref:UL>
</pref:Producer>
</wb:Product>
</wb:Position>
"""
XML_SEND_ACT="""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<ns:Documents Version=\"1.0\"
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"
xmlns:ns= \"http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01\"
xmlns:oref=\"http://fsrar.ru/WEGAIS/ClientRef_v2\"
xmlns:pref=\"http://fsrar.ru/WEGAIS/ProductRef_v2\"
xmlns:wa= \"http://fsrar.ru/WEGAIS/ActTTNSingle_v2\">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:WayBillAct_v2>
<wa:Header>
<wa:IsAccept>%accept%</wa:IsAccept>
<wa:ACTNUMBER>%iddoc%</wa:ACTNUMBER>
<wa:ActDate>%date%</wa:ActDate>
<wa:WBRegId>%wb_RegId%</wa:WBRegId>
<wa:Note></wa:Note>
</wa:Header>
<wa:Content>
%content%
</wa:Content>
</ns:WayBillAct_v2>
</ns:Document>
</ns:Documents>
"""
XML_ACT_CONTENT="""
<wa:Position>
\t<wa:Identity>%identity%</wa:Identity>
\t<wa:InformF2RegId>%wb_RegId%</wa:InformF2RegId>
\t<wa:RealQuantity>%real%</wa:RealQuantity>
</wa:Position>
"""
XML_CHECK="""<?xml version="1.0" encoding="UTF-8"?>
<Cheque
inn="%inn%"
datetime="%datetime%"
kpp="%kpp%"
kassa="%kassa%"
address="%address%"
name="%name%"
number="%ncheck%"
shift="1"
>
%bottles%
</Cheque>
"""
XML_BOTTLE="""
\t<Bottle barcode="%barcode%"
\tean="%ean%" price="%price%" %litrag%/>
"""
XML_GET_OSTAT="""<?xml version="1.0" encoding="UTF-8"?>
<ns:Documents Version="1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ns="http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01"
xmlns:qp="http://fsrar.ru/WEGAIS/QueryParameters">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:QueryRests_v2></ns:QueryRests_v2>
</ns:Document>
</ns:Documents>
"""
XML_GET_REPLY="""<?xml version="1.0" encoding="UTF-8"?>
<ns:Documents Version="1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ns="http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01"
xmlns:qp="http://fsrar.ru/WEGAIS/QueryParameters"
>
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:QueryResendDoc>
<qp:Parameters>
<qp:Parameter>
<qp:Name>WBREGID</qp:Name>
<qp:Value>%ttn%</qp:Value>
</qp:Parameter>
</qp:Parameters>
</ns:QueryResendDoc>
</ns:Document>
</ns:Documents>
"""
XML_GET_NATTN="""<?xml version="1.0" encoding="UTF-8"?>
<ns:Documents Version="1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ns="http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01"
xmlns:qp="http://fsrar.ru/WEGAIS/QueryParameters">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:QueryNATTN>
<qp:Parameters>
<qp:Parameter>
<qp:Name>КОД</qp:Name>
<qp:Value>%fsrar_id%</qp:Value>
</qp:Parameter>
</qp:Parameters>
</ns:QueryNATTN>
</ns:Document>
</ns:Documents>
"""
def findUL(node):
result = node.find("oref:UL",ns)
if result == None:
result = node.find("oref:FO",ns)
return result
class EgaisClient:
def __init__(self,server_ip,server_port,db):
self.server_ip=server_ip
self.server_port=server_port
self.db=db
def assm(self,page):
return "http://%s:%d%s" % (self.server_ip,self.server_port,page)
def _delete(self,page):
print "delete %s" % page
requests.delete(page)
return True
def _get(self,page):
self.data=""
try:
r = requests.get(page)
if r.status_code!=200:
print "error_status"
return False
self.data=r.text.encode("utf8")
        except Exception:
            return False
return True
def _post(self,page,params):
self.data=""
r=requests.post(page, data=params)
self.data=r.content
if r.status_code!=200:
print "error_status"
return False
return True
def _sendfile(self,url,pname,fname):
files = {pname : open(fname, 'rb')}
r = requests.post(url, files=files)
if r.status_code!=200:
print "error_status"
self.data=r.content
return False
self.data=r.content
return True
def _connect(self):
if self._get(self.assm("/")):
r=re.search("FSRAR-RSA-(\d+)",self.data)
if not r:
return False
self.fsrar_id=r.group(1)
return True
else:
self.fsrar_id=""
return False
def _sendxml(self,fname,page,xml):
f=open(fname,"w")
f.write(xml)
f.close()
return self._sendfile(self.assm(page),'xml_file',fname)
def _send_places(self):
if not self._connect():
return False
xml=XML_GET_CLIENTS.replace("%INN%",self.db.sets['inn'])
xml=xml.replace("%fsrar_id%",self.fsrar_id).encode("utf8")
r=self._sendxml("client.xml","/opt/in/QueryClients_v2",xml)
return r
def _send_ostat(self):
if not self._connect():
return False
xml=XML_GET_OSTAT.replace("%INN%",self.db.sets['inn'])
xml=xml.replace("%fsrar_id%",self.fsrar_id).encode("utf8")
r=self._sendxml("rest.xml","/opt/in/QueryRests_v2",xml)
return r
def _send_reply(self,ttn):
if not self._connect():
return False
xml=XML_GET_REPLY.replace("%ttn%",ttn)
xml=xml.replace("%fsrar_id%",self.fsrar_id).encode("utf8")
r=self._sendxml("reply.xml","/opt/in/QueryResendDoc",xml)
return r
def _send_nattn(self):
if not self._connect():
return False
#self.db._truncate(db.TB_EGAIS_DOCS_NEED)
xml=XML_GET_NATTN.replace("%fsrar_id%",self.fsrar_id)
#.encode("utf8")
r=self._sendxml("nattn.xml","/opt/in/QueryNATTN",xml)
return r
def _send_version(self,version):
if not self._connect():
return False
if version==1:
ver="WayBill"
else:
ver="WayBill_v2"
xml=XML_VERSION.replace("%VERSION%",ver)
xml=xml.replace("%fsrar_id%",self.fsrar_id).encode("utf8")
r=self._sendxml("version.xml","/opt/in/InfoVersionTTN",xml)
return r
def _get_ticket(self):
self.sign=""
#print self.data
tree=etree.fromstring(self.data)
url = tree.find("url")
sign = tree.find("sign")
if url==None:
return ""
if sign!=None:
self.sign=sign.text
return url.text
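    # Illustrative note (an assumption about the transport module's reply,
    # inferred from how _get_ticket parses it): the UTM is expected to answer
    # with XML along the lines of
    #
    #     <?xml version="1.0" encoding="UTF-8"?>
    #     <A><url>document-reply-id</url><sign>BASE64SIGNATURE...</sign></A>
    #
    # where <url> becomes the reply id and <sign> the receipt signature.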
def _send_act(self,id):
if not self._connect():
return False
xml=self._make_act(id)
if xml=="":
return False
r=self._sendxml("client.xml","/opt/in/WayBillAct_v2",xml)
reply_id=self._get_ticket()
if reply_id!="":
self.db.egais_docs_hd_upd(id,{'status':3,'reply_id':reply_id})
return r
def _send_return(self,id):
if not self._connect():
return False
xml=self._make_return(id)
if xml=="":
return False
r=self._sendxml("return.xml","/opt/in/WayBill_v2",xml)
#print r
reply_id=self._get_ticket()
if reply_id!="":
self.db.egais_docs_hd_upd(id,{'status':3,'reply_id':reply_id})
return r
def _send_check(self,_type,ncheck,pos):
if not self._connect():
return False
xml=self._make_check(_type,ncheck,pos)
if xml=="":
return False
print "-"*80
print xml
print "-"*80
#return False
r=self._sendxml("cheque.xml","/xml",xml)
self.url=self._get_ticket()
if self.url=="" or self.sign=="":
return False
return r
def _send_move(self,id):
if not self._connect():
return False
xml=self._make_move(id)
if xml=="":
return False
r=self._sendxml("move.xml","/opt/in/WayBill_v2",xml)
reply_id=self._get_ticket()
if reply_id!="":
self.db.egais_docs_hd_upd(id,{'status':3,'reply_id':reply_id})
return r
def _create_return(self,id,idd):
if self.db.egais_get_mydoc(id):
struct={\
"type":1,\
"status":1,\
"ns_FSRAR_ID" :self.db.egais_doc_hd['recv_RegId'],\
"wb_Identity" :"0",\
"ns_typedoc" :"WayBill_v2",\
"wb_Date" :curdate2my(),\
"wb_ShippingDate" :curdate2my(),\
"wb_Type" :"WBReturnFromMe",\
"wb_UnitType" :self.db.egais_doc_hd['wb_UnitType'],\
"send_INN" :self.db.egais_doc_hd['recv_INN'],\
"send_KPP" :self.db.egais_doc_hd['recv_KPP'],\
"send_ShortName" :self.db.egais_doc_hd['recv_ShortName'],\
"send_RegId" :self.db.egais_doc_hd['recv_RegId'],\
"recv_INN" :self.db.egais_doc_hd['send_INN'],\
"recv_KPP" :self.db.egais_doc_hd['send_KPP'],\
"recv_ShortName" :self.db.egais_doc_hd['send_ShortName'],\
"recv_RegId" :self.db.egais_doc_hd['send_RegId'],\
}
id=self.db.egais_docs_hd_add(struct)
if id==0:
return False
self.db.egais_docs_hd_upd(id,{"wb_Identity":str(id),"wb_NUMBER":u"В"+self.db.sets['idplace'].rjust(3,"0")+str(id).rjust(4,"0")} )
for rec in self.db.egais_doc_ct:
if int(rec['id'])==idd:
struct=rec
struct["iddoc"]=id
struct["wb_Identity"]="1"
struct["pref_Type"]=u"АП"
del struct['id']
self.db.egais_docs_ct_add(struct)
return True
else:
return False
def _delete_in(self,id):
for d in self.data_url_in:
if id==d['idd']:
self._delete(d['url'])
def _get_docs_in(self):
self.data_url_in=[]
if self._get(self.assm("/opt/in")):
try:
d=etree.fromstring(self.data)
except:
return False
for t in d:
if t.tag!='url':
continue
if t.attrib.has_key('replyId'):
id=t.attrib['replyId']
else:
id=""
url=t.text
self.data_url_in.append({'idd':id,'url':url})
return True
else:
return False
def _get_docs_out(self):
self.data_url=[]
if self._get(self.assm("/opt/out")):
try:
d=etree.fromstring(self.data)
except:
return False
for t in d:
if t.tag!='url':
continue
if t.attrib.has_key('replyId'):
id=t.attrib['replyId']
else:
id=""
url=t.text
self.data_url.append({'idd':id,'url':url})
return True
else:
return False
def _dodoc(self):
res={}
for d in self.data_url:
id=d['idd']
url=d['url']
if not self._get(url):
continue
addLog('/var/log/egaisLog.xml',self.data)
tree=etree.fromstring(self.data)
doc = tree.find("ns:Document",ns)
if doc==None:
continue
typedoc=doc[0].tag
#print typedoc
if typedoc=="{%s}ConfirmTicket" % ns["ns"]:
if self._addConfirmTicket(url,id,tree):
if res.has_key("ConfirmTicket"):
res['ConfirmTicket']+=1
else:
res['ConfirmTicket']=1
print "ConfirmTicket"
self._delete_in(id)
self._delete(url)
pass
if typedoc=="{%s}Ticket" % ns["ns"]:
if self._addTicket(url,id,tree):
if res.has_key("Ticket"):
res['Ticket']+=1
else:
res['Ticket']=1
print "Ticket"
self._delete_in(id)
pass
self._delete(url)
if typedoc=="{%s}ReplyClient_v2" % ns["ns"]:
if res.has_key("ReplyClient"):
res['ReplyClient']+=1
else:
res['ReplyClient']=1
print "ReplyClient"
self._addplaces(doc[0])
self._delete_in(id)
self._delete(url)
if typedoc=="{%s}ReplyRests_v2" % ns["ns"]:
res['ReplyRests.Products']=self._reload_ostat(doc[0])
self._delete_in(id)
self._delete(url)
if typedoc=="{%s}WayBill_v2" % ns["ns"]:
if self._addWayBill(url,id,tree):
if res.has_key("WayBill"):
res['WayBill']+=1
else:
res['WayBill']=1
self._delete(url)
pass
if typedoc=="{%s}WayBillAct" % ns["ns"] or typedoc=="{%s}WayBillAct_v2" % ns["ns"]:
if self._addWayBillAct(url,id,tree):
if res.has_key("WayBillAct"):
res['WayBillAct']+=1
else:
res['WayBillAct']=1
self._delete(url)
pass
if typedoc=="{%s}TTNInformF2Reg" % ns["ns"]:
if self._addInformBReg(url,id,tree):
if res.has_key("TTNInformBReg"):
res['TTNInformBReg']+=1
else:
res['TTNInformBReg']=1
self._delete(url)
pass
if typedoc=="{%s}ReplyNoAnswerTTN" % ns["ns"]:
res['ReplyNoAnswerTTN']=self._read_nattn(doc[0])
self._delete_in(id)
self._delete(url)
return res
def _recalc(self):
docs=self.db.egais_get_mydocs(0,None,None,None,None)
for d in docs:
iddoc=int(d['id'])
tree=etree.fromstring(d['xml_inform'].encode('utf8'))
if tree=="":
continue
if not self.db.egais_get_mydoc(iddoc):
continue
content=self._readhead_InformBReg(tree)
for pos in content.findall("wbr:Position",ns):
self.struct={}
id=self._readcontent_InformBReg(pos)
self.db.egais_docs_ct_updId(iddoc,id,self.struct)
return True
def _addplaces(self,tree):
clients=tree.find("rc:Clients",ns)
if clients==None:
print "no clients"
return
struct={}
self.db.egais_places_clear()
for t in clients.findall("rc:Client",ns):
t=t.find("oref:OrgInfoV2",ns)
t = findUL(t)
a=t.find("oref:address",ns)
for f in self.db.tb_egais_places.record_add:
r=t.find("oref:"+f,ns)
if r!=None:
struct[f]=r.text
else:
r=a.find("oref:"+f,ns)
if r!=None:
struct[f]=r.text
self.db.egais_places_add(struct)
def _setstruct(self,base,tag,field=None):
t=base.find(tag,ns)
if field==None:
field=tag.replace(":","_")
try:
self.struct[field]=t.text
return True
except:
print "error:%s" % tag
return False
def _readhead_WayBill(self,tree):
owner=tree.find("ns:Owner",ns)
doc=tree.find("ns:Document",ns)
doc=doc[0]
header=doc.find("wb:Header",ns)
node=header.find("wb:Shipper",ns)
shipper=findUL(node)
node=header.find("wb:Consignee",ns)
consignee=findUL(node)
self._setstruct(owner,"ns:FSRAR_ID")
self._setstruct(doc,"wb:Identity")
self._setstruct(header,"wb:NUMBER")
self._setstruct(header,"wb:Date")
self._setstruct(header,"wb:ShippingDate")
self._setstruct(header,"wb:Type")
self._setstruct(header,"wb:UnitType")
self._setstruct(shipper,"oref:INN","send_INN")
self._setstruct(shipper,"oref:KPP","send_KPP")
self._setstruct(shipper,"oref:ShortName","send_ShortName")
self._setstruct(shipper,"oref:ClientRegId","send_RegId")
self._setstruct(consignee,"oref:INN","recv_INN")
self._setstruct(consignee,"oref:KPP","recv_KPP")
self._setstruct(consignee,"oref:ShortName","recv_ShortName")
self._setstruct(consignee,"oref:ClientRegId","recv_RegId")
content=doc.find("wb:Content",ns)
return content
def _readhead_InformBReg(self,tree):
owner=tree.find("ns:Owner",ns)
doc=tree.find("ns:Document",ns)
doc=doc[0]
header=doc.find("wbr:Header",ns)
shipper=header.find("wbr:Shipper",ns)
shipper=findUL(shipper)
consignee=header.find("wbr:Consignee",ns)
consignee=findUL(consignee)
self._setstruct(shipper,"oref:ClientRegId","send_RegId")
self._setstruct(consignee,"oref:ClientRegId","recv_RegId")
self._setstruct(header,"wbr:WBNUMBER")
self._setstruct(header,"wbr:WBRegId","tc_RegId")
self._setstruct(header,"wbr:Identity")
content=doc.find("wbr:Content",ns)
return content
def _readhead_Ticket(self,tree):
doc=tree.find("ns:Document",ns)
doc=doc[0]
self._setstruct(doc,"tc:RegID")
oper=doc.find("tc:OperationResult",ns)
if oper!=None:
self._setstruct(oper,"tc:OperationResult")
self._setstruct(oper,"tc:OperationName")
regid=self.struct['tc_RegID']
del self.struct['tc_RegID']
return regid
def _readhead_ConfirmTicket(self,tree):
doc=tree.find("ns:Document",ns)
doc=doc[0]
header=doc.find("wt:Header",ns)
self._setstruct(header,"wt:WBRegId")
self._setstruct(header,"wt:IsConfirm")
regid=self.struct['wt_WBRegId']
del self.struct['wt_WBRegId']
return regid
def _readhead_WayBillAct(self,tree):
doc=tree.find("ns:Document",ns)
doc=doc[0]
header=doc.find("wa:Header",ns)
self._setstruct(header,"wa:WBRegId")
self._setstruct(header,"wa:IsAccept")
regid=self.struct['wa_WBRegId']
del self.struct['wa_WBRegId']
return regid
def _readcontent_WayBill(self,pos):
informA=pos.find("wb:InformF1",ns)
informB=pos.find("wb:InformF2",ns)
informB=informB.find("pref:InformF2Item",ns)
product=pos.find("wb:Product",ns)
node=product.find("pref:Producer",ns)
producer=findUL(node)
self._setstruct(pos,"wb:Identity")
self._setstruct(pos,"wb:Quantity")
self._setstruct(pos,"wb:Price")
self._setstruct(pos,"wb:Pack_ID")
self._setstruct(pos,"wb:Party")
self._setstruct(informA,"pref:RegId")
self._setstruct(informB,"pref:F2RegId","pref_BRegId")
self._setstruct(product,"pref:Type")
if not self._setstruct(product,"pref:ShortName"):
self._setstruct(product,"pref:FullName","pref_ShortName")
self._setstruct(product,"pref:AlcCode")
self._setstruct(product,"pref:Capacity")
self._setstruct(product,"pref:AlcVolume")
self._setstruct(product,"pref:ProductVCode")
self._setstruct(producer,"oref:ClientRegId")
self._setstruct(producer,"oref:INN")
self._setstruct(producer,"oref:KPP")
self._setstruct(producer,"oref:ShortName")
def _readcontent_InformBReg(self,pos):
self._setstruct(pos,"wbr:Identity")
self._setstruct(pos,"wbr:InformF2RegId","wbr_InformBRegId")
id=self.struct['wbr_Identity']
del self.struct['wbr_Identity']
return id
def _read_nattn(self,doc):
content=doc.find("ttn:ttnlist",ns)
self.db._truncate(db.TB_EGAIS_DOCS_NEED)
findtag=("ttn:WbRegID","ttn:ttnNumber","ttn:ttnDate","ttn:Shipper")
res=0
for t in content.findall("ttn:NoAnswer",ns):
struct={}
for tag in findtag:
val=t.find(tag,ns)
if val!=None:
struct[tag.replace(":","_")] = val.text
res+=1
self.db._insert(db.TB_EGAIS_DOCS_NEED,struct)
return res
def _reload_ostat(self,tree):
replacing = {
'rst_InformARegId':'rst_InformF1RegId',
'rst_InformBRegId':'rst_InformF2RegId',
}
products=tree.find("rst:Products",ns)
if products==None:
print "no products"
return
res=0
self.db.egais_ostat_clear()
for t in products.findall("rst:StockPosition",ns):
n=t.find("rst:Product",ns)
p=n.find("pref:Producer",ns)
# UL FO ...
ul=findUL(p)
a=ul.find("oref:address",ns)
struct={}
for f in self.db.tb_egais_ostat.record_add:
if f in replacing:
rf=replacing[f]
else:
rf=f
xf=rf.replace("_",":")
for x in (t,n,p,a):
r=x.find(xf,ns)
if r!=None:
break
if r!=None:
struct[f]=r.text
res+=1
#print struct
self.db.egais_ostat_add(struct)
return res
def _addTicket(self,url,reply_id,tree):
self.struct={}
id=self._readhead_Ticket(tree)
if not self.db.egais_find_replyId(reply_id):
return False
if self.db.egais_doc[3] == 5:
return True
if self.struct.has_key("tc_OperationResult"):
if self.struct['tc_OperationResult'] == 'Accepted':
self.struct['status'] = 5
else:
self.struct['status'] = 6
else:
self.struct['status'] = 4
self.struct['xml_ticket']= self.data
self.struct['reply_id'] = reply_id
self.struct['ns_typedoc']= "Ticket"
id=self.db.egais_doc[0]
return self.db.egais_docs_hd_upd(id,self.struct)
def _addConfirmTicket(self,url,reply_id,tree):
self.struct={}
regid=self._readhead_ConfirmTicket(tree)
if not self.db.egais_find_ttn(regid):
return False
if self.struct.has_key("wt_IsConfirm"):
if self.struct['wt_IsConfirm'] == 'Accepted':
self.struct['status'] = 5
else:
self.struct['status'] = 6
self.struct['xml_ticket']= self.data
self.struct['ns_typedoc']= "ConfirmTicket"
id=self.db.egais_doc[0]
return self.db.egais_docs_hd_upd(id,self.struct)
def _addWayBillAct(self,url,reply_id,tree):
self.struct={}
regid=self._readhead_WayBillAct(tree)
if not self.db.egais_find_ttn(regid):
return False
if self.struct.has_key("wa_IsAccept"):
if self.struct['wa_IsAccept'] == 'Accepted':
self.struct['status'] = 5
else:
self.struct['status'] = 6
self.struct['xml_ticket']= self.data
self.struct['ns_typedoc']= "WayBillAct_v2"
self.struct['wt_IsConfirm']=self.struct['wa_IsAccept']
del self.struct['wa_IsAccept']
id=self.db.egais_doc[0]
return self.db.egais_docs_hd_upd(id,self.struct)
def _addWayBill(self,url,id,tree):
self.struct={}
self.struct['type'] = 0
self.struct['status'] = 0
self.struct['xml_doc'] = self.data
self.struct['reply_id'] = id
self.struct['url'] = url
self.struct['ns_typedoc']= "WayBill_v2"
content=self._readhead_WayBill(tree)
if self.db.egais_docs_find(0,self.struct["recv_RegId"],self.struct["send_RegId"],self.struct["wb_NUMBER"]):
            #Perhaps this should be enabled: if a document arrives with the same number it gets overwritten
            #!!! Needs verification!
self.db.egais_docs_hd_del(self.db.egais_doc[0])
if self.db.egais_get_mydoc(self.db.egais_doc[0]):
return False
id=self.db.egais_docs_hd_add(self.struct)
if id==0:
return False
for pos in content.findall("wb:Position",ns):
self.struct={'iddoc':id}
self._readcontent_WayBill(pos)
self.struct['real_Quantity']=self.struct['wb_Quantity']
self.db.egais_docs_ct_add(self.struct)
return True
def _addInformBReg(self,url,id,tree):
self.struct={}
content=self._readhead_InformBReg(tree)
if not self.db.egais_find_replyId(id) or id=="":
print "error:replyid %s" % id
if not self.db.egais_docs_find(None,self.struct["recv_RegId"],self.struct["send_RegId"],self.struct["wbr_WBNUMBER"]):
print "not found doc"
return False
if self.db.egais_doc[3] not in (0,3,5,6) :
print "error:doc status=%d" % self.db.egais_doc[3]
#return False
iddoc=self.db.egais_doc[0]
tc_regId=self.struct['tc_RegId']
self.struct={}
if self.db.egais_doc[3]==0:
self.struct['status']=1
if self.db.egais_doc[3]==3:
self.struct['status']=4
self.struct['xml_inform']=self.data
self.struct['url']=url
#self.struct['reply_id'] = id
self.struct['ns_typedoc']= "InformF2Reg"
self.struct['tc_RegId']=tc_regId
#print self.struct;
self.db.egais_docs_hd_upd(iddoc,self.struct)
for pos in content.findall("wbr:Position",ns):
self.struct={}
id=self._readcontent_InformBReg(pos)
self.db.egais_docs_ct_updId(iddoc,id,self.struct)
return True
def _addReplyNoAnswerTTN(self,url,id,tree):
self.struct={}
content=self._readhead_InformBReg(tree)
def _make_act(self,id):
if not self.db.egais_get_mydoc(id):
return ""
xml=XML_SEND_ACT.replace("%fsrar_id%",self.fsrar_id)
xml=xml.replace("%accept%",self.db.egais_doc_hd['answer'])
xml=xml.replace("%iddoc%",str(self.db.sets['idplace'])+"_"+self.db.egais_doc_hd['id'])
xml=xml.replace("%date%",curdate2my())
xml=xml.replace("%wb_RegId%",self.db.egais_doc_hd['tc_RegId'])
XML=xml
XML_CONTENT=""
use_content=False
for ct in self.db.egais_doc_ct:
if ct['real_Quantity']!=ct['wb_Quantity']:
use_content=True
xml=XML_ACT_CONTENT.replace("%identity%",ct['wb_Identity'])
xml=xml.replace("%real%",ct['real_Quantity'])
xml=xml.replace("%wb_RegId%",str(ct['wbr_InformBRegId']))
XML_CONTENT+=xml
if not use_content:
XML_CONTENT=""
XML=XML.replace("%content%",XML_CONTENT)
return XML.encode("utf8")
def _make_return(self,id):
if not self.db.egais_get_mydoc(id):
return ""
replacing = {
'wbr_InformBRegId':'wbr_InformF2RegId',
}
xml=XML_SEND_WAYBILL_HEAD.replace("%fsrar_id%",self.fsrar_id)
rlist={ "%identity%" :"wb_Identity",\
"%number%" :"wb_NUMBER",\
"%dt%" :"wb_Date",\
"%inn%" :"send_INN",\
"%kpp%" :"send_KPP",\
"%regid%" :"send_RegId",\
"%name%" :"send_ShortName",\
"%send_inn%" :"recv_INN",\
"%send_kpp%" :"recv_KPP",\
"%send_regid%" :"recv_RegId",\
"%send_name%" :"recv_ShortName",\
}
for k,v in rlist.items():
if v.find('ShortName')!=-1:
self.db.egais_doc_hd[v]=self.db.egais_doc_hd[v][:64]
xml=xml.replace(k,self.db.egais_doc_hd[v])
xml=xml.replace( "%type%","WBReturnFromMe")
rlist={ "%identity%" :"wb_Identity",\
"%quantity%" :"real_Quantity",\
"%price%" :"wb_Price",\
"%inform_a%" :"pref_RegId",\
"%inform_b%" :"wbr_InformBRegId",\
"%shortname%" :"pref_ShortName",\
"%alccode%" :"pref_AlcCode",\
"%capacity%" :"pref_Capacity",\
"%alcvolume%" :"pref_AlcVolume",\
"%productvcode%":"pref_ProductVCode",\
"%regid%" :"oref_ClientRegId",\
"%inn%" :"oref_INN",\
"%kpp%" :"oref_KPP",\
"%oref_shortname%" :"oref_ShortName",\
}
XML_CONTENT=""
for ct in self.db.egais_doc_ct:
xml2=XML_SEND_WAYBILL_CONTENT
for k,v in rlist.items():
if ct[v]!=None and ct[v]!='None':
if v=='pref_ShortName':
ct[v]=ct[v][:64]
xml2=xml2.replace(k,ct[v])
else:
xml2=xml2.replace(k,"None")
t=v.replace("_",":")
t1="<%s>" % t
t2="</%s>" % t
xml2=xml2.replace(t1+"None"+t2,"")
xml2=xml2.replace("%pref_type%",u"АП")
xml2=xml2.replace("%packet%",self.db.egais_doc_hd["wb_UnitType"])
XML_CONTENT+="\n"+xml2
XML=xml.replace("%content%",XML_CONTENT)
return XML.encode("utf8")
def _make_check(self,_type,ncheck,pos):
dttm=datetime.now().strftime(format="%d%m%y%H%M")
xml=XML_CHECK.replace("%inn%",self.db.sets['inn'])
xml=xml.replace("%kpp%",self.db.sets['kpp'])
xml=xml.replace("%name%",self.db.sets['orgname'])
xml=xml.replace("%address%",self.db.sets['placename'])
xml=xml.replace("%kassa%",self.db.sets['nkassa'])
xml=xml.replace("%datetime%",dttm)
xml=xml.replace("%ncheck%",str(ncheck))
XML=xml
XML_CONTENT=""
for i in range(len(pos)):
p=pos[i]
if not (p['storno']==0 and p['p_alco']==1):
continue
xml=XML_BOTTLE.replace("%barcode%",p['barcode'])
xml=xml.replace("%ean%",p['p_shk'])
if p['paramf1']>0 and _type==1:
price=-p['paramf1']
else:
price=p['paramf1']
xml=xml.replace("%price%",price.__format__(".2f"))
if p['p_litrag']!=0:
xml=xml.replace("%litrag%","volume=\"%s\"" % p['p_litrag'].__format__(".4f"))
else:
xml=xml.replace("%litrag%","")
XML_CONTENT+=xml
XML=XML.replace("%bottles%",XML_CONTENT)
return XML.encode("utf8")
def _make_move(self,id):
if not self.db.egais_get_mydoc(id):
return ""
xml=XML_SEND_WAYBILL_HEAD.replace("%fsrar_id%",self.fsrar_id)
rlist={ "%identity%" :"wb_Identity",\
"%number%" :"wb_NUMBER",\
"%dt%" :"wb_Date",\
"%packet%" :"wb_UnitType",\
"%inn%" :"send_INN",\
"%kpp%" :"send_KPP",\
"%regid%" :"send_RegId",\
"%name%" :"send_ShortName",\
"%send_inn%" :"recv_INN",\
"%send_kpp%" :"recv_KPP",\
"%send_regid%" :"recv_RegId",\
"%send_name%" :"recv_ShortName",\
}
for k,v in rlist.items():
if v.find('ShortName')!=-1:
self.db.egais_doc_hd[v]=self.db.egais_doc_hd[v][:64]
xml=xml.replace(k,self.db.egais_doc_hd[v])
xml=xml.replace( "%type%","WBReturnFromMe")
rlist={ "%identity%" :"wb_Identity",\
"%quantity%" :"real_Quantity",\
"%price%" :"wb_Price",\
"%inform_a%" :"pref_RegId",\
"%inform_b%" :"wbr_InformF2RegId",\
"%shortname%" :"pref_ShortName",\
"%alccode%" :"pref_AlcCode",\
"%capacity%" :"pref_Capacity",\
"%alcvolume%" :"pref_AlcVolume",\
"%productvcode%":"pref_ProductVCode",\
"%regid%" :"oref_ClientRegId",\
"%inn%" :"oref_INN",\
"%kpp%" :"oref_KPP",\
"%oref_shortname%" :"oref_ShortName",\
}
XML_CONTENT=""
for ct in self.db.egais_doc_ct:
xml2=XML_SEND_WAYBILL_CONTENT
for k,v in rlist.items():
if ct[v]!=None and ct[v]!='None':
if v=='pref_ShortName':
ct[v]=ct[v][:64]
xml2=xml2.replace(k,ct[v])
else:
xml2=xml2.replace(k,"None")
t=v.replace("_",":")
t1="<%s>" % t
t2="</%s>" % t
xml2=xml2.replace(t1+"None"+t2,"")
xml2=xml2.replace("%pref_type%",u"АП")
XML_CONTENT+="\n"+xml2
XML=xml.replace("%content%",XML_CONTENT)
return XML.encode("utf8")
| gpl-3.0 | -6,480,574,202,339,699,000 | 32.91342 | 141 | 0.52724 | false |
pogaku9/aws-datalake-quickstart-looker-isv-integration | scripts/lambdas/writetoES/requests/auth.py | 1 | 8323 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
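# Illustrative sketch (not part of the original module):
#
#     _basic_auth_str('user', 'pass')
#     # -> 'Basic dXNlcjpwYXNz'   (base64 of "user:pass")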
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
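# Usage sketch (standard requests API; the URL is a placeholder):
#     import requests
#     requests.get('https://example.com', auth=HTTPBasicAuth('user', 'pass'))
# This simply attaches the Authorization header built by _basic_auth_str above.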
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, 'init'):
self._thread_local.init = True
self._thread_local.last_nonce = ''
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
realm = self._thread_local.chal['realm']
nonce = self._thread_local.chal['nonce']
qop = self._thread_local.chal.get('qop')
algorithm = self._thread_local.chal.get('algorithm')
opaque = self._thread_local.chal.get('opaque')
hash_utf8 = None
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = '%08x' % self._thread_local.nonce_count
s = str(self._thread_local.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if not qop:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
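# Usage sketch (standard requests API; the URL is a placeholder). The first
# 401 response carries the digest challenge and handle_401 replays the request:
#     import requests
#     requests.get('https://example.com/protected',
#                  auth=HTTPDigestAuth('user', 'pass'))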
| apache-2.0 | 7,563,665,243,914,489,000 | 31.971429 | 88 | 0.547639 | false |
miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/GenMetaFile/GenInfFile.py | 1 | 44961 | ## @file GenInfFile.py
#
# This file contains the logic to transfer a package object to INF files.
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
GenInf
'''
import os
import stat
import codecs
import md5
from Core.FileHook import __FileHookOpen__
from Library.String import GetSplitValueList
from Library.Parsing import GenSection
from Library.Parsing import GetWorkspacePackage
from Library.Parsing import ConvertArchForInstall
from Library.Misc import SaveFileOnChange
from Library.Misc import IsAllModuleList
from Library.Misc import Sdict
from Library.Misc import ConvertPath
from Library.Misc import ConvertSpec
from Library.Misc import GetRelativePath
from Library.Misc import GetLocalValue
from Library.CommentGenerating import GenHeaderCommentSection
from Library.CommentGenerating import GenGenericCommentF
from Library.CommentGenerating import _GetHelpStr
from Library import GlobalData
from Logger import StringTable as ST
from Logger import ToolError
import Logger.Log as Logger
from Library import DataType as DT
from GenMetaFile import GenMetaFileMisc
from Library.UniClassObject import FormatUniEntry
## Transfer Module Object to Inf files
#
# Transfer all contents of a standard Module Object to an Inf file
# @param ModuleObject: A Module Object
#
def ModuleToInf(ModuleObject, PackageObject=None, DistHeader=None):
if not GlobalData.gWSPKG_LIST:
GlobalData.gWSPKG_LIST = GetWorkspacePackage()
#
# Init global information for the file
#
ContainerFile = ModuleObject.GetFullPath()
Content = ''
#
# Generate file header, If any Abstract, Description, Copyright or License XML elements are missing,
# should 1) use the Abstract, Description, Copyright or License from the PackageSurfaceArea.Header elements
# that the module belongs to, or 2) if this is a stand-alone module that is not included in a PackageSurfaceArea,
# use the abstract, description, copyright or license from the DistributionPackage.Header elements.
#
ModuleAbstract = GetLocalValue(ModuleObject.GetAbstract())
if not ModuleAbstract and PackageObject:
ModuleAbstract = GetLocalValue(PackageObject.GetAbstract())
if not ModuleAbstract and DistHeader:
ModuleAbstract = GetLocalValue(DistHeader.GetAbstract())
ModuleDescription = GetLocalValue(ModuleObject.GetDescription())
if not ModuleDescription and PackageObject:
ModuleDescription = GetLocalValue(PackageObject.GetDescription())
if not ModuleDescription and DistHeader:
ModuleDescription = GetLocalValue(DistHeader.GetDescription())
ModuleCopyright = ''
for (Lang, Copyright) in ModuleObject.GetCopyright():
if Lang:
pass
ModuleCopyright = Copyright
if not ModuleCopyright and PackageObject:
for (Lang, Copyright) in PackageObject.GetCopyright():
if Lang:
pass
ModuleCopyright = Copyright
if not ModuleCopyright and DistHeader:
for (Lang, Copyright) in DistHeader.GetCopyright():
if Lang:
pass
ModuleCopyright = Copyright
ModuleLicense = ''
for (Lang, License) in ModuleObject.GetLicense():
if Lang:
pass
ModuleLicense = License
if not ModuleLicense and PackageObject:
for (Lang, License) in PackageObject.GetLicense():
if Lang:
pass
ModuleLicense = License
if not ModuleLicense and DistHeader:
for (Lang, License) in DistHeader.GetLicense():
if Lang:
pass
ModuleLicense = License
#
# Generate header comment section of INF file
#
Content += GenHeaderCommentSection(ModuleAbstract,
ModuleDescription,
ModuleCopyright,
ModuleLicense).replace('\r\n', '\n')
#
# Generate Binary Header
#
for UserExtension in ModuleObject.GetUserExtensionList():
if UserExtension.GetUserID() == DT.TAB_BINARY_HEADER_USERID \
and UserExtension.GetIdentifier() == DT.TAB_BINARY_HEADER_IDENTIFIER:
ModuleBinaryAbstract = GetLocalValue(UserExtension.GetBinaryAbstract())
ModuleBinaryDescription = GetLocalValue(UserExtension.GetBinaryDescription())
ModuleBinaryCopyright = ''
ModuleBinaryLicense = ''
for (Lang, Copyright) in UserExtension.GetBinaryCopyright():
ModuleBinaryCopyright = Copyright
for (Lang, License) in UserExtension.GetBinaryLicense():
ModuleBinaryLicense = License
if ModuleBinaryAbstract and ModuleBinaryDescription and \
ModuleBinaryCopyright and ModuleBinaryLicense:
Content += GenHeaderCommentSection(ModuleBinaryAbstract,
ModuleBinaryDescription,
ModuleBinaryCopyright,
ModuleBinaryLicense,
True)
#
# Generate MODULE_UNI_FILE for module
#
FileHeader = GenHeaderCommentSection(ModuleAbstract, ModuleDescription, ModuleCopyright, ModuleLicense, False, \
DT.TAB_COMMENT_EDK1_SPLIT)
GenModuleUNIEncodeFile(ModuleObject, FileHeader)
#
    # Judge whether the INF file is an AsBuilt INF.
#
if ModuleObject.BinaryModule:
GlobalData.gIS_BINARY_INF = True
else:
GlobalData.gIS_BINARY_INF = False
#
# for each section, maintain a dict, sorted arch will be its key,
# statement list will be its data
# { 'Arch1 Arch2 Arch3': [statement1, statement2],
# 'Arch1' : [statement1, statement3]
# }
#
# Gen section contents
#
Content += GenDefines(ModuleObject)
Content += GenBuildOptions(ModuleObject)
Content += GenLibraryClasses(ModuleObject)
Content += GenPackages(ModuleObject)
Content += GenPcdSections(ModuleObject)
Content += GenSources(ModuleObject)
Content += GenProtocolPPiSections(ModuleObject.GetProtocolList(), True)
Content += GenProtocolPPiSections(ModuleObject.GetPpiList(), False)
Content += GenGuidSections(ModuleObject.GetGuidList())
Content += GenBinaries(ModuleObject)
Content += GenDepex(ModuleObject)
Content += GenUserExtensions(ModuleObject)
if ModuleObject.GetEventList() or ModuleObject.GetBootModeList() or ModuleObject.GetHobList():
Content += '\n'
#
# generate [Event], [BootMode], [Hob] section
#
Content += GenSpecialSections(ModuleObject.GetEventList(), 'Event')
Content += GenSpecialSections(ModuleObject.GetBootModeList(), 'BootMode')
Content += GenSpecialSections(ModuleObject.GetHobList(), 'Hob')
SaveFileOnChange(ContainerFile, Content, False)
if DistHeader.ReadOnly:
os.chmod(ContainerFile, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
else:
os.chmod(ContainerFile, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH|stat.S_IWUSR|stat.S_IWGRP|stat.S_IWOTH)
return ContainerFile
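# Minimal usage sketch (hypothetical; in UPT the ModuleObject, PackageObject
# and DistHeader arguments are produced by the distribution XML parsers rather
# than built by hand):
#     InfPath = ModuleToInf(ModuleObject, PackageObject, DistHeader)
#     print 'INF written to', InfPath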
## GenModuleUNIEncodeFile
# GenModuleUNIEncodeFile, default is a UCS-2LE encode file
#
def GenModuleUNIEncodeFile(ModuleObject, UniFileHeader='', Encoding=DT.TAB_ENCODING_UTF16LE):
GenUNIFlag = False
OnlyLANGUAGE_EN_X = True
BinaryAbstract = []
BinaryDescription = []
#
# If more than one language code is used for any element that would be present in the MODULE_UNI_FILE,
# then the MODULE_UNI_FILE must be created.
#
for (Key, Value) in ModuleObject.GetAbstract() + ModuleObject.GetDescription():
if Key == DT.TAB_LANGUAGE_EN_X:
GenUNIFlag = True
else:
OnlyLANGUAGE_EN_X = False
for UserExtension in ModuleObject.GetUserExtensionList():
if UserExtension.GetUserID() == DT.TAB_BINARY_HEADER_USERID \
and UserExtension.GetIdentifier() == DT.TAB_BINARY_HEADER_IDENTIFIER:
for (Key, Value) in UserExtension.GetBinaryAbstract():
if Key == DT.TAB_LANGUAGE_EN_X:
GenUNIFlag = True
else:
OnlyLANGUAGE_EN_X = False
BinaryAbstract.append((Key, Value))
for (Key, Value) in UserExtension.GetBinaryDescription():
if Key == DT.TAB_LANGUAGE_EN_X:
GenUNIFlag = True
else:
OnlyLANGUAGE_EN_X = False
BinaryDescription.append((Key, Value))
if not GenUNIFlag:
return
elif OnlyLANGUAGE_EN_X:
return
else:
ModuleObject.UNIFlag = True
ContainerFile = os.path.normpath(os.path.join(os.path.dirname(ModuleObject.GetFullPath()),
(ModuleObject.GetBaseName() + '.uni')))
if not os.path.exists(os.path.dirname(ModuleObject.GetFullPath())):
os.makedirs(os.path.dirname(ModuleObject.GetFullPath()))
Content = UniFileHeader + '\r\n'
Content += '\r\n'
Content += FormatUniEntry('#string ' + DT.TAB_INF_ABSTRACT, ModuleObject.GetAbstract(), ContainerFile) + '\r\n'
Content += FormatUniEntry('#string ' + DT.TAB_INF_DESCRIPTION, ModuleObject.GetDescription(), ContainerFile) \
+ '\r\n'
BinaryAbstractString = FormatUniEntry('#string ' + DT.TAB_INF_BINARY_ABSTRACT, BinaryAbstract, ContainerFile)
if BinaryAbstractString:
Content += BinaryAbstractString + '\r\n'
BinaryDescriptionString = FormatUniEntry('#string ' + DT.TAB_INF_BINARY_DESCRIPTION, BinaryDescription, \
ContainerFile)
if BinaryDescriptionString:
Content += BinaryDescriptionString + '\r\n'
if not os.path.exists(ContainerFile):
File = codecs.open(ContainerFile, 'wb', Encoding)
File.write(u'\uFEFF' + Content)
File.stream.close()
    Md5Signature = md5.new(__FileHookOpen__(str(ContainerFile), 'rb').read())
    Md5Sum = Md5Signature.hexdigest()
if (ContainerFile, Md5Sum) not in ModuleObject.FileList:
ModuleObject.FileList.append((ContainerFile, Md5Sum))
return ContainerFile
def GenDefines(ModuleObject):
#
# generate [Defines] section
#
LeftOffset = 31
Content = ''
NewSectionDict = {}
for UserExtension in ModuleObject.GetUserExtensionList():
DefinesDict = UserExtension.GetDefinesDict()
if not DefinesDict:
continue
for Statement in DefinesDict:
            if len(Statement.split(DT.TAB_EQUAL_SPLIT)) > 1:
Statement = (u'%s ' % Statement.split(DT.TAB_EQUAL_SPLIT, 1)[0]).ljust(LeftOffset) \
+ u'= %s' % Statement.split(DT.TAB_EQUAL_SPLIT, 1)[1].lstrip()
SortedArch = DT.TAB_ARCH_COMMON
if Statement.strip().startswith(DT.TAB_INF_DEFINES_CUSTOM_MAKEFILE):
pos = Statement.find(DT.TAB_VALUE_SPLIT)
if pos == -1:
pos = Statement.find(DT.TAB_EQUAL_SPLIT)
Makefile = ConvertPath(Statement[pos + 1:].strip())
Statement = Statement[:pos + 1] + ' ' + Makefile
if SortedArch in NewSectionDict:
NewSectionDict[SortedArch] = NewSectionDict[SortedArch] + [Statement]
else:
NewSectionDict[SortedArch] = [Statement]
SpecialStatementList = []
# TAB_INF_DEFINES_INF_VERSION
Statement = (u'%s ' % DT.TAB_INF_DEFINES_INF_VERSION).ljust(LeftOffset) + u'= %s' % '0x00010017'
SpecialStatementList.append(Statement)
# BaseName
BaseName = ModuleObject.GetBaseName()
if BaseName.startswith('.') or BaseName.startswith('-'):
BaseName = '_' + BaseName
Statement = (u'%s ' % DT.TAB_INF_DEFINES_BASE_NAME).ljust(LeftOffset) + u'= %s' % BaseName
SpecialStatementList.append(Statement)
# TAB_INF_DEFINES_FILE_GUID
Statement = (u'%s ' % DT.TAB_INF_DEFINES_FILE_GUID).ljust(LeftOffset) + u'= %s' % ModuleObject.GetGuid()
SpecialStatementList.append(Statement)
# TAB_INF_DEFINES_VERSION_STRING
Statement = (u'%s ' % DT.TAB_INF_DEFINES_VERSION_STRING).ljust(LeftOffset) + u'= %s' % ModuleObject.GetVersion()
SpecialStatementList.append(Statement)
# TAB_INF_DEFINES_VERSION_STRING
if ModuleObject.UNIFlag:
Statement = (u'%s ' % DT.TAB_INF_DEFINES_MODULE_UNI_FILE).ljust(LeftOffset) + \
u'= %s' % ModuleObject.GetBaseName() + '.uni'
SpecialStatementList.append(Statement)
# TAB_INF_DEFINES_MODULE_TYPE
if ModuleObject.GetModuleType():
Statement = (u'%s ' % DT.TAB_INF_DEFINES_MODULE_TYPE).ljust(LeftOffset) + u'= %s' % ModuleObject.GetModuleType()
SpecialStatementList.append(Statement)
# TAB_INF_DEFINES_PCD_IS_DRIVER
if ModuleObject.GetPcdIsDriver():
Statement = (u'%s ' % DT.TAB_INF_DEFINES_PCD_IS_DRIVER).ljust(LeftOffset) + \
u'= %s' % ModuleObject.GetPcdIsDriver()
SpecialStatementList.append(Statement)
# TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION
if ModuleObject.GetUefiSpecificationVersion():
Statement = (u'%s ' % DT.TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION).ljust(LeftOffset) + \
u'= %s' % ModuleObject.GetUefiSpecificationVersion()
SpecialStatementList.append(Statement)
# TAB_INF_DEFINES_PI_SPECIFICATION_VERSION
if ModuleObject.GetPiSpecificationVersion():
Statement = (u'%s ' % DT.TAB_INF_DEFINES_PI_SPECIFICATION_VERSION).ljust(LeftOffset) + \
u'= %s' % ModuleObject.GetPiSpecificationVersion()
SpecialStatementList.append(Statement)
# LibraryClass
for LibraryClass in ModuleObject.GetLibraryClassList():
if LibraryClass.GetUsage() == DT.USAGE_ITEM_PRODUCES or \
LibraryClass.GetUsage() == DT.USAGE_ITEM_SOMETIMES_PRODUCES:
Statement = (u'%s ' % DT.TAB_INF_DEFINES_LIBRARY_CLASS).ljust(LeftOffset) + \
u'= %s' % LibraryClass.GetLibraryClass()
if LibraryClass.GetSupModuleList():
Statement += '|' + DT.TAB_SPACE_SPLIT.join(l for l in LibraryClass.GetSupModuleList())
SpecialStatementList.append(Statement)
# Spec Item
for SpecItem in ModuleObject.GetSpecList():
Spec, Version = SpecItem
Spec = ConvertSpec(Spec)
Statement = '%s %s = %s' % (DT.TAB_INF_DEFINES_SPEC, Spec, Version)
SpecialStatementList.append(Statement)
# Extern
ExternList = []
for Extern in ModuleObject.GetExternList():
ArchList = Extern.GetSupArchList()
EntryPoint = Extern.GetEntryPoint()
UnloadImage = Extern.GetUnloadImage()
Constructor = Extern.GetConstructor()
Destructor = Extern.GetDestructor()
HelpStringList = Extern.GetHelpTextList()
FFE = Extern.GetFeatureFlag()
ExternList.append([ArchList, EntryPoint, UnloadImage, Constructor, Destructor, FFE, HelpStringList])
#
# Add VALID_ARCHITECTURES information
#
ValidArchStatement = None
if ModuleObject.SupArchList:
ValidArchStatement = '\n' + '# ' + '\n'
ValidArchStatement += '# The following information is for reference only and not required by the build tools.\n'
ValidArchStatement += '# ' + '\n'
ValidArchStatement += '# VALID_ARCHITECTURES = %s' % (' '.join(ModuleObject.SupArchList)) + '\n'
ValidArchStatement += '# '
if DT.TAB_ARCH_COMMON not in NewSectionDict:
NewSectionDict[DT.TAB_ARCH_COMMON] = []
NewSectionDict[DT.TAB_ARCH_COMMON] = NewSectionDict[DT.TAB_ARCH_COMMON] + SpecialStatementList
GenMetaFileMisc.AddExternToDefineSec(NewSectionDict, DT.TAB_ARCH_COMMON, ExternList)
if ValidArchStatement is not None:
NewSectionDict[DT.TAB_ARCH_COMMON] = NewSectionDict[DT.TAB_ARCH_COMMON] + [ValidArchStatement]
Content += GenSection('Defines', NewSectionDict)
return Content
def GenLibraryClasses(ModuleObject):
#
# generate [LibraryClasses] section
#
Content = ''
NewSectionDict = {}
if not GlobalData.gIS_BINARY_INF:
for LibraryClass in ModuleObject.GetLibraryClassList():
if LibraryClass.GetUsage() == DT.USAGE_ITEM_PRODUCES:
continue
#
# Generate generic comment
#
HelpTextList = LibraryClass.GetHelpTextList()
HelpStr = _GetHelpStr(HelpTextList)
CommentStr = GenGenericCommentF(HelpStr)
Statement = CommentStr
Name = LibraryClass.GetLibraryClass()
FFE = LibraryClass.GetFeatureFlag()
Statement += Name
if FFE:
Statement += '|' + FFE
ModuleList = LibraryClass.GetSupModuleList()
ArchList = LibraryClass.GetSupArchList()
for Index in xrange(0, len(ArchList)):
ArchList[Index] = ConvertArchForInstall(ArchList[Index])
ArchList.sort()
SortedArch = ' '.join(ArchList)
KeyList = []
if not ModuleList or IsAllModuleList(ModuleList):
KeyList = [SortedArch]
else:
ModuleString = DT.TAB_VALUE_SPLIT.join(l for l in ModuleList)
if not ArchList:
SortedArch = DT.TAB_ARCH_COMMON
KeyList = [SortedArch + '.' + ModuleString]
else:
KeyList = [Arch + '.' + ModuleString for Arch in ArchList]
for Key in KeyList:
if Key in NewSectionDict:
NewSectionDict[Key] = NewSectionDict[Key] + [Statement]
else:
NewSectionDict[Key] = [Statement]
Content += GenSection('LibraryClasses', NewSectionDict)
else:
LibraryClassDict = {}
for BinaryFile in ModuleObject.GetBinaryFileList():
if not BinaryFile.AsBuiltList:
continue
for LibraryItem in BinaryFile.AsBuiltList[0].LibraryInstancesList:
Statement = '# Guid: ' + LibraryItem.Guid + ' Version: ' + LibraryItem.Version
if len(BinaryFile.SupArchList) == 0:
if LibraryClassDict.has_key('COMMON') and Statement not in LibraryClassDict['COMMON']:
LibraryClassDict['COMMON'].append(Statement)
else:
LibraryClassDict['COMMON'] = ['## @LIB_INSTANCES']
LibraryClassDict['COMMON'].append(Statement)
else:
for Arch in BinaryFile.SupArchList:
if LibraryClassDict.has_key(Arch):
if Statement not in LibraryClassDict[Arch]:
LibraryClassDict[Arch].append(Statement)
else:
continue
else:
LibraryClassDict[Arch] = ['## @LIB_INSTANCES']
LibraryClassDict[Arch].append(Statement)
Content += GenSection('LibraryClasses', LibraryClassDict)
return Content
def GenPackages(ModuleObject):
Content = ''
#
# generate [Packages] section
#
NewSectionDict = Sdict()
WorkspaceDir = GlobalData.gWORKSPACE
for PackageDependency in ModuleObject.GetPackageDependencyList():
#
# Generate generic comment
#
CommentStr = ''
HelpText = PackageDependency.GetHelpText()
if HelpText:
HelpStr = HelpText.GetString()
CommentStr = GenGenericCommentF(HelpStr)
Statement = CommentStr
Guid = PackageDependency.GetGuid()
Version = PackageDependency.GetVersion()
FFE = PackageDependency.GetFeatureFlag()
Path = ''
#
# find package path/name
#
for PkgInfo in GlobalData.gWSPKG_LIST:
if Guid == PkgInfo[1]:
if (not Version) or (Version == PkgInfo[2]):
Path = PkgInfo[3]
break
#
# get relative path
#
RelaPath = GetRelativePath(Path, WorkspaceDir)
Statement += RelaPath.replace('\\', '/')
if FFE:
Statement += '|' + FFE
ArchList = PackageDependency.GetSupArchList()
ArchList.sort()
SortedArch = ' '.join(ArchList)
if SortedArch in NewSectionDict:
NewSectionDict[SortedArch] = NewSectionDict[SortedArch] + [Statement]
else:
NewSectionDict[SortedArch] = [Statement]
Content += GenSection('Packages', NewSectionDict)
return Content
def GenSources(ModuleObject):
#
# generate [Sources] section
#
Content = ''
NewSectionDict = {}
for Source in ModuleObject.GetSourceFileList():
SourceFile = Source.GetSourceFile()
Family = Source.GetFamily()
FeatureFlag = Source.GetFeatureFlag()
SupArchList = Source.GetSupArchList()
SupArchList.sort()
SortedArch = ' '.join(SupArchList)
Statement = GenSourceStatement(ConvertPath(SourceFile), Family, FeatureFlag)
if SortedArch in NewSectionDict:
NewSectionDict[SortedArch] = NewSectionDict[SortedArch] + [Statement]
else:
NewSectionDict[SortedArch] = [Statement]
Content += GenSection('Sources', NewSectionDict)
return Content
def GenDepex(ModuleObject):
#
# generate [Depex] section
#
NewSectionDict = Sdict()
Content = ''
for Depex in ModuleObject.GetPeiDepex() + ModuleObject.GetDxeDepex() + ModuleObject.GetSmmDepex():
HelpTextList = Depex.GetHelpTextList()
HelpStr = _GetHelpStr(HelpTextList)
CommentStr = GenGenericCommentF(HelpStr)
SupArchList = Depex.GetSupArchList()
SupModList = Depex.GetModuleType()
Expression = Depex.GetDepex()
Statement = CommentStr + Expression
SupArchList.sort()
KeyList = []
if not SupArchList:
SupArchList.append(DT.TAB_ARCH_COMMON.lower())
if not SupModList:
KeyList = SupArchList
else:
for ModuleType in SupModList:
for Arch in SupArchList:
KeyList.append(ConvertArchForInstall(Arch) + '.' + ModuleType)
for Key in KeyList:
if Key in NewSectionDict:
NewSectionDict[Key] = NewSectionDict[Key] + [Statement]
else:
NewSectionDict[Key] = [Statement]
Content += GenSection('Depex', NewSectionDict, False)
return Content
## GenUserExtensions
#
# GenUserExtensions
#
def GenUserExtensions(ModuleObject):
NewSectionDict = {}
for UserExtension in ModuleObject.GetUserExtensionList():
if UserExtension.GetUserID() == DT.TAB_BINARY_HEADER_USERID and \
UserExtension.GetIdentifier() == DT.TAB_BINARY_HEADER_IDENTIFIER:
continue
if UserExtension.GetIdentifier() == 'Depex':
continue
Statement = UserExtension.GetStatement()
if not Statement:
continue
ArchList = UserExtension.GetSupArchList()
for Index in xrange(0, len(ArchList)):
ArchList[Index] = ConvertArchForInstall(ArchList[Index])
ArchList.sort()
KeyList = []
CommonPreFix = ''
if UserExtension.GetUserID():
CommonPreFix = UserExtension.GetUserID()
if CommonPreFix.find('.') > -1:
CommonPreFix = '"' + CommonPreFix + '"'
if UserExtension.GetIdentifier():
CommonPreFix += '.' + '"' + UserExtension.GetIdentifier() + '"'
if ArchList:
KeyList = [CommonPreFix + '.' + Arch for Arch in ArchList]
else:
KeyList = [CommonPreFix]
for Key in KeyList:
if Key in NewSectionDict:
NewSectionDict[Key] = NewSectionDict[Key] + [Statement]
else:
NewSectionDict[Key] = [Statement]
Content = GenSection('UserExtensions', NewSectionDict, False)
return Content
# GenSourceStatement
#
# @param SourceFile: string of source file path/name
# @param Family: string of source file family field
# @param FeatureFlag: string of source file FeatureFlag field
# @param TagName: string of source file TagName field
# @param ToolCode: string of source file ToolCode field
# @param HelpStr: string of source file HelpStr field
#
# @retval Statement: The generated statement for source
#
def GenSourceStatement(SourceFile, Family, FeatureFlag, TagName=None,
ToolCode=None, HelpStr=None):
Statement = ''
if HelpStr:
Statement += GenGenericCommentF(HelpStr)
#
# format of SourceFile|Family|TagName|ToolCode|FeatureFlag
#
Statement += SourceFile
if TagName == None:
TagName = ''
if ToolCode == None:
ToolCode = ''
if HelpStr == None:
HelpStr = ''
if FeatureFlag:
Statement += '|' + Family + '|' + TagName + '|' + ToolCode + '|' + FeatureFlag
elif ToolCode:
Statement += '|' + Family + '|' + TagName + '|' + ToolCode
elif TagName:
Statement += '|' + Family + '|' + TagName
elif Family:
Statement += '|' + Family
return Statement
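# Worked example of the statement layout (placeholder values, hand-traced
# through the branches above; empty TagName/ToolCode collapse to empty fields):
#     GenSourceStatement('X64/Foo.asm', 'MSFT', 'gMyFeaturePcd')
#         -> 'X64/Foo.asm|MSFT|||gMyFeaturePcd'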
# GenBinaryStatement
#
# @param Key: (FileName, FileType, FFE, SortedArch)
# @param Value: (Target, Family, TagName, Comment)
#
#
def GenBinaryStatement(Key, Value, SubTypeGuidValue=None):
(FileName, FileType, FFE, SortedArch) = Key
if SortedArch:
pass
if Value:
(Target, Family, TagName, Comment) = Value
else:
Target = ''
Family = ''
TagName = ''
Comment = ''
if Comment:
Statement = GenGenericCommentF(Comment)
else:
Statement = ''
if FileType == 'SUBTYPE_GUID' and SubTypeGuidValue:
Statement += FileType + '|' + SubTypeGuidValue + '|' + FileName
else:
Statement += FileType + '|' + FileName
if FileType in DT.BINARY_FILE_TYPE_UI_LIST + DT.BINARY_FILE_TYPE_VER_LIST:
if FFE:
Statement += '|' + Target + '|' + FFE
elif Target:
Statement += '|' + Target
else:
if FFE:
Statement += '|' + Target + '|' + Family + '|' + TagName + '|' + FFE
elif TagName:
Statement += '|' + Target + '|' + Family + '|' + TagName
elif Family:
Statement += '|' + Target + '|' + Family
elif Target:
Statement += '|' + Target
return Statement
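# Worked example (placeholder values, hand-traced through the branches above):
#     GenBinaryStatement(('Foo.efi', 'PE32', '', 'X64'), None)
#         -> 'PE32|Foo.efi'
# With a Value tuple such as ('DEBUG', 'MSFT', '', ''), the Target and Family
# fields are appended instead: 'PE32|Foo.efi|DEBUG|MSFT'.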
## GenGuidSections
#
# @param GuidObjList: List of GuidObject
# @retVal Content: The generated section contents
#
def GenGuidSections(GuidObjList):
#
# generate [Guids] section
#
Content = ''
GuidDict = Sdict()
for Guid in GuidObjList:
HelpTextList = Guid.GetHelpTextList()
HelpStr = _GetHelpStr(HelpTextList)
CName = Guid.GetCName()
FFE = Guid.GetFeatureFlag()
Statement = CName
if FFE:
Statement += '|' + FFE
Usage = Guid.GetUsage()
GuidType = Guid.GetGuidTypeList()[0]
VariableName = Guid.GetVariableName()
#
        # Differentiate the generic comment and the usage comment, as multiple generic comments need to be put first
#
if Usage == DT.ITEM_UNDEFINED and GuidType == DT.ITEM_UNDEFINED:
# generate list of generic comment
Comment = GenGenericCommentF(HelpStr)
else:
# generate list of other comment
Comment = HelpStr.replace('\n', ' ')
Comment = Comment.strip()
if Comment:
Comment = ' # ' + Comment
else:
Comment = ''
if Usage != DT.ITEM_UNDEFINED and GuidType == DT.ITEM_UNDEFINED:
Comment = '## ' + Usage + Comment
elif GuidType == 'Variable':
Comment = '## ' + Usage + ' ## ' + GuidType + ':' + VariableName + Comment
else:
Comment = '## ' + Usage + ' ## ' + GuidType + Comment
if Comment:
Comment += '\n'
#
# merge duplicate items
#
ArchList = Guid.GetSupArchList()
ArchList.sort()
SortedArch = ' '.join(ArchList)
if (Statement, SortedArch) in GuidDict:
PreviousComment = GuidDict[Statement, SortedArch]
Comment = PreviousComment + Comment
GuidDict[Statement, SortedArch] = Comment
NewSectionDict = GenMetaFileMisc.TransferDict(GuidDict, 'INF_GUID')
#
# generate the section contents
#
if NewSectionDict:
Content = GenSection('Guids', NewSectionDict)
return Content
## GenProtocolPPiSections
#
# @param ObjList: List of ProtocolObject or Ppi Object
# @retVal Content: The generated section contents
#
def GenProtocolPPiSections(ObjList, IsProtocol):
Content = ''
Dict = Sdict()
for Object in ObjList:
HelpTextList = Object.GetHelpTextList()
HelpStr = _GetHelpStr(HelpTextList)
CName = Object.GetCName()
FFE = Object.GetFeatureFlag()
Statement = CName
if FFE:
Statement += '|' + FFE
Usage = Object.GetUsage()
Notify = Object.GetNotify()
#
        # Differentiate the generic comment and the usage comment, as consecutive generic comments need to be put together
#
if Usage == DT.ITEM_UNDEFINED and Notify == '':
# generate list of generic comment
Comment = GenGenericCommentF(HelpStr)
else:
# generate list of other comment
Comment = HelpStr.replace('\n', ' ')
Comment = Comment.strip()
if Comment:
Comment = ' # ' + Comment
else:
Comment = ''
if Usage == DT.ITEM_UNDEFINED and not Comment and Notify == '':
Comment = ''
else:
if Notify:
Comment = '## ' + Usage + ' ## ' + 'NOTIFY' + Comment
else:
Comment = '## ' + Usage + Comment
if Comment:
Comment += '\n'
#
# merge duplicate items
#
ArchList = Object.GetSupArchList()
ArchList.sort()
SortedArch = ' '.join(ArchList)
if (Statement, SortedArch) in Dict:
PreviousComment = Dict[Statement, SortedArch]
Comment = PreviousComment + Comment
Dict[Statement, SortedArch] = Comment
NewSectionDict = GenMetaFileMisc.TransferDict(Dict, 'INF_PPI_PROTOCOL')
#
# generate the section contents
#
if NewSectionDict:
if IsProtocol:
Content = GenSection('Protocols', NewSectionDict)
else:
Content = GenSection('Ppis', NewSectionDict)
return Content
## GenPcdSections
#
#
def GenPcdSections(ModuleObject):
Content = ''
if not GlobalData.gIS_BINARY_INF:
#
# for each Pcd Itemtype, maintain a dict so the same type will be grouped
# together
#
ItemTypeDict = {}
for Pcd in ModuleObject.GetPcdList():
HelpTextList = Pcd.GetHelpTextList()
HelpStr = _GetHelpStr(HelpTextList)
Statement = ''
CName = Pcd.GetCName()
TokenSpaceGuidCName = Pcd.GetTokenSpaceGuidCName()
DefaultValue = Pcd.GetDefaultValue()
ItemType = Pcd.GetItemType()
if ItemType in ItemTypeDict:
Dict = ItemTypeDict[ItemType]
else:
Dict = Sdict()
ItemTypeDict[ItemType] = Dict
FFE = Pcd.GetFeatureFlag()
Statement += TokenSpaceGuidCName + '.' + CName
if DefaultValue:
Statement += '|' + DefaultValue
if FFE:
Statement += '|' + FFE
elif FFE:
Statement += '||' + FFE
#
# Generate comment
#
Usage = Pcd.GetValidUsage()
# if FeatureFlag Pcd, then assume all Usage is CONSUMES
if ItemType == DT.TAB_INF_FEATURE_PCD:
Usage = DT.USAGE_ITEM_CONSUMES
if Usage == DT.ITEM_UNDEFINED:
# generate list of generic comment
Comment = GenGenericCommentF(HelpStr)
else:
# generate list of other comment
Comment = HelpStr.replace('\n', ' ')
Comment = Comment.strip()
if Comment:
Comment = ' # ' + Comment
else:
Comment = ''
Comment = '## ' + Usage + Comment
if Comment:
Comment += '\n'
#
# Merge duplicate entries
#
ArchList = Pcd.GetSupArchList()
ArchList.sort()
SortedArch = ' '.join(ArchList)
if (Statement, SortedArch) in Dict:
PreviousComment = Dict[Statement, SortedArch]
Comment = PreviousComment + Comment
Dict[Statement, SortedArch] = Comment
for ItemType in ItemTypeDict:
# First we need to transfer the Dict to use SortedArch as key
Dict = ItemTypeDict[ItemType]
NewSectionDict = GenMetaFileMisc.TransferDict(Dict, 'INF_PCD')
if NewSectionDict:
Content += GenSection(ItemType, NewSectionDict)
#
    # For AsBuilt INF files
#
else:
        Content += GenAsBuiltPatchPcdSections(ModuleObject)
Content += GenAsBuiltPcdExSections(ModuleObject)
return Content
## GenPcdSections
#
#
def GenAsBuiltPatchPcdSections(ModuleObject):
PatchPcdDict = {}
for BinaryFile in ModuleObject.GetBinaryFileList():
if not BinaryFile.AsBuiltList:
continue
for PatchPcd in BinaryFile.AsBuiltList[0].PatchPcdList:
TokenSpaceName = ''
PcdCName = PatchPcd.CName
PcdValue = PatchPcd.DefaultValue
PcdOffset = PatchPcd.Offset
TokenSpaceGuidValue = PatchPcd.TokenSpaceGuidValue
Token = PatchPcd.Token
HelpTextList = PatchPcd.HelpTextList
HelpString = ''
for HelpStringItem in HelpTextList:
for HelpLine in GetSplitValueList(HelpStringItem.String, '\n'):
HelpString += '## ' + HelpLine + '\n'
TokenSpaceName, PcdCName = GenMetaFileMisc.ObtainPcdName(ModuleObject.PackageDependencyList,
TokenSpaceGuidValue,
Token)
if TokenSpaceName == '' or PcdCName == '':
Logger.Error("Upt",
ToolError.RESOURCE_NOT_AVAILABLE,
ST.ERR_INSTALL_FILE_DEC_FILE_ERROR % (TokenSpaceGuidValue, Token),
File=ModuleObject.GetFullPath())
Statement = HelpString + TokenSpaceName + '.' + PcdCName + ' | ' + PcdValue + ' | ' + \
PcdOffset + DT.TAB_SPACE_SPLIT
#
# Use binary file's Arch to be Pcd's Arch
#
ArchList = []
FileNameObjList = BinaryFile.GetFileNameList()
if FileNameObjList:
ArchList = FileNameObjList[0].GetSupArchList()
if len(ArchList) == 0:
if PatchPcdDict.has_key(DT.TAB_ARCH_COMMON):
if Statement not in PatchPcdDict[DT.TAB_ARCH_COMMON]:
PatchPcdDict[DT.TAB_ARCH_COMMON].append(Statement)
else:
PatchPcdDict[DT.TAB_ARCH_COMMON] = [Statement]
else:
for Arch in ArchList:
if PatchPcdDict.has_key(Arch):
if Statement not in PatchPcdDict[Arch]:
PatchPcdDict[Arch].append(Statement)
else:
PatchPcdDict[Arch] = [Statement]
return GenSection(DT.TAB_INF_PATCH_PCD, PatchPcdDict)
## GenPcdSections
#
#
def GenAsBuiltPcdExSections(ModuleObject):
PcdExDict = {}
for BinaryFile in ModuleObject.GetBinaryFileList():
if not BinaryFile.AsBuiltList:
continue
for PcdExItem in BinaryFile.AsBuiltList[0].PcdExValueList:
TokenSpaceName = ''
PcdCName = PcdExItem.CName
TokenSpaceGuidValue = PcdExItem.TokenSpaceGuidValue
Token = PcdExItem.Token
HelpTextList = PcdExItem.HelpTextList
HelpString = ''
for HelpStringItem in HelpTextList:
for HelpLine in GetSplitValueList(HelpStringItem.String, '\n'):
HelpString += '## ' + HelpLine + '\n'
TokenSpaceName, PcdCName = GenMetaFileMisc.ObtainPcdName(ModuleObject.PackageDependencyList,
TokenSpaceGuidValue, Token)
if TokenSpaceName == '' or PcdCName == '':
Logger.Error("Upt",
ToolError.RESOURCE_NOT_AVAILABLE,
ST.ERR_INSTALL_FILE_DEC_FILE_ERROR % (TokenSpaceGuidValue, Token),
File=ModuleObject.GetFullPath())
Statement = HelpString + TokenSpaceName + DT.TAB_SPLIT + PcdCName + DT.TAB_SPACE_SPLIT
#
# Use binary file's Arch to be Pcd's Arch
#
ArchList = []
FileNameObjList = BinaryFile.GetFileNameList()
if FileNameObjList:
ArchList = FileNameObjList[0].GetSupArchList()
if len(ArchList) == 0:
if PcdExDict.has_key('COMMON'):
PcdExDict['COMMON'].append(Statement)
else:
PcdExDict['COMMON'] = [Statement]
else:
for Arch in ArchList:
if PcdExDict.has_key(Arch):
if Statement not in PcdExDict[Arch]:
PcdExDict[Arch].append(Statement)
else:
PcdExDict[Arch] = [Statement]
return GenSection('PcdEx', PcdExDict)
## GenSpecialSections
# generate special sections for Event/BootMode/Hob
#
def GenSpecialSections(ObjectList, SectionName):
#
# generate section
#
Content = ''
NewSectionDict = {}
for Obj in ObjectList:
#
# Generate comment
#
CommentStr = ''
HelpTextList = Obj.GetHelpTextList()
HelpStr = _GetHelpStr(HelpTextList)
CommentStr = GenGenericCommentF(HelpStr)
if SectionName == 'Hob':
Type = Obj.GetHobType()
elif SectionName == 'Event':
Type = Obj.GetEventType()
elif SectionName == 'BootMode':
Type = Obj.GetSupportedBootModes()
else:
assert(SectionName)
Usage = Obj.GetUsage()
Statement = ' ' + Type + ' ## ' + Usage
if CommentStr in ['#\n', '#\n#\n']:
CommentStr = '#\n#\n#\n'
#
# the first head comment line should start with '##\n', if it starts with '#\n', then add one '#'
# else add '##\n' to meet the format defined in INF spec
#
if CommentStr.startswith('#\n'):
CommentStr = '#' + CommentStr
elif CommentStr:
CommentStr = '##\n' + CommentStr
if CommentStr and not CommentStr.endswith('\n#\n'):
CommentStr = CommentStr + '#\n'
NewStateMent = CommentStr + Statement
SupArch = Obj.GetSupArchList()
SupArch.sort()
SortedArch = ' '.join(SupArch)
if SortedArch in NewSectionDict:
NewSectionDict[SortedArch] = NewSectionDict[SortedArch] + [NewStateMent]
else:
NewSectionDict[SortedArch] = [NewStateMent]
SectionContent = GenSection(SectionName, NewSectionDict)
SectionContent = SectionContent.strip()
if SectionContent:
Content = '# ' + ('\n' + '# ').join(GetSplitValueList(SectionContent, '\n'))
Content = Content.lstrip()
#
# add a return to differentiate it between other possible sections
#
if Content:
Content += '\n'
return Content
## GenBuildOptions
#
#
def GenBuildOptions(ModuleObject):
Content = ''
if not ModuleObject.BinaryModule:
#
# generate [BuildOptions] section
#
NewSectionDict = {}
for UserExtension in ModuleObject.GetUserExtensionList():
BuildOptionDict = UserExtension.GetBuildOptionDict()
if not BuildOptionDict:
continue
for Arch in BuildOptionDict:
if Arch in NewSectionDict:
NewSectionDict[Arch] = NewSectionDict[Arch] + [BuildOptionDict[Arch]]
else:
NewSectionDict[Arch] = [BuildOptionDict[Arch]]
Content = GenSection('BuildOptions', NewSectionDict)
else:
BuildOptionDict = {}
for BinaryFile in ModuleObject.GetBinaryFileList():
if not BinaryFile.AsBuiltList:
continue
for BuilOptionItem in BinaryFile.AsBuiltList[0].BinaryBuildFlagList:
Statement = '#' + BuilOptionItem.AsBuiltOptionFlags
if len(BinaryFile.SupArchList) == 0:
if BuildOptionDict.has_key('COMMON'):
if Statement not in BuildOptionDict['COMMON']:
BuildOptionDict['COMMON'].append(Statement)
else:
BuildOptionDict['COMMON'] = ['## @AsBuilt']
BuildOptionDict['COMMON'].append(Statement)
else:
for Arch in BinaryFile.SupArchList:
if BuildOptionDict.has_key(Arch):
if Statement not in BuildOptionDict[Arch]:
BuildOptionDict[Arch].append(Statement)
else:
BuildOptionDict[Arch] = ['## @AsBuilt']
BuildOptionDict[Arch].append(Statement)
Content = GenSection('BuildOptions', BuildOptionDict)
return Content
## GenBinaries
#
#
def GenBinaries(ModuleObject):
NewSectionDict = {}
BinariesDict = []
for UserExtension in ModuleObject.GetUserExtensionList():
BinariesDict = UserExtension.GetBinariesDict()
if BinariesDict:
break
for BinaryFile in ModuleObject.GetBinaryFileList():
FileNameObjList = BinaryFile.GetFileNameList()
for FileNameObj in FileNameObjList:
FileName = ConvertPath(FileNameObj.GetFilename())
FileType = FileNameObj.GetFileType()
FFE = FileNameObj.GetFeatureFlag()
ArchList = FileNameObj.GetSupArchList()
ArchList.sort()
SortedArch = ' '.join(ArchList)
Key = (FileName, FileType, FFE, SortedArch)
if Key in BinariesDict:
ValueList = BinariesDict[Key]
for ValueItem in ValueList:
Statement = GenBinaryStatement(Key, ValueItem)
if SortedArch in NewSectionDict:
NewSectionDict[SortedArch] = NewSectionDict[SortedArch] + [Statement]
else:
NewSectionDict[SortedArch] = [Statement]
#
                # as we have already generated the statement for this DictKey, set the ValueList to be empty
                # to avoid generating duplicate entries, as the DictKey may have multiple entries
#
BinariesDict[Key] = []
else:
if FileType == 'SUBTYPE_GUID' and FileNameObj.GetGuidValue():
Statement = GenBinaryStatement(Key, None, FileNameObj.GetGuidValue())
else:
Statement = GenBinaryStatement(Key, None)
if SortedArch in NewSectionDict:
NewSectionDict[SortedArch] = NewSectionDict[SortedArch] + [Statement]
else:
NewSectionDict[SortedArch] = [Statement]
Content = GenSection('Binaries', NewSectionDict)
return Content
| gpl-2.0 | -570,540,731,257,356,700 | 38.965333 | 120 | 0.594916 | false |
meltedchocolate/Death87 | Groups.py | 1 | 1110 | """
Copyright 2015 Ricky LeDew
This file is part of Death 87.
Death 87 is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Death 87 is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import pygame
class Groups:
def __init__(self):
self.visible = pygame.sprite.Group()
self.sprites = pygame.sprite.Group()
self.walls = pygame.sprite.Group()
self.text = pygame.sprite.Group()
self.particles = pygame.sprite.Group()
self.emitters = pygame.sprite.Group()
def addtogroup(self, object, group):
group.add(object) | gpl-3.0 | 2,001,058,740,107,791,400 | 32.666667 | 73 | 0.69009 | false |
ingadhoc/odoo-infrastructure | infrastructure/wizard/instance_update_add_instances.py | 1 | 1450 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
class instance_update_add_instances(models.TransientModel):
_name = 'instance.update.add_instances'
@api.model
def get_update(self):
return self.env['infrastructure.instance.update'].browse(
self.env.context.get('active_id', False))
update_id = fields.Many2one(
'infrastructure.instance.update',
'Update',
default=get_update,
required=True,
ondelete='cascade',
)
actual_instance_ids = fields.Many2many(
'infrastructure.instance',
compute='get_actual_instances',
)
instance_ids = fields.Many2many(
'infrastructure.instance',
string='Instances',
)
@api.one
@api.depends('update_id')
def get_actual_instances(self):
self.actual_instance_ids = self.update_id.detail_ids.mapped(
'instance_id')
@api.multi
def confirm(self):
self.ensure_one()
for instance in self.instance_ids:
vals = {
'instance_id': instance.id,
'update_id': self.update_id.id,
}
self.update_id.detail_ids.create(vals)
| agpl-3.0 | -7,664,425,907,704,967,000 | 29.851064 | 78 | 0.536552 | false |
playpauseandstop/setman | setman/utils/importlib.py | 1 | 1408 | """
Backported from `importlib <http://pypi.python.org/pypi/importlib>` library,
which itself backported from Python 3.x branch.
"""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
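# Usage examples (mirroring stdlib importlib semantics):
#     import_module('os.path')           # absolute import
#     import_module('.utils', 'setman')  # relative import -> 'setman.utils'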
| bsd-3-clause | -9,140,119,519,811,707,000 | 33.341463 | 79 | 0.617898 | false |
cernanalysispreservation/analysis-preservation.cern.ch | cap/modules/repos/errors.py | 3 | 3242 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2018 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Git exceptions."""
class GitError(Exception):
"""General Git clients error."""
def __init__(self, *args):
"""Initialize exception."""
super().__init__(*args)
class GitURLParsingError(GitError):
"""Git url error."""
def __init__(self, message=None, **kwargs):
"""Initialize exception."""
message = message or 'Invalid git URL.'
super().__init__(message, **kwargs)
class GitPRParsingError(GitError):
"""Git url error."""
def __init__(self, message=None, **kwargs):
"""Initialize exception."""
message = message or 'Pull/Merge requests are not accepted. ' \
'Please merge first.'
super().__init__(message, **kwargs)
class GitRequestWithInvalidSignature(GitError):
"""Git request with invalid signature."""
def __init__(self, message=None, **kwargs):
"""Initialize exception."""
message = message or 'Signatures for this request don\'t match.'
super().__init__(message, **kwargs)
class GitHostNotSupported(GitError):
"""API host not supported."""
def __init__(self, message=None, **kwargs):
"""Initialize exception."""
message = message or 'Host not supported'
super().__init__(message, **kwargs)
class GitIntegrationError(GitError):
"""Exception during connecting analysis with repository."""
def __init__(self, message=None, **kwargs):
"""Initialize exception."""
self.message = message or \
            'Error occurred while connecting the analysis with a repository.'
super().__init__(message, **kwargs)
class GitUnauthorizedRequest(GitError):
"""User not authorized."""
def __init__(self, message=None, **kwargs):
"""Initialize exception."""
self.message = message or \
'User not authorized.'
super().__init__(message, **kwargs)
class GitObjectNotFound(GitError):
"""Git Webhook does not exist."""
def __init__(self, message=None, **kwargs):
"""Initialize exception."""
self.message = message or 'Object not found.'
super().__init__(message, **kwargs)
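# Usage sketch (hypothetical caller; parse_git_url and abort are illustrative
# names only, not part of this module):
#     try:
#         host, owner, repo = parse_git_url(url)
#     except GitURLParsingError as exc:
#         abort(400, str(exc))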
| gpl-2.0 | -5,198,789,454,273,908,000 | 35.022222 | 78 | 0.656693 | false |
MG-group-tools/MGFunc | mgfunc_v2/cluster2fasta.py | 1 | 15574 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import sys
import os
"""
Created on Tue Oct 29 13:13:45 2013
CLASS-VERSION
@author: Kosai
"""
import cPickle as pickle
from datetime import datetime as dt
import time
import argparse
import gzip
class main:
'''
Class version of the cluster2fasta program
'''
def __init__(self):
self.start = time.time()
self.d_ = dt.today()
self.timestarted = self.d_.strftime("%d-%m-%Y %H:%M:%S")
self._D = {}
self.parseArgs()
def parseArgs(self):
parser = argparse.ArgumentParser(prog="cluster2fasta.py", usage="cluster2fasta.py -c mycluster.txt -o mycluster.output -num [-ui uniprot.index\/uniprot.index.p -uf uniprot.fasta] [-ki SAMPLE.index\/SAMPLE.index.p -kf SAMPLE.fasta]", epilog="Written by Kosai+Asli, oct 2013. Last modified apr 2014.")
parser.add_argument("-ui",metavar="uniprot_index_file",help="Uniprot index file",nargs="*")
parser.add_argument("-uf",metavar="uniprot_fasta",help="Fasta-file for all uniprot (from swiss2fasta)",nargs="*")
parser.add_argument("-ki",metavar="sample_index_file",help="Genecatalog index file",nargs=1)
parser.add_argument("-kf",metavar="sample_fasta",help="Fasta-file for all genecatalog sequences",nargs=1)
parser.add_argument("-sfi",metavar="sample_list",help="A list of genecatalog index files and fasta files",nargs=1)
#parser.add_argument("-sfl",metavar="sample_fasta_list",help="Fasta-files list for all genecatalog sequences",nargs=1)
parser.add_argument("-c",metavar="Cluster-name",help="Cluster-file",nargs=1,required=True)
parser.add_argument("-o",metavar="Output",help="Output name",nargs=1)
parser.add_argument("-num",help="Adds 2 coloumns to a new file, with cluster_id\'s, number of sample-genes and number of uniprot ID\'s",action="store_true")
parser.add_argument("-v",help="Verbose. Prints out progress and details to stdout output. Write \"-v\" with no arguments in commandline. Default is off.",action="store_true")
#return parser.parse_args("-o testcluster.argsO_tester".split()), parser #testing on windows
#return parser.parse_args("".split()), parser #testing on window
# self.args = parser.parse_args()
self.parser = parser
def fileIn(self,infile):
if infile[-3:] == ".gz":
return gzip.open(infile,"r")
else:
return open(infile,"r")
def fileOut(self,outfile):
return open(outfile, "w")
def fileClose(self,cfile):
cfile.close()
'''
def dictMaker(i,D_ID): #Create dictionary from index-text file
D = {}
if i[0].split(".")[-1] == "index":
indexline = ""
for el in D_ID:
indexline = el.rstrip().split("\t")
D[indexline[0]] = [indexline[1],indexline[2]]
self.printer("\n\nDICTIONARY DONE!!!!\n\n")
return D
else:
return pickle.load(D_ID)
'''
def dictMaker(self,i,D_ID, j): #Create dictionary from index-text file
if i.split(".")[-1] == "indexed":
indexline = ""
for el in D_ID:
indexline = el.rstrip().split("\t")
self._D[indexline[0]] = [indexline[1],indexline[2], j]
self.printer("\nDictionary done, time used (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
return 1
# else:
# print "Check index file names. :" + i
# self._D = pickle.load(D_ID)
# self.printer("\nDictionary done, time used (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
# return 1
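    # The *.indexed files read above are assumed to hold tab-separated lines of
    # "<gene_id>\t<byte_start>\t<byte_stop>" (layout inferred from dictMaker's
    # parsing), e.g.:
    #     GENE0001<TAB>1234<TAB>2345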
def missingGeneLog(self,genecat,uniprot):
log = self.fileOut(self.args.o[0] + ".missingGenes.log")
for el in genecat:
log.write(el[0]+"\t"+el[1]+"\n")
for el in uniprot:
log.write(el[0]+"\t"+el[1]+"\n")
self.fileClose(log)
def seqExtracter3(self,ID,myD,uni): #Dictionary look-up, one big dictionary
if ID in myD:
start = int(myD[ID][0])
stop = int(myD[ID][1])
if uni == 1:
self.uniprotFasta.seek(start)
seq = self.uniprotFasta.read(stop-start)
seq = "".join(seq.split("\n"))
return seq,1
else:
fasta = self.fileIn(self._F[int(myD[ID][2])][1])
fasta.seek(start)
seq = fasta.read(stop-start)
seq = "".join(seq.split("\n"))
self.fileClose(fasta)
return seq,1
else:
return "",0
def seqExtracter(self,ID,myD,fasta,uni): #Dictionary look-up, one big dictionary
if ID in myD:
start = int(myD[ID][0])
stop = int(myD[ID][1])
fasta.seek(start)
seq = fasta.read(stop-start)
seq = "".join(seq.split("\n"))
return seq,1
else:
return "",0
def seqExtracter2(self,ID,myD,fasta): #Dictionary look-up, each key is first gene letter
start = int(myD[ID[0]][ID][0])
stop = int(myD[ID[0]][ID][1])
fasta.seek(start)
seq = fasta.read(stop-start)
seq = "".join(seq.split("\n"))
return seq
def genecat_list(self):
clusterID =self.fileIn(self.args.c[0])
output = self.fileOut(self.args.o[0]+".genecatalog.fasta")
self._F = {}
infiles=0
for line in file(self.args.sfi[0]):
index = line.split("\t")[0]
fasta = line.split("\t")[1].strip("\n")
self._F[infiles] = [index,fasta]
genecatID = self.fileIn(index)
a = self.dictMaker(index,genecatID,infiles) #takes time
if a ==1 : self.printer("DictMaker worked for " + index)
else: self.printer("DictMaker did not work, check index files " + index)
self.fileClose(genecatID)
infiles+=1
suc = 0
missing = []
seq = ""
for line in clusterID:
L = line.rstrip().split("\t")
C = str(L[0]) #clusterID
L2 = L[2].split(",")
for el in L2:
seq,suc = self.seqExtracter3(el,self._D,0)
if suc == 1:
output.write(">"+C+":"+el+"\n"+seq+"\n")
else:
missing.append([el,C])
#print self._D
self._D = {}
self.fileClose(output)
self.fileClose(clusterID)
return missing
def genecat(self,args,parser):
clusterID =self.fileIn(args.c[0])
genecatID = self.fileIn(args.ki[0])
genecatFasta = self.fileIn(args.kf[0])
output = self.fileOut(args.o[0]+".genecatalog.fasta")
a = self.dictMaker(args.ki[0],genecatID,0) #takes time
if a ==1 : self.printer("DictMaker worked for " + args.ki[0])
else: self.printer("DictMaker did not work, check index files " + args.ki[0])
self.fileClose(genecatID)
GenecatalogD = {}
cGenecatalog = 1
suc = 0
missing = []
seq = ""
for line in clusterID:
L = line.rstrip().split("\t")
C = str(L[0]) #clusterID
L2 = L[2].split(",")
for el in L2:
seq,suc = self.seqExtracter(el,self._D,genecatFasta,0)
if suc == 1:
if el not in GenecatalogD:
GenecatalogD[el] = el[0]+str(cGenecatalog)
cGenecatalog += 1
#output.write(">"+C+"_"+GenecatalogD[el]+"\n"+seq+"\n")
output.write(">"+C+":"+el+"\n"+seq+"\n")
else:
missing.append([el,C])
#print self._D
self._D = {}
# GenecatalogIDconversion(GenecatalogD)
self.fileClose(output)
self.fileClose(genecatFasta)
self.fileClose(clusterID)
return missing
def uniprot(self,args,parser):
clusterID = self.fileIn(args.c[0])
uniprotID = self.fileIn(args.ui[0])
self.uniprotFasta = self.fileIn(args.uf[0])
ctotfile = os.popen("wc -l "+args.c[0])
ctot = ctotfile.read()
ctotfile.close()
ctot = int(ctot.split(" ")[0])
rangelist = range(0,ctot,1)
output = self.fileOut(args.o[0]+".uniprotids.fasta")
D = self.dictMaker(args.ui[0],uniprotID,0) #takes time
if D ==1 : self.printer("DictMaker worked for " + args.ui[0])
else: self.printer("DictMaker did not work, check index files " + args.ui[0])
self.fileClose(uniprotID)
seq = ""
missing = []
suc = 1
c = 0
for line in clusterID:
c+=1
L = line.rstrip().split("\t")
C = str(L[0]) #clusterID
if L[1] == "N":
continue
L2 = L[3].split(",")
for el in L2:
el = el.split("|")[2]
seq,suc = self.seqExtracter3(el,self._D,1)
if suc == 1:
output.write(">"+C+":"+el+"\n"+seq+"\n")
else:
missing.append([el,C])
#if c in rangelist:
#self.printer("FINISHED "+str(c)+" ENTRIES out of "+str(ctot))
del D
self.fileClose(output)
self.fileClose(self.uniprotFasta)
self.fileClose(clusterID)
return missing
def GenecatalogIDconversion(self,D):
self.printer("\nPrinting GeneConversionTable....")
fout = self.fileOut("GeneConversionTable.txt")
for key in D:
fout.write(key+"\t"+D[key]+"\n")
fout.close()
self.printer("DONE!\n")
def numberCounter(self,args,parser):
clusterID = self.fileIn(args.c[0])
if self.args.o:
output = self.fileOut(args.o[0]+".genenumbers")
else:
output = self.fileOut(args.c[0]+".genenumbers")
t = "\t"
n = "\n"
for line in clusterID:
L = line.split("\t")
output.write(L[0]+t+str(len(L[1].split(",")))+t+str(len(set(L[2].split(","))))+n)
self.fileClose(clusterID)
self.fileClose(output)
    def printer(self,string): #suppressing output print unless -v (verbose) is on
# if not self.args.quiet:
if self.args.v:
print string,
def read_columns(self, i, csv_file):
item=""
with open(csv_file, 'r') as csvfile:
for line in csvfile.readlines():
array = line.strip("\n").split('\t')
item = item + "\n" + array[i]
return item
def mainthing(self):
# self.printer("\n***cluster2fasta.py initialized at "\
# + self.d_.strftime("%H:%M %d/%m-%Y") + "***\n")
# self.printer("Arguments:\n")
# self.parseArgs()
no = 1
missing1 = []
missing2 = []
if bool(self.args.ki)^bool(self.args.kf):
self.printer("***ERROR!*** Only one of -ki and -kf was provided!\n")
elif bool(self.args.ui)^bool(self.args.uf):
self.printer("***ERROR!*** Only one of -ui and -uf was provided!\n")
elif not self.args.c:
self.printer("***ERROR!*** No cluster-files(s) provided!\n")
elif (self.args.ki or self.args.ui) and not self.args.o:
self.printer("***ERROR!*** No output-name provided!\n")
else:
if self.args.ki and self.args.kf and self.args.c and self.args.o:
self.printer("\tCluster-file: "+self.args.c[0] +"\n\tGenecatalog-index file: "+self.args.ki[0]+"\n\tGenecatalog fasta-file: "+self.args.kf[0]+"\n\tOutput file-name: "+self.args.o[0]+".genecatgenes.fasta\n")
no = 0
missing1 = self.genecat(self.args,self.parser)
self.printer("\nGenecatalog Genes Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
if self.args.sfi and self.args.c and self.args.o:
self.printer("\tCluster-file: \n\t\t"+self.args.c[0] +"\n\tGenecatalog-index files: \n\t\t"+self.read_columns(0, self.args.sfi[0])+"\n\tGenecatalog fasta-files: \n\t\t"+self.read_columns(1, self.args.sfi[0])+"\n\tOutput file-name: \n\t\t"+ self.args.o[0]+".genecatgenes.fasta.gz\n")
no = 0
missing1 = self.genecat_list()
self.printer("\nGenecatalog Genes Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
if self.args.ui and self.args.uf and self.args.c and self.args.o:
self.printer("\tCluster-file: "+self.args.c[0] +"\n\tUniprot-index file: "+self.args.ui[0]+"\n\tUniprot fasta-file: "+self.args.uf[0]+"\n\tOutput file-name: "+self.args.o[0]+".uniprotids.fasta\n")
no = 0
missing2 = self.uniprot(self.args,self.parser)
self.printer("\nUniprot ID\'s Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
if self.args.num and self.args.c:
if not self.args.o:
self.printer("\tCluster-file: "+self.args.c[0] +"\n\tOutput file-name: "+self.args.c[0][:-4]+".genenumbers\n")
else:
self.printer("\tCluster-file: "+self.args.c[0] +"\n\tOutput file-name: "+self.args.o[0]+".genenumbers\n")
no = 0
self.numberCounter(self.args,self.parser)
self.printer("\nNumber Calculations Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
if no == 1:
self.printer("none!\n")
self.missingGeneLog(missing1,missing2)
timeused = (time.time() - self.start) / 60
self.printer("Time used: "+str(round(timeused*60))\
+ " seconds ("+str(round(timeused)) + " min)\n")
def test(self,num):
self.printer("test")
'''
if __name__ == "__main__":
myclass = main
myclass.mainthing
myclass.test(2)
self.printer("yoyoooyooyoo")
'''
if __name__ == "__main__":
try:
myclass = main()
myclass.args = myclass.parser.parse_args(sys.argv[1:])
myclass.printer("\n### "+sys.argv[0]+" initialized at "+ myclass.timestarted + "\n")
myclass.printer("### OPTIONS: "+str(myclass.args)+"\n")
myclass.mainthing()
#except IOError as i:
# print "I/O error({0}): {1}".format(i.errno, i.strerror)
except Exception,e:
print str(e)
import traceback
traceback.print_exc()
##############################
'''
INPUT:
The User inputs an index-file and a fasta-file.
The index file indexes each entry in the fasta file. In the case of -ui and -uf,
-ui would be a pickle-file which contains the start and end for the sequences in each
entry of the uniprot file (-uf).
if -num is toggled, the script will not create a fasta-output, but instead
show the number of genecat-genes (sample-genes) and uniprot ID's in each cluster.
OUTPUT:
The output is a fasta file containing the sequences of each uniprot/genecat-gene in the input
from the clusters.
OPTIONS LIST:
"-ui" "uniprot_index_file": Uniprot index file containing
"-uf" "uniprot_fasta": Fasta-file for all uniprot (from swiss2fasta)
"-ki" "sample_index_file": Sample index file
"-kf" "sample_fasta": Fasta-file for all sample sequences
"-c" "Cluster-name": Cluster-file
"-o" "Output fasta file": Output name
"-num": Adds 2 coloumns to a new file, with cluster_id's, number of sample-genes and number of uniprot ID's
'''
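# Illustrative invocations (the script name "cluster2fasta.py" and all file
# names below are hypothetical; the flags are the ones from the OPTIONS LIST above):
#   python cluster2fasta.py -c clusters.txt -ki genecat.index -kf genecat.fasta -o out -v
#   python cluster2fasta.py -c clusters.txt -num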
| gpl-3.0 | 5,104,992,305,668,626,000 | 39.557292 | 307 | 0.553679 | false |
kubeflow/tf-operator | sdk/python/kubeflow/tfjob/models/v1_tf_job_list.py | 1 | 7083 | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
tfjob
Python SDK for TF-Operator # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client import V1ListMeta # noqa: F401,E501
from kubeflow.tfjob.models.v1_tf_job import V1TFJob # noqa: F401,E501
class V1TFJobList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1TFJob]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None): # noqa: E501
"""V1TFJobList - a model defined in Swagger""" # noqa: E501
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1TFJobList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1TFJobList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1TFJobList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1TFJobList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1TFJobList. # noqa: E501
List of TFJobs. # noqa: E501
:return: The items of this V1TFJobList. # noqa: E501
:rtype: list[V1TFJob]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1TFJobList.
List of TFJobs. # noqa: E501
:param items: The items of this V1TFJobList. # noqa: E501
:type: list[V1TFJob]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1TFJobList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1TFJobList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1TFJobList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1TFJobList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1TFJobList. # noqa: E501
Standard list metadata. # noqa: E501
:return: The metadata of this V1TFJobList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1TFJobList.
Standard list metadata. # noqa: E501
:param metadata: The metadata of this V1TFJobList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1TFJobList, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1TFJobList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| apache-2.0 | -8,458,928,608,242,113,000 | 31.342466 | 295 | 0.605817 | false |
jseabold/statsmodels | statsmodels/examples/ex_misc_tarma.py | 5 | 1837 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 03 23:01:44 2013
Author: Josef Perktold
"""
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_process import arma_generate_sample, ArmaProcess
from statsmodels.miscmodels.tmodel import TArma
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.arma_mle import Arma
nobs = 500
ar = [1, -0.6, -0.1]
ma = [1, 0.7]
dist = lambda n: np.random.standard_t(3, size=n)
np.random.seed(8659567)
x = arma_generate_sample(ar, ma, nobs, scale=1, distrvs=dist,
burnin=500)
mod = TArma(x)
order = (2, 1)
res = mod.fit(order=order)
res2 = mod.fit_mle(order=order, start_params=np.r_[res[0], 5, 1], method='nm')
print(res[0])
proc = ArmaProcess.from_coeffs(res[0][:order[0]], res[0][order[0]:])  # MA coefficients follow the AR coefficients in the parameter vector
print(ar, ma)
proc.nobs = nobs
# TODO: bug nobs is None, not needed ?, used in ArmaProcess.__repr__
print(proc.ar, proc.ma)
print(proc.ar_roots(), proc.ma_roots())
modn = Arma(x)
resn = modn.fit_mle(order=order)
moda = ARMA(x, order=order)
resa = moda.fit(trend='nc')
print('\nparameter estimates')
print('ls ', res[0])
print('norm', resn.params)
print('t ', res2.params)
print('A ', resa.params)
print('\nstandard deviation of parameter estimates')
#print 'ls ', res[0] #TODO: not available yet
print('norm', resn.bse)
print('t ', res2.bse)
print('A ', resa.bse)
print('A/t-1', resa.bse / res2.bse[:3] - 1)
print('other bse')
print(resn.bsejac)
print(resn.bsejhj)
print(res2.bsejac)
print(res2.bsejhj)
print(res2.t_test(np.eye(len(res2.params))))
# TArma has no fittedvalues and resid
# TODO: check if lag is correct or if fitted `x-resid` is shifted
resid = res2.model.geterrors(res2.params)
fv = res[2]['fvec'] #resid returned from leastsq?
plt.plot(x, 'o', alpha=0.5)
plt.plot(x-resid)
plt.plot(x-fv)
#plt.show()
| bsd-3-clause | 1,438,013,817,249,552,600 | 23.493333 | 78 | 0.67828 | false |
linkhub-sdk/popbill.taxinvoice.example.py | sendEmail.py | 1 | 1538 | # -*- coding: utf-8 -*-
# code for console encoding differences; don't mind it
import sys
import imp
imp.reload(sys)
try: sys.setdefaultencoding('UTF8')
except Exception as E: pass
import testValue
from popbill import TaxinvoiceService, PopbillException
taxinvoiceService = TaxinvoiceService(testValue.LinkID, testValue.SecretKey)
taxinvoiceService.IsTest = testValue.IsTest
taxinvoiceService.IPRestrictOnOff = testValue.IPRestrictOnOff
taxinvoiceService.UseStaticIP = testValue.UseStaticIP
taxinvoiceService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Resends the notification email for an issued tax invoice.
- https://docs.popbill.com/taxinvoice/python/api#SendEmail
'''
try:
    print("=" * 15 + " Resend issuance notification email " + "=" * 15)
    # Popbill member's business registration number
    CorpNum = testValue.testCorpNum
    # Tax invoice issue type: SELL (sales), BUY (purchase), TRUSTEE (consignment)
    MgtKeyType = "SELL"
    # Document number
    MgtKey = "20210429-001"
    # Recipient email address
    # A notification email is sent even when testing in the Popbill development
    # environment, so take care that a real client's address is not entered here.
    ReceiverMail = "test@test.com"
    # Popbill member ID
    UserID = testValue.testUserID
    result = taxinvoiceService.sendEmail(CorpNum, MgtKeyType, MgtKey, ReceiverMail, UserID)
    print("Result : [%d] %s" % (result.code, result.message))
except PopbillException as PE:
print("Popbill Exception : [%d] %s" % (PE.code , PE.message))
| mit | -3,307,818,872,383,219,000 | 26.041667 | 91 | 0.714946 | false |
virginiacc/owning-a-home | src/_lib/wordpress_journey_processor.py | 1 | 2377 | import sys
import json
import os.path
import requests
import dateutil.parser
def posts_at_url(url):
current_page = 1
max_page = sys.maxint
while current_page <= max_page:
url = os.path.expandvars(url)
resp = requests.get(url, params={'page':current_page, 'count': '-1'})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for post in posts_at_url(url):
yield process_journey(post)
def process_journey(item):
del item['comments']
del item['date']
custom_fields = item['custom_fields']
item['_id'] = item['slug']
if item['parent'] != 0:
# This is a step item
item['has_parent'] = True
if custom_fields.get('what_to_know'):
item['what_to_know'] = custom_fields['what_to_know'][0]
if custom_fields.get('how_to_take_action'):
item['how_to_take_action'] = \
custom_fields['how_to_take_action'][0]
if custom_fields.get('key_tool'):
key_tool = {}
key_tool['url'] = custom_fields['key_tool'][0]
key_tool['text'] = custom_fields['key_tool'][1]
item['key_tool'] = key_tool
else:
# This is a phase item
item['has_parent'] = False
# create list of tools
item['tools'] = []
for x in xrange(0,2):
tool = {}
fields = ['description', 'link']
for field in fields:
field_name = 'tools_%s_%s' % (str(x), field)
if field_name in custom_fields:
if field == 'link':
tool['url'] = custom_fields[field_name][0]
tool['text'] = custom_fields[field_name][1]
else:
tool[field] = custom_fields[field_name][0]
if tool:
item['tools'].append(tool)
# create list of milestones
milestones = []
for x in xrange(0,3):
key = 'milestones_%s_milestone' % x
if key in custom_fields:
milestones.append(custom_fields[key][0])
if milestones:
item['milestones'] = milestones
return item
| cc0-1.0 | 1,026,639,759,227,327,500 | 28.7125 | 77 | 0.506521 | false |
otfbot/otfbot | otfbot/lib/chatMod.py | 1 | 4027 | # This file is part of OtfBot.
#
# OtfBot is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OtfBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OtfBot; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# (c) 2005 - 2010 by Alexander Schier
# (c) 2006 - 2010 by Robert Weidlich
""" contains a abstract class for a Bot-module """
from pluginSupport import Plugin
class chatMod(Plugin):
"""
this class mainly serves as documentation of the available callbacks.
some functions are helpers for common tasks, e.g.
kicked calls userLeft, so a plugin implementing only userLeft
will still notice that a kicked user left the channel. a plugin
implementing kicked too can handle it independently of userLeft,
because kicked will be overwritten
"""
def __init__(self, bot):
self.bot = bot
def auth(self, user):
"""check the authorisation of the user"""
pass
def joined(self, channel):
"""we have joined a channel"""
pass
def command(self, user, channel, command, options):
"""a command message received"""
pass
def query(self, user, channel, msg):
"""a private message received"""
pass
def msg(self, user, channel, msg):
"""message received"""
pass
def connectionMade(self):
"""made connection to server"""
pass
def connectionLost(self, reason):
"""lost connection to server"""
pass
def signedOn(self):
"""successfully signed on"""
pass
def left(self, channel):
"""we have left a channel"""
pass
def noticed(self, user, channel, msg):
"""we got a notice"""
pass
def action(self, user, channel, msg):
"""action (/me) received"""
pass
def modeChanged(self, user, channel, set, modes, args):
"""mode changed"""
pass
def kickedFrom(self, channel, kicker, message):
"""someone kicked the bot"""
self.left(channel)
def userKicked(self, kickee, channel, kicker, message):
"""someone kicked someone else"""
self.userLeft(kickee, channel)
def userJoined(self, user, channel):
"""a user joined the channel"""
pass
def userJoinedMask(self, user, channel):
pass
def userLeft(self, user, channel):
"""a user left the channel"""
pass
def userQuit(self, user, quitMessage):
"""a user disconnect from the network"""
pass
def yourHost(self, info):
"""info about your host"""
pass
def userRenamed(self, oldname, newname):
"""a user changed the nick"""
pass
def topicUpdated(self, user, channel, newTopic):
"""a user changed the topic of a channel"""
pass
def irc_unknown(self, prefix, command, params):
"""an IRC-Message, which is not handle by twisted was received"""
pass
def stop(self):
"""called, when the bot is stopped, or the module is reloaded"""
pass
def reload(self):
"""called to reload the settings of the module"""
pass
def start(self):
"""called to start the work of the module
put your initialization stuff in here insteadof __init__
"""
pass
def sendLine(self, line):
pass
def lineReceived(self, line):
pass
def ctcpQuery(self, user, channel, messages):
""" called for ctcp queries
"""
pass
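# Minimal plugin sketch (illustrative only: the callback names come from the
# class above, while the exact wiring into a running bot is an assumption):
#
#     from otfbot.lib import chatMod
#
#     class Plugin(chatMod.chatMod):
#         def __init__(self, bot):
#             self.bot = bot
#         def userLeft(self, user, channel):
#             # fires for normal parts and, via userKicked, for kicks as well
#             print "%s left %s" % (user, channel)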
| gpl-2.0 | 6,027,862,804,107,587,000 | 26.026846 | 76 | 0.619568 | false |
hemna/cinder-brick | brick/target/iscsi/iscsi.py | 1 | 22830 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper code for the iSCSI volume driver.
"""
import os
import re
import stat
import time
from brick import exception
from brick import executor
from brick.i18n import _
from brick.openstack.common import fileutils
from brick.openstack.common import log as logging
from brick.openstack.common import processutils as putils
from brick import utils
LOG = logging.getLogger(__name__)
class TargetAdmin(executor.Executor):
"""iSCSI target administration.
Base class for iSCSI target admin helpers.
"""
def __init__(self, cmd, root_helper, execute):
super(TargetAdmin, self).__init__(root_helper, execute=execute)
self._cmd = cmd
def _run(self, *args, **kwargs):
self._execute(self._cmd, *args, run_as_root=True, **kwargs)
def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
"""Create an iSCSI target and logical unit."""
raise NotImplementedError()
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
"""Remove an iSCSI target and logical unit."""
raise NotImplementedError()
def _new_target(self, name, tid, **kwargs):
"""Create a new iSCSI target."""
raise NotImplementedError()
def _delete_target(self, tid, **kwargs):
"""Delete a target."""
raise NotImplementedError()
def show_target(self, tid, iqn=None, **kwargs):
"""Query the given target ID."""
raise NotImplementedError()
def _new_logicalunit(self, tid, lun, path, **kwargs):
"""Create a new LUN on a target using the supplied path."""
raise NotImplementedError()
def _delete_logicalunit(self, tid, lun, **kwargs):
"""Delete a logical unit from a target."""
raise NotImplementedError()
class TgtAdm(TargetAdmin):
"""iSCSI target administration using tgtadm."""
VOLUME_CONF = """
<target %s>
backing-store %s
lld iscsi
write-cache %s
</target>
"""
VOLUME_CONF_WITH_CHAP_AUTH = """
<target %s>
backing-store %s
lld iscsi
%s
write-cache %s
</target>
"""
def __init__(self, root_helper, volumes_dir,
target_prefix='iqn.2010-10.org.openstack:',
execute=putils.execute):
super(TgtAdm, self).__init__('tgtadm', root_helper, execute)
self.iscsi_target_prefix = target_prefix
self.volumes_dir = volumes_dir
def _get_target(self, iqn):
(out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
lines = out.split('\n')
for line in lines:
if iqn in line:
parsed = line.split()
tid = parsed[1]
return tid[:-1]
return None
def _verify_backing_lun(self, iqn, tid):
backing_lun = True
capture = False
target_info = []
(out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
lines = out.split('\n')
for line in lines:
if iqn in line and "Target %s" % tid in line:
capture = True
if capture:
target_info.append(line)
if iqn not in line and 'Target ' in line:
capture = False
if ' LUN: 1' not in target_info:
backing_lun = False
return backing_lun
def _recreate_backing_lun(self, iqn, tid, name, path):
LOG.warning(_('Attempting recreate of backing lun...'))
# Since we think the most common case of this is a dev busy
# (create vol from snapshot), we're going to add a sleep here.
# This will hopefully give things enough time to stabilize.
# How long should we wait?? I have no idea, so let's go big
# and err on the side of caution.
time.sleep(10)
try:
(out, err) = self._execute('tgtadm', '--lld', 'iscsi',
'--op', 'new', '--mode',
'logicalunit', '--tid',
tid, '--lun', '1', '-b',
path, run_as_root=True)
LOG.debug('StdOut from recreate backing lun: %s' % out)
LOG.debug('StdErr from recreate backing lun: %s' % err)
except putils.ProcessExecutionError as e:
LOG.error(_("Failed to recover attempt to create "
"iscsi backing lun for volume "
"id:%(vol_id)s: %(e)s")
% {'vol_id': name, 'e': e})
def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
# Note(jdg) tid and lun aren't used by TgtAdm but remain for
# compatibility
fileutils.ensure_tree(self.volumes_dir)
vol_id = name.split(':')[1]
write_cache = kwargs.get('write_cache', 'on')
if chap_auth is None:
volume_conf = self.VOLUME_CONF % (name, path, write_cache)
else:
volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (name,
path, chap_auth,
write_cache)
LOG.info(_('Creating iscsi_target for: %s') % vol_id)
volumes_dir = self.volumes_dir
volume_path = os.path.join(volumes_dir, vol_id)
f = open(volume_path, 'w+')
f.write(volume_conf)
f.close()
LOG.debug('Created volume path %(vp)s,\n'
'content: %(vc)s'
% {'vp': volume_path, 'vc': volume_conf})
old_persist_file = None
old_name = kwargs.get('old_name', None)
if old_name is not None:
old_persist_file = os.path.join(volumes_dir, old_name)
try:
# with the persistent tgts we create them
# by creating the entry in the persist file
# and then doing an update to get the target
# created.
(out, err) = self._execute('tgt-admin', '--update', name,
run_as_root=True)
LOG.debug("StdOut from tgt-admin --update: %s", out)
LOG.debug("StdErr from tgt-admin --update: %s", err)
# Grab targets list for debug
# Consider adding a check for lun 0 and 1 for tgtadm
# before considering this as valid
(out, err) = self._execute('tgtadm',
'--lld',
'iscsi',
'--op',
'show',
'--mode',
'target',
run_as_root=True)
LOG.debug("Targets after update: %s" % out)
except putils.ProcessExecutionError as e:
LOG.warning(_("Failed to create iscsi target for volume "
"id:%(vol_id)s: %(e)s")
% {'vol_id': vol_id, 'e': e})
#Don't forget to remove the persistent file we created
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
tid = self._get_target(iqn)
if tid is None:
LOG.error(_("Failed to create iscsi target for volume "
"id:%(vol_id)s. Please ensure your tgtd config file "
"contains 'include %(volumes_dir)s/*'") % {
'vol_id': vol_id,
'volumes_dir': volumes_dir, })
raise exception.NotFound()
# NOTE(jdg): Sometimes we have some issues with the backing lun
# not being created, believe this is due to a device busy
# or something related, so we're going to add some code
# here that verifies the backing lun (lun 1) was created
# and we'll try and recreate it if it's not there
if not self._verify_backing_lun(iqn, tid):
try:
self._recreate_backing_lun(iqn, tid, name, path)
except putils.ProcessExecutionError:
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
# Finally check once more and if no go, fail and punt
if not self._verify_backing_lun(iqn, tid):
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
if old_persist_file is not None and os.path.exists(old_persist_file):
os.unlink(old_persist_file)
return tid
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
LOG.info(_('Removing iscsi_target for: %s') % vol_id)
vol_uuid_file = vol_name
volume_path = os.path.join(self.volumes_dir, vol_uuid_file)
if not os.path.exists(volume_path):
LOG.warning(_('Volume path %s does not exist, '
'nothing to remove.') % volume_path)
return
if os.path.isfile(volume_path):
iqn = '%s%s' % (self.iscsi_target_prefix,
vol_uuid_file)
else:
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
try:
# NOTE(vish): --force is a workaround for bug:
# https://bugs.launchpad.net/cinder/+bug/1159948
self._execute('tgt-admin',
'--force',
'--delete',
iqn,
run_as_root=True)
except putils.ProcessExecutionError as e:
LOG.error(_("Failed to remove iscsi target for volume "
"id:%(vol_id)s: %(e)s")
% {'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
# NOTE(jdg): There's a bug in some versions of tgt that
# will sometimes fail silently when using the force flag
# https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343
# For now work-around by checking if the target was deleted,
# if it wasn't, try again without the force.
# This will NOT do any good for the case of multiple sessions
# which the force flag was added for, but it will however address
# the cases pointed out in bug:
# https://bugs.launchpad.net/cinder/+bug/1304122
if self._get_target(iqn):
try:
LOG.warning(_('Silent failure of target removal '
'detected, retry....'))
self._execute('tgt-admin',
'--delete',
iqn,
run_as_root=True)
except putils.ProcessExecutionError as e:
LOG.error(_("Failed to remove iscsi target for volume "
"id:%(vol_id)s: %(e)s")
% {'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
# NOTE(jdg): This *should* be there still, but in case
# it's not we don't care, so just ignore it if it was
# somehow deleted between entry of this method
# and here
if os.path.exists(volume_path):
os.unlink(volume_path)
else:
LOG.debug('Volume path %s not found at end '
'of remove_iscsi_target.' % volume_path)
def show_target(self, tid, iqn=None, **kwargs):
if iqn is None:
raise exception.InvalidParameterValue(
err=_('valid iqn needed for show_target'))
tid = self._get_target(iqn)
if tid is None:
raise exception.NotFound()
class IetAdm(TargetAdmin):
"""iSCSI target administration using ietadm."""
def __init__(self, root_helper, iet_conf='/etc/iet/ietd.conf',
iscsi_iotype='fileio', execute=putils.execute):
super(IetAdm, self).__init__('ietadm', root_helper, execute)
self.iet_conf = iet_conf
self.iscsi_iotype = iscsi_iotype
def _is_block(self, path):
mode = os.stat(path).st_mode
return stat.S_ISBLK(mode)
def _iotype(self, path):
if self.iscsi_iotype == 'auto':
return 'blockio' if self._is_block(path) else 'fileio'
else:
return self.iscsi_iotype
def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
# NOTE (jdg): Address bug: 1175207
kwargs.pop('old_name', None)
self._new_target(name, tid, **kwargs)
self._new_logicalunit(tid, lun, path, **kwargs)
if chap_auth is not None:
(type, username, password) = chap_auth.split()
self._new_auth(tid, type, username, password, **kwargs)
conf_file = self.iet_conf
if os.path.exists(conf_file):
try:
volume_conf = """
Target %s
%s
Lun 0 Path=%s,Type=%s
""" % (name, chap_auth, path, self._iotype(path))
with utils.temporary_chown(conf_file):
f = open(conf_file, 'a+')
f.write(volume_conf)
f.close()
except putils.ProcessExecutionError as e:
vol_id = name.split(':')[1]
LOG.error(_("Failed to create iscsi target for volume "
"id:%(vol_id)s: %(e)s")
% {'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
return tid
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
self._delete_logicalunit(tid, lun, **kwargs)
self._delete_target(tid, **kwargs)
vol_uuid_file = vol_name
conf_file = self.iet_conf
if os.path.exists(conf_file):
with utils.temporary_chown(conf_file):
try:
iet_conf_text = open(conf_file, 'r+')
full_txt = iet_conf_text.readlines()
new_iet_conf_txt = []
count = 0
for line in full_txt:
if count > 0:
count -= 1
continue
elif re.search(vol_uuid_file, line):
count = 2
continue
else:
new_iet_conf_txt.append(line)
iet_conf_text.seek(0)
iet_conf_text.truncate(0)
iet_conf_text.writelines(new_iet_conf_txt)
finally:
iet_conf_text.close()
def _new_target(self, name, tid, **kwargs):
self._run('--op', 'new',
'--tid=%s' % tid,
'--params', 'Name=%s' % name,
**kwargs)
def _delete_target(self, tid, **kwargs):
self._run('--op', 'delete',
'--tid=%s' % tid,
**kwargs)
def show_target(self, tid, iqn=None, **kwargs):
self._run('--op', 'show',
'--tid=%s' % tid,
**kwargs)
def _new_logicalunit(self, tid, lun, path, **kwargs):
self._run('--op', 'new',
'--tid=%s' % tid,
'--lun=%d' % lun,
'--params', 'Path=%s,Type=%s' % (path, self._iotype(path)),
**kwargs)
def _delete_logicalunit(self, tid, lun, **kwargs):
self._run('--op', 'delete',
'--tid=%s' % tid,
'--lun=%d' % lun,
**kwargs)
def _new_auth(self, tid, type, username, password, **kwargs):
self._run('--op', 'new',
'--tid=%s' % tid,
'--user',
'--params=%s=%s,Password=%s' % (type, username, password),
**kwargs)
class FakeIscsiHelper(object):
def __init__(self):
self.tid = 1
self._execute = None
def set_execute(self, execute):
self._execute = execute
def create_iscsi_target(self, *args, **kwargs):
self.tid += 1
return self.tid
class LioAdm(TargetAdmin):
"""iSCSI target administration for LIO using python-rtslib."""
def __init__(self, root_helper, lio_initiator_iqns='',
iscsi_target_prefix='iqn.2010-10.org.openstack:',
execute=putils.execute):
super(LioAdm, self).__init__('brick-rtstool', root_helper, execute)
self.iscsi_target_prefix = iscsi_target_prefix
self.lio_initiator_iqns = lio_initiator_iqns
self._verify_rtstool()
def _verify_rtstool(self):
try:
self._execute('brick-rtstool', 'verify')
except (OSError, putils.ProcessExecutionError):
LOG.error(_('brick-rtstool is not installed correctly'))
raise
def _get_target(self, iqn):
(out, err) = self._execute('brick-rtstool',
'get-targets',
run_as_root=True)
lines = out.split('\n')
for line in lines:
if iqn in line:
return line
return None
def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
# tid and lun are not used
vol_id = name.split(':')[1]
LOG.info(_('Creating iscsi_target for volume: %s') % vol_id)
# rtstool requires chap_auth, but unit tests don't provide it
chap_auth_userid = 'test_id'
chap_auth_password = 'test_pass'
if chap_auth is not None:
(chap_auth_userid, chap_auth_password) = chap_auth.split(' ')[1:]
extra_args = []
if self.lio_initiator_iqns:
extra_args.append(self.lio_initiator_iqns)
try:
command_args = ['brick-rtstool',
'create',
path,
name,
chap_auth_userid,
chap_auth_password]
if extra_args:
command_args.extend(extra_args)
self._execute(*command_args, run_as_root=True)
except putils.ProcessExecutionError as e:
LOG.error(_("Failed to create iscsi target for volume "
"id:%s.") % vol_id)
LOG.error("%s" % e)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
tid = self._get_target(iqn)
if tid is None:
LOG.error(_("Failed to create iscsi target for volume "
"id:%s.") % vol_id)
raise exception.NotFound()
return tid
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
LOG.info(_('Removing iscsi_target: %s') % vol_id)
vol_uuid_name = vol_name
iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_name)
try:
self._execute('brick-rtstool',
'delete',
iqn,
run_as_root=True)
except putils.ProcessExecutionError as e:
LOG.error(_("Failed to remove iscsi target for volume "
"id:%s.") % vol_id)
LOG.error("%s" % e)
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
def show_target(self, tid, iqn=None, **kwargs):
if iqn is None:
raise exception.InvalidParameterValue(
err=_('valid iqn needed for show_target'))
tid = self._get_target(iqn)
if tid is None:
raise exception.NotFound()
def initialize_connection(self, volume, connector):
volume_iqn = volume['provider_location'].split(' ')[1]
(auth_method, auth_user, auth_pass) = \
volume['provider_auth'].split(' ', 3)
# Add initiator iqns to target ACL
try:
self._execute('brick-rtstool', 'add-initiator',
volume_iqn,
auth_user,
auth_pass,
connector['initiator'],
run_as_root=True)
except putils.ProcessExecutionError:
LOG.error(_("Failed to add initiator iqn %s to target") %
connector['initiator'])
raise exception.ISCSITargetAttachFailed(volume_id=volume['id'])
class ISERTgtAdm(TgtAdm):
VOLUME_CONF = """
<target %s>
driver iser
backing-store %s
write_cache %s
</target>
"""
VOLUME_CONF_WITH_CHAP_AUTH = """
<target %s>
driver iser
backing-store %s
%s
write_cache %s
</target>
"""
def __init__(self, root_helper, volumes_dir,
target_prefix='iqn.2010-10.org.iser.openstack:',
execute=putils.execute):
super(ISERTgtAdm, self).__init__(root_helper, volumes_dir,
target_prefix, execute)
| apache-2.0 | -6,240,072,186,324,997,000 | 36.986689 | 78 | 0.502059 | false |
stuart-knock/tvb-framework | tvb/core/adapters/abcadapter.py | 1 | 57742 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Root classes for adding custom functionality to the code.
.. moduleauthor:: Lia Domide <lia.domide@codemart.ro>
.. moduleauthor:: Bogdan Neacsa <bogdan.neacsa@codemart.ro>
.. moduleauthor:: Yann Gordon <yann@tvb.invalid>
"""
import os
import json
import psutil
import numpy
from datetime import datetime
from copy import copy
from abc import ABCMeta, abstractmethod
from tvb.basic.config.settings import TVBSettings as cfg
from tvb.basic.logger.builder import get_logger
from tvb.basic.traits.types_mapped import MappedType
from tvb.core.utils import date2string, string2array, LESS_COMPLEX_TIME_FORMAT
from tvb.core.entities.storage import dao
from tvb.core.entities.file.files_helper import FilesHelper
from tvb.core.entities.file.files_update_manager import FilesUpdateManager
from tvb.core.entities.file.exceptions import FileVersioningException
from tvb.core.entities.transient.structure_entities import DataTypeMetaData
from tvb.core.adapters.exceptions import IntrospectionException, InvalidParameterException, LaunchException
from tvb.core.adapters.exceptions import NoMemoryAvailableException
from tvb.core.adapters.xml_reader import ELEM_OPTIONS, ELEM_OUTPUTS, INPUTS_KEY
import tvb.basic.traits.traited_interface as interface
import tvb.core.adapters.xml_reader as xml_reader
ATT_METHOD = "python_method"
ATT_PARAMETERS = "parameters_prefix"
KEY_EQUATION = "equation"
KEY_FOCAL_POINTS = "focal_points"
KEY_SURFACE_GID = "surface_gid"
def nan_not_allowed():
"""
Annotation that guides NumPy behavior in case of floating point errors.
The NumPy default is to just print a warning to sys.stdout; this annotation will raise our custom exception.
This annotation will enforce that an exception is thrown in case a floating point error is produced.
e.g. If NaN is taken as input and not produced inside the context covered by this annotation,
nothing happens from this method's p.o.v.
e.g. If inside a method annotated with this decorator we have something like numpy.log(-1),
then LaunchException is thrown.
"""
def wrap(func):
"""Wrap current function with a lock mechanism"""
def new_function(*args, **kw):
""" New function will actually write the Lock."""
old_fp_error_handling = numpy.seterr(divide='raise', invalid='raise')
try:
return func(*args, **kw)
except FloatingPointError:
raise LaunchException('NaN values were generated during launch. Stopping operation execution.')
finally:
numpy.seterr(**old_fp_error_handling)
return new_function
return wrap
def nan_allowed():
"""
Annotation that configures NumPy not to throw an exception in case floating point errors are computed.
It should be used on Adapter methods where computation of NaN/ Inf/etc. is allowed.
"""
def wrap(func):
"""Wrap current function with a lock mechanism"""
def new_function(*args, **kw):
""" New function will actually write the Lock."""
old_fp_error_handling = numpy.seterr(all='ignore')
try:
return func(*args, **kw)
except Exception:
pass
finally:
numpy.seterr(**old_fp_error_handling)
return new_function
return wrap
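# Usage sketch for the two decorators above (hypothetical adapter methods,
# shown for illustration only):
#
#     class SomeAdapter(ABCAdapter):
#         @nan_not_allowed()
#         def launch(self, data):
#             return numpy.log(data)    # numpy.log(-1) raises LaunchException
#
#         @nan_allowed()
#         def _normalize(self, data):
#             return data / data.sum()  # NaN/Inf produced here are tolerated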
class ABCAdapter(object):
"""
Root Abstract class for all TVB Adapters.
"""
TYPE_SELECT = xml_reader.TYPE_SELECT
TYPE_MULTIPLE = xml_reader.TYPE_MULTIPLE
STATIC_ACCEPTED_TYPES = xml_reader.ALL_TYPES
KEY_TYPE = xml_reader.ATT_TYPE
KEY_OPTIONS = xml_reader.ELEM_OPTIONS
KEY_ATTRIBUTES = xml_reader.ATT_ATTRIBUTES
KEY_NAME = xml_reader.ATT_NAME
KEY_VALUE = xml_reader.ATT_VALUE
KEY_LABEL = "label"
KEY_DEFAULT = "default"
KEY_DATATYPE = 'datatype'
KEY_DTYPE = 'elementType'
KEY_DISABLED = "disabled"
KEY_ALL = "allValue"
KEY_CONDITION = "conditions"
KEY_FILTERABLE = "filterable"
KEY_REQUIRED = "required"
KEY_ID = 'id'
KEY_UI_HIDE = "ui_hidden"
LAUNCH_METHOD = "launch"
KEYWORD_PARAMS = "_parameters_"
KEYWORD_SEPARATOR = "_"
KEYWORD_OPTION = "option_"
INTERFACE_ATTRIBUTES_ONLY = interface.INTERFACE_ATTRIBUTES_ONLY
INTERFACE_ATTRIBUTES = interface.INTERFACE_ATTRIBUTES
# Group that will be set for each adapter created by in build_adapter method
algorithm_group = None
_ui_display = 1
__metaclass__ = ABCMeta
def __init__(self):
# It will be populated with keys from DataTypeMetaData
self.meta_data = {DataTypeMetaData.KEY_SUBJECT: DataTypeMetaData.DEFAULT_SUBJECT}
self.file_handler = FilesHelper()
self.storage_path = '.'
# Will be populated with the currently running operation's identifier
self.operation_id = None
self.user_id = None
self.log = get_logger(self.__class__.__module__)
@abstractmethod
def get_input_tree(self):
"""
Describes the input parameters expected by the launch method.
"""
pass
@abstractmethod
def get_output(self):
"""
Describes the output DataTypes produced by the launch method.
"""
pass
def configure(self, **kwargs):
"""
To be implemented in each Adapter that requires any specific configurations
before the actual launch.
"""
pass
@abstractmethod
def get_required_memory_size(self, **kwargs):
"""
Abstract method to be implemented in each adapter. Should return the required memory
for launching the adapter.
"""
pass
@abstractmethod
def get_required_disk_size(self, **kwargs):
"""
Abstract method to be implemented in each adapter. Should return the required disk space
for launching the adapter, in kilobytes.
"""
pass
def get_execution_time_approximation(self, **kwargs):
"""
Method should approximate, based on input arguments, the time it will take for the operation
to finish (in seconds).
"""
return -1
@abstractmethod
def launch(self):
"""
To be implemented in each Adapter.
Will contain the logic of the Adapter.
Any returned DataType will be stored in DB, by the Framework.
"""
pass
@nan_not_allowed()
def _prelaunch(self, operation, uid=None, available_disk_space=0, **kwargs):
"""
Method to wrap LAUNCH.
Will prepare data, and store results on return.
"""
self.meta_data.update(json.loads(operation.meta_data))
self.storage_path = self.file_handler.get_project_folder(operation.project, str(operation.id))
if operation.method_name == self.LAUNCH_METHOD:
self.operation_id = operation.id
self.user_id = operation.fk_launched_by
self.configure(**kwargs)
total_free_memory = psutil.virtual_memory().free + psutil.swap_memory().free
adapter_required_memory = self.get_required_memory_size(**kwargs)
if adapter_required_memory > total_free_memory:
raise NoMemoryAvailableException("Machine does not have enough memory to launch the operation "
"(expected %.2g GB free, found %.2g)." % (
adapter_required_memory / 2 ** 30, total_free_memory / 2 ** 30))
required_disk_space = self.get_required_disk_size(**kwargs)
if available_disk_space < 0:
raise NoMemoryAvailableException("You have exceeded you HDD space quota"
" by %d. Stopping execution." % (available_disk_space,))
if (available_disk_space - required_disk_space) < 0:
raise NoMemoryAvailableException("You only have %s kiloBytes of HDD available but the operation you "
"launched might require %d. "
"Stopping execution..." % (available_disk_space, required_disk_space))
operation.start_now()
operation.result_disk_size = required_disk_space
dao.store_entity(operation)
result = self.launch(**kwargs)
if not isinstance(result, (list, tuple)):
result = [result, ]
self.__check_integrity(result)
else:
result = eval("self." + operation.method_name + "(**kwargs)")
if not isinstance(result, (list, tuple)):
result = [result, ]
return self._capture_operation_results(result, uid)
def _capture_operation_results(self, result, unique_id=None):
"""
After an operation was finished, make sure the results are stored
in DB storage and the correct meta-data and IDs are set.
"""
results_to_store = []
data_type_group_id = None
operation = dao.get_operation_by_id(self.operation_id)
if operation.user_group is None or len(operation.user_group) == 0:
operation.user_group = date2string(datetime.now(), date_format=LESS_COMPLEX_TIME_FORMAT)
operation = dao.store_entity(operation)
if self._is_group_launch():
data_type_group_id = dao.get_datatypegroup_by_op_group_id(operation.fk_operation_group).id
# All entities will have the same subject and state
subject = self.meta_data[DataTypeMetaData.KEY_SUBJECT]
state = self.meta_data[DataTypeMetaData.KEY_STATE]
burst_reference = None
if DataTypeMetaData.KEY_BURST in self.meta_data:
burst_reference = self.meta_data[DataTypeMetaData.KEY_BURST]
perpetuated_identifier = None
if DataTypeMetaData.KEY_TAG_1 in self.meta_data:
perpetuated_identifier = self.meta_data[DataTypeMetaData.KEY_TAG_1]
for res in result:
if res is None:
continue
res.subject = str(subject)
res.state = state
res.fk_parent_burst = burst_reference
res.fk_from_operation = self.operation_id
res.framework_metadata = self.meta_data
res.user_tag_1 = unique_id if unique_id is not None else perpetuated_identifier
res.fk_datatype_group = data_type_group_id
## Compute size-on disk, in case file-storage is used
if hasattr(res, 'storage_path') and hasattr(res, 'get_storage_file_name'):
associated_file = os.path.join(res.storage_path, res.get_storage_file_name())
res.close_file()
res.disk_size = self.file_handler.compute_size_on_disk(associated_file)
res = dao.store_entity(res)
# Write metaData
res.persist_full_metadata()
results_to_store.append(res)
del result[0:len(result)]
result.extend(results_to_store)
if len(result) and self._is_group_launch():
## Update the operation group name
operation_group = dao.get_operationgroup_by_id(operation.fk_operation_group)
operation_group.fill_operationgroup_name(result[0].type)
dao.store_entity(operation_group)
return 'Operation ' + str(self.operation_id) + ' has finished.', len(results_to_store)
def __check_integrity(self, result):
"""
Check that the returned parameters for LAUNCH operation
are of the type specified in the adapter's interface.
"""
entity_id = self.__module__ + '.' + self.__class__.__name__
for result_entity in result:
if type(result_entity) == list and len(result_entity) > 0:
#### Determine the first element not None
first_item = None
for res in result_entity:
if res is not None:
first_item = res
break
if first_item is None:
#### All list items are None
return
#### Now check if the first item has a supported type
if not self.__is_data_in_supported_types(first_item):
msg = "Unexpected DataType %s"
raise Exception(msg % type(first_item))
first_item_type = type(first_item)
for res in result_entity:
if not isinstance(res, first_item_type):
msg = '%s-Heterogeneous types (%s).Expected %s list.'
raise Exception(msg % (entity_id, type(res), first_item_type))
else:
if not self.__is_data_in_supported_types(result_entity):
msg = "Unexpected DataType %s"
raise Exception(msg % type(result_entity))
def __is_data_in_supported_types(self, data):
"""
This method checks if the provided data is one of the adapter's supported return types
"""
if data is None:
return True
for supported_type in self.get_output():
if isinstance(data, supported_type):
return True
##### Data can't be mapped on any supported type !!
return False
def _is_group_launch(self):
"""
Return true if this adapter is launched from a group of operations
"""
operation = dao.get_operation_by_id(self.operation_id)
return operation.fk_operation_group is not None
@staticmethod
def load_entity_by_gid(data_gid):
"""
Load a generic DataType, specified by GID.
"""
datatype = dao.get_datatype_by_gid(data_gid)
if isinstance(datatype, MappedType):
datatype_path = datatype.get_storage_file_path()
files_update_manager = FilesUpdateManager()
if not files_update_manager.is_file_up_to_date(datatype_path):
datatype.invalid = True
dao.store_entity(datatype)
raise FileVersioningException("Encountered DataType with an incompatible storage or data version. "
"The DataType was marked as invalid.")
return datatype
@staticmethod
def build_adapter(algo_group):
"""
Having a module and a class name, create an instance of ABCAdapter.
"""
try:
adapter = __import__(algo_group.module, globals(), locals(), [algo_group.classname])
adapter = eval("adapter." + algo_group.classname)
if algo_group.init_parameter is not None and len(algo_group.init_parameter) > 0:
adapter_instance = adapter(str(algo_group.init_parameter))
else:
adapter_instance = adapter()
if not isinstance(adapter_instance, ABCAdapter):
raise IntrospectionException("Invalid data type: It should extend adapters.ABCAdapter!")
adapter_instance.algorithm_group = algo_group
return adapter_instance
except Exception, excep:
get_logger("ABCAdapter").exception(excep)
raise IntrospectionException(excep.message)
####### METHODS for PROCESSING PARAMETERS start here #############################
def review_operation_inputs(self, parameters):
"""
:returns: a list with the inputs from the parameters list that are instances of DataType,\
and a dictionary with all parameters which are different from the declared defaults
"""
flat_interface = self.flaten_input_interface()
return self._review_operation_inputs(parameters, flat_interface)
def _review_operation_inputs(self, parameters, flat_interface):
"""
Find out which of the submitted parameters are actually DataTypes and
return a list holding all the dataTypes in parameters.
:returns: list of dataTypes and changed parameters.
"""
inputs_datatypes = []
changed_parameters = dict()
for field_dict in flat_interface:
eq_flat_interface_name = self.__find_field_submitted_name(parameters, field_dict[self.KEY_NAME])
if eq_flat_interface_name is not None:
is_datatype = False
if self.KEY_DATATYPE in field_dict and field_dict[self.KEY_DATATYPE]:
eq_datatype = ABCAdapter.load_entity_by_gid(parameters.get(str(eq_flat_interface_name)))
if eq_datatype is not None:
inputs_datatypes.append(eq_datatype)
is_datatype = True
else:
if type(field_dict[self.KEY_TYPE]) in (str, unicode):
point_separator = field_dict[self.KEY_TYPE].rfind('.')
if point_separator > 0:
module = field_dict[self.KEY_TYPE][:point_separator]
classname = field_dict[self.KEY_TYPE][(point_separator + 1):]
try:
module = __import__(module, [], locals(), globals())
class_entity = eval("module." + classname)
if issubclass(class_entity, MappedType):
data_gid = parameters.get(str(field_dict[self.KEY_NAME]))
data_type = ABCAdapter.load_entity_by_gid(data_gid)
if data_type:
inputs_datatypes.append(data_type)
is_datatype = True
except ImportError, _:
pass
if is_datatype:
changed_parameters[field_dict[self.KEY_LABEL]] = inputs_datatypes[-1].display_name
else:
if field_dict[self.KEY_NAME] in parameters and (self.KEY_DEFAULT not in field_dict
or str(field_dict[self.KEY_DEFAULT]) != str(parameters[field_dict[self.KEY_NAME]])):
changed_parameters[field_dict[self.KEY_LABEL]] = str(parameters[field_dict[self.KEY_NAME]])
return inputs_datatypes, changed_parameters
def prepare_ui_inputs(self, kwargs, validation_required=True):
"""
Prepare the inputs received from a HTTP Post in a form that will be
used by the Python adapter.
"""
algorithm_inputs = self.get_input_tree()
algorithm_inputs = self.prepare_param_names(algorithm_inputs)
self._append_required_defaults(kwargs, algorithm_inputs)
return self.convert_ui_inputs(kwargs, validation_required=validation_required)
def _append_required_defaults(self, kwargs, algorithm_inputs):
"""
Add if necessary any parameters marked as required that have a default value
in the algorithm interface but were not submitted from the UI. For example in
operations launched from context-menu or from data structure.
"""
if algorithm_inputs is None:
return
for entry in algorithm_inputs:
## First handle this level of the tree, adding defaults where required
if (entry[self.KEY_NAME] not in kwargs and self.KEY_REQUIRED in entry and (entry[self.KEY_REQUIRED] is True)
and self.KEY_DEFAULT in entry and entry[self.KEY_TYPE] != xml_reader.TYPE_DICT):
kwargs[entry[self.KEY_NAME]] = entry[self.KEY_DEFAULT]
for entry in algorithm_inputs:
## Now that first level was handled, go recursively on selected options only
if ((self.KEY_REQUIRED in entry) and entry[self.KEY_REQUIRED] and (ABCAdapter.KEY_OPTIONS in entry)
and (entry[ABCAdapter.KEY_OPTIONS] is not None)):
for option in entry[ABCAdapter.KEY_OPTIONS]:
#Only go recursive on option that was submitted
if option[self.KEY_VALUE] == kwargs[entry[self.KEY_NAME]]:
if ABCAdapter.KEY_ATTRIBUTES in option:
option[ABCAdapter.KEY_ATTRIBUTES] = self._append_required_defaults(kwargs,
option[ABCAdapter.KEY_ATTRIBUTES])
def convert_ui_inputs(self, kwargs, validation_required=True):
"""
Convert HTTP POST parameters into Python parameters.
"""
kwa = {}
simple_select_list, to_skip_dict_subargs = [], []
for row in self.flaten_input_interface():
## If required attribute was submitted empty no point to continue, so just raise exception
if (validation_required and row.get(xml_reader.ATT_REQUIRED, False)
and row[xml_reader.ATT_NAME] in kwargs and kwargs[row[xml_reader.ATT_NAME]] == ""):
raise InvalidParameterException("Parameter %s is required for %s but no value was submitted!"
"Please relaunch with valid parameters." % (row[xml_reader.ATT_NAME],
self.__class__.__name__))
if row[xml_reader.ATT_TYPE] == xml_reader.TYPE_DICT:
kwa[row[xml_reader.ATT_NAME]], taken_keys = self.__get_dictionary(row, **kwargs)
for key in taken_keys:
if key in kwa:
del kwa[key]
to_skip_dict_subargs.append(key)
continue
## Dictionary subargs that were previously processed should be ignored
if row[xml_reader.ATT_NAME] in to_skip_dict_subargs:
continue
if row[xml_reader.ATT_NAME] not in kwargs.keys():
## DataType sub-attributes are not submitted with GID in their name...
kwa_name = self.__find_field_submitted_name(kwargs, row[xml_reader.ATT_NAME], True)
if kwa_name is None:
## Do not populate attributes not submitted
continue
kwargs[row[xml_reader.ATT_NAME]] = kwargs[kwa_name]
## del kwargs[kwa_name] (don't remove the original param, as it is useful for retrieving op. input DTs)
elif self.__is_parent_not_submitted(row, kwargs):
## Also do not populate sub-attributes from options not selected
del kwargs[row[xml_reader.ATT_NAME]]
continue
if row[xml_reader.ATT_TYPE] == xml_reader.TYPE_ARRAY:
kwa[row[xml_reader.ATT_NAME]] = self.__convert_to_array(kwargs[row[xml_reader.ATT_NAME]], row)
if xml_reader.ATT_MINVALUE in row and xml_reader.ATT_MAXVALUE in row:
self.__validate_range_for_array_input(kwa[row[xml_reader.ATT_NAME]], row)
elif row[xml_reader.ATT_TYPE] == xml_reader.TYPE_LIST:
if not isinstance(kwargs[row[xml_reader.ATT_NAME]], list):
kwa[row[xml_reader.ATT_NAME]] = json.loads(kwargs[row[xml_reader.ATT_NAME]])
elif row[xml_reader.ATT_TYPE] == xml_reader.TYPE_BOOL:
if not kwargs[row[xml_reader.ATT_NAME]]:
kwa[row[xml_reader.ATT_NAME]] = False
else:
kwa[row[xml_reader.ATT_NAME]] = True
elif row[xml_reader.ATT_TYPE] == xml_reader.TYPE_INT:
if (kwargs[row[xml_reader.ATT_NAME]] is None or kwargs[row[xml_reader.ATT_NAME]] == ''
or kwargs[row[xml_reader.ATT_NAME]] == 'None'):
kwa[row[xml_reader.ATT_NAME]] = None
else:
val = int(kwargs[row[xml_reader.ATT_NAME]])
kwa[row[xml_reader.ATT_NAME]] = val
if xml_reader.ATT_MINVALUE in row and xml_reader.ATT_MAXVALUE in row:
self.__validate_range_for_value_input(kwa[row[xml_reader.ATT_NAME]], row)
elif row[xml_reader.ATT_TYPE] == xml_reader.TYPE_FLOAT:
if kwargs[row[xml_reader.ATT_NAME]] == '' or kwargs[row[xml_reader.ATT_NAME]] == 'None':
kwa[row[xml_reader.ATT_NAME]] = None
else:
val = float(kwargs[row[xml_reader.ATT_NAME]])
kwa[row[xml_reader.ATT_NAME]] = val
if xml_reader.ATT_MINVALUE in row and xml_reader.ATT_MAXVALUE in row:
self.__validate_range_for_value_input(kwa[row[xml_reader.ATT_NAME]], row)
elif row[xml_reader.ATT_TYPE] == xml_reader.TYPE_STR:
kwa[row[xml_reader.ATT_NAME]] = kwargs[row[xml_reader.ATT_NAME]]
elif row[xml_reader.ATT_TYPE] in [xml_reader.TYPE_SELECT, xml_reader.TYPE_MULTIPLE]:
val = kwargs[row[xml_reader.ATT_NAME]]
if row[xml_reader.ATT_TYPE] == xml_reader.TYPE_MULTIPLE and not isinstance(val, list):
val = [val]
kwa[row[xml_reader.ATT_NAME]] = val
if row[xml_reader.ATT_TYPE] == xml_reader.TYPE_SELECT:
simple_select_list.append(row[xml_reader.ATT_NAME])
elif row[xml_reader.ATT_TYPE] == xml_reader.TYPE_UPLOAD:
val = kwargs[row[xml_reader.ATT_NAME]]
kwa[row[xml_reader.ATT_NAME]] = val
else:
## DataType parameter to be processed:
simple_select_list.append(row[xml_reader.ATT_NAME])
datatype_gid = kwargs[row[xml_reader.ATT_NAME]]
## Load filtered and trimmed attribute (e.g. field is applied if specified):
kwa[row[xml_reader.ATT_NAME]] = self.__load_entity(row, datatype_gid, kwargs)
if xml_reader.ATT_FIELD in row:
#Add entity_GID to the parameters to recognize original input
kwa[row[xml_reader.ATT_NAME] + '_gid'] = datatype_gid
return self.collapse_arrays(kwa, simple_select_list)
def __validate_range_for_value_input(self, value, row):
if value < row[xml_reader.ATT_MINVALUE] or value > row[xml_reader.ATT_MAXVALUE]:
raise InvalidParameterException("Field %s should be between %s and %s but provided value was %s." % (
row[xml_reader.ATT_NAME], row[xml_reader.ATT_MINVALUE], row[xml_reader.ATT_MAXVALUE], value))
def __validate_range_for_array_input(self, array, row):
try:
min_val = numpy.min(array)
max_val = numpy.max(array)
if min_val < row[xml_reader.ATT_MINVALUE] or max_val > row[xml_reader.ATT_MAXVALUE]:
raise InvalidParameterException("Field %s should have values between %s and %s but provided array "
"contains min-max: (%s, %s)." % (row[xml_reader.ATT_NAME],
row[xml_reader.ATT_MINVALUE], row[xml_reader.ATT_MAXVALUE],
min_val, max_val))
except InvalidParameterException:
raise
except Exception:
# numpy.min/max may fail on exotic inputs; skip the range check in that case
pass
def __convert_to_array(self, input_data, row):
"""
Method used when the type of an input is array, to parse or read.
If the user set an equation for computing a model parameter then the
value of that parameter will be a dictionary which contains all the data
needed for computing that parameter for each vertex from the used surface.
"""
if KEY_EQUATION in str(input_data) and KEY_FOCAL_POINTS in str(input_data) \
and KEY_SURFACE_GID in str(input_data):
try:
input_data = eval(str(input_data))
# TODO move at a different level
equation_type = input_data.get(self.KEY_DTYPE, None)
                if equation_type is None:
self.log.warning("Cannot figure out type of equation from input dictionary: %s. "
"Returning []." % (str(input_data,)))
return []
splitted_class = equation_type.split('.')
module = '.'.join(splitted_class[:-1])
classname = splitted_class[-1]
eq_module = __import__(module, globals(), locals(), [classname])
eq_class = eval('eq_module.' + classname)
equation = eq_class.from_json(input_data[KEY_EQUATION])
focal_points = json.loads(input_data[KEY_FOCAL_POINTS])
surface_gid = input_data[KEY_SURFACE_GID]
surface = self.load_entity_by_gid(surface_gid)
return surface.compute_equation(focal_points, equation)
except Exception, excep:
self.log.error("The parameter '" + str(row['name']) + "' was ignored. None value was returned.")
self.log.exception(excep)
return None
if xml_reader.ATT_QUATIFIER in row:
try:
quantifier = row[xml_reader.ATT_QUATIFIER]
dtype = None
if self.KEY_DTYPE in row:
dtype = row[self.KEY_DTYPE]
if quantifier == xml_reader.QUANTIFIER_MANUAL:
return string2array(str(input_data), ",", dtype)
elif quantifier == xml_reader.QUANTIFIER_UPLOAD:
                    with open(input_data, 'r') as uploaded_file:
                        input_str = uploaded_file.read()
                    return string2array(input_str, " ", dtype)
elif quantifier == xml_reader.QUANTIFIER_FUNTION:
return input_data
except Exception, excep:
self.log.warning("Could not launch operation !")
self.log.exception(excep)
raise Exception("Could not launch with no data from:" + str(row[xml_reader.ATT_NAME]))
return None
def __get_dictionary(self, row, **kwargs):
"""
Find all key/value pairs for the dictionary represented by name.
"""
if self.__is_parent_not_submitted(row, kwargs):
return {}, []
name = row[xml_reader.ATT_NAME]
result_dict = {}
taken_keys = []
for key in kwargs:
if name in key and name != key:
taken_keys.append(key)
if self.KEY_DTYPE in row:
if row[self.KEY_DTYPE] == 'array':
val = string2array(kwargs[key], " ", "float")
else:
val = eval(row[self.KEY_DTYPE] + "('" + kwargs[key] + "')")
else:
val = str(kwargs[key])
result_dict[key.split(ABCAdapter.KEYWORD_PARAMS[1:])[-1]] = val
return result_dict, taken_keys
def __find_field_submitted_name(self, submited_kwargs, flat_name, perform_clean=False):
"""
Return key as in submitted dictionary for a given flat_name. Also remove from submitted_kwargs parameters like
surface_parameters_option_DIFFERENT_GID_vertices.
This won't work when DataType is in selectMultiple !!!!
:param submited_kwargs: Flat dictionary with keys in form surface_parameters_option_GID_vertices
:param flat_name: Name as retrieved from self.flaten_input_interface
(in which we are not aware of existing entities in DB - options in select)
:returns: key from 'submited_kwargs' which corresponds to 'flat_name'
"""
if ABCAdapter.KEYWORD_PARAMS not in flat_name:
if flat_name in submited_kwargs.keys():
return flat_name
else:
return None
prefix = flat_name[0: (flat_name.find(ABCAdapter.KEYWORD_PARAMS) + 12)]
sufix = flat_name[(flat_name.find(ABCAdapter.KEYWORD_PARAMS) + 12):]
parent_name = flat_name[0: flat_name.find(ABCAdapter.KEYWORD_PARAMS)]
submitted_options = ABCAdapter.__compute_submit_option_select(submited_kwargs[parent_name])
datatype_like_submit = False
for submitted_option in submitted_options:
if sufix.startswith(ABCAdapter.KEYWORD_OPTION + str(submitted_option)):
proposed_name = flat_name
else:
datatype_like_submit = True
proposed_name = prefix + ABCAdapter.KEYWORD_OPTION + str(submitted_option)
proposed_name = proposed_name + ABCAdapter.KEYWORD_SEPARATOR + sufix
if perform_clean:
## Remove submitted parameters like surface_parameters_option_GID_vertices when surface != GID
keys_to_remove = []
for submit_key in submited_kwargs:
if (submit_key.startswith(prefix + ABCAdapter.KEYWORD_OPTION)
and submit_key.endswith(sufix) and submit_key != proposed_name):
keys_to_remove.append(submit_key)
for submit_key in keys_to_remove:
del submited_kwargs[submit_key]
if datatype_like_submit and len(submitted_options) > 1:
self.log.warning("DataType attribute in SELECT_MULTIPLE is not supposed to work!!!")
if proposed_name in submited_kwargs:
return proposed_name
return None
@staticmethod
def __is_parent_not_submitted(row, kwargs):
"""
:returns: True when current attributes should not be considered, because parent option was not selected."""
att_name = row[xml_reader.ATT_NAME]
parent_name, option = None, None
if ABCAdapter.KEYWORD_PARAMS in att_name:
parent_name = att_name[0: att_name.find(ABCAdapter.KEYWORD_PARAMS)]
option = att_name[att_name.find(ABCAdapter.KEYWORD_OPTION) + 7:]
option = option[: option.find(ABCAdapter.KEYWORD_SEPARATOR)]
if parent_name is None or option is None:
return False
submitted_option = ABCAdapter.__compute_submit_option_select(kwargs[parent_name])
if not submitted_option:
return True
if option in submitted_option:
return False
return True
@staticmethod
def __compute_submit_option_select(submitted_option):
""" """
if isinstance(submitted_option, (str, unicode)):
submitted_option = submitted_option.replace('[', '').replace(']', '').split(',')
return submitted_option
def __load_entity(self, row, datatype_gid, kwargs):
"""
Load specific DataType entities, as specified in DATA_TYPE table.
Check if the GID is for the correct DataType sub-class, otherwise throw an exception."""
entity = self.load_entity_by_gid(datatype_gid)
if entity is None:
if self.KEY_REQUIRED in row and row[self.KEY_REQUIRED]:
raise InvalidParameterException("Empty value for required parameter %s " % row[self.KEY_LABEL])
return None
expected_dt_class = row[self.KEY_TYPE]
if isinstance(expected_dt_class, (str, unicode)):
classname = expected_dt_class.split('.')[-1]
data_class = __import__(expected_dt_class.replace(classname, ''), globals(), locals(), [classname])
data_class = eval("data_class." + classname)
expected_dt_class = data_class
if not isinstance(entity, expected_dt_class):
raise InvalidParameterException("Expected param '%s' of type %s, but got type %s." % (row[self.KEY_LABEL],
expected_dt_class.__name__, entity.__class__.__name__))
result = entity
## Step 2 of updating Meta-data from parent DataType.
if entity.fk_parent_burst:
## Link just towards the last Burst identified.
self.meta_data[DataTypeMetaData.KEY_BURST] = entity.fk_parent_burst
if entity.user_tag_1:
self.meta_data[DataTypeMetaData.KEY_TAG_1] = entity.user_tag_1
current_subject = self.meta_data[DataTypeMetaData.KEY_SUBJECT]
if current_subject == DataTypeMetaData.DEFAULT_SUBJECT:
self.meta_data[DataTypeMetaData.KEY_SUBJECT] = entity.subject
else:
if entity.subject != current_subject and entity.subject not in current_subject.split(','):
self.meta_data[DataTypeMetaData.KEY_SUBJECT] = current_subject + ',' + entity.subject
## End Step 2 - Meta-data Updates
## Validate current entity to be compliant with specified ROW filters.
dt_filter = row.get(xml_reader.ELEM_CONDITIONS, False)
if (dt_filter is not None) and (dt_filter is not False) and \
(entity is not None) and not dt_filter.get_python_filter_equivalent(entity):
## If a filter is declared, check that the submitted DataType is in compliance to it.
raise InvalidParameterException("Field %s did not pass filters." % (row[xml_reader.ATT_NAME],))
# In case a specific field in entity is to be used, use it
if xml_reader.ATT_FIELD in row:
val = eval("entity." + row[xml_reader.ATT_FIELD])
result = val
if ATT_METHOD in row:
param_dict = dict()
            # The 'shape' attribute of an array wrapper is overridden by us.
            # The check below is only a performance optimization: when the
            # entity has a shape, its parameters can be looked up in kwargs
            # in O(1); otherwise they are found by scanning kwargs in O(n).
if hasattr(entity, 'shape'):
for i in xrange(len(entity.shape)):
if not i:
continue
param_key = (row[xml_reader.ATT_NAME] + "_" + row[ATT_PARAMETERS] + "_" + str(i - 1))
if param_key in kwargs:
param_dict[param_key] = kwargs[param_key]
else:
param_dict = dict((k, v) for k, v in kwargs.items()
if k.startswith(row[xml_reader.ATT_NAME] + "_" + row[ATT_PARAMETERS]))
val = eval("entity." + row[ATT_METHOD] + "(param_dict)")
result = val
return result
@staticmethod
def collapse_arrays(args, simple_select_list, parent=''):
""" In case of variables with similar names:
        (name_parameters_[option_xx]_paramKey) collapse them into a dictionary
of parameters. This is used after parameters POST, on Operation Launch.
"""
result = {}
for name, value in args.items():
short_name = name
option = None
key = None
if name.find(ABCAdapter.KEYWORD_PARAMS) >= 0:
short_name = name[0: (name.find(ABCAdapter.KEYWORD_PARAMS) + 11)]
key = name[(name.find(ABCAdapter.KEYWORD_PARAMS) + 12):]
if key.find(ABCAdapter.KEYWORD_OPTION) == 0:
key = key[7:] # Remove '_option_'
option = key[0: key.find(ABCAdapter.KEYWORD_SEPARATOR)]
key = key[key.find(ABCAdapter.KEYWORD_SEPARATOR) + 1:]
if key is None:
result[name] = value
else:
if short_name not in result:
result[short_name] = {}
if option is None:
result[short_name][key] = value
else:
if option not in result[short_name]:
result[short_name][option] = {}
result[short_name][option][key] = value
for level1_name, level1_params in result.items():
if ABCAdapter.KEYWORD_PARAMS[:-1] in level1_name and isinstance(level1_params, dict):
short_parent_name = level1_name[0: level1_name.find(ABCAdapter.KEYWORD_PARAMS) - 10]
if (parent + short_parent_name) in simple_select_list:
# simple select
if isinstance(result[level1_name[0: level1_name.find(ABCAdapter.KEYWORD_PARAMS) - 10]],
(str, unicode)):
parent_prefix = level1_name + ABCAdapter.KEYWORD_SEPARATOR + ABCAdapter.KEYWORD_OPTION
parent_prefix += result[level1_name[0:level1_name.find(ABCAdapter.KEYWORD_PARAMS) - 10]]
parent_prefix += ABCAdapter.KEYWORD_SEPARATOR
# Ignore options in case of simple selects
# Take only attributes for current selected option.
if result[short_parent_name] in level1_params:
level1_params = level1_params[result[short_parent_name]]
else:
level1_params = {}
else:
parent_prefix = level1_name
transformed_params = ABCAdapter.collapse_arrays(level1_params, simple_select_list,
parent + parent_prefix)
result[level1_name] = transformed_params
elif short_parent_name in result:
# multiple select
for level2_name, level2_params in level1_params.items():
parent_prefix = (level1_name + ABCAdapter.KEYWORD_SEPARATOR +
ABCAdapter.KEYWORD_OPTION + level2_name + ABCAdapter.KEYWORD_SEPARATOR)
transformed_params = ABCAdapter.collapse_arrays(level2_params, simple_select_list,
parent + parent_prefix)
result[level1_name][level2_name] = transformed_params
return result
def noise_configurable_parameters(self):
return [entry[self.KEY_NAME] for entry in self.flaten_input_interface() if 'configurableNoise' in entry]
def flaten_input_interface(self):
""" Return a simple dictionary, instead of a Tree."""
return self._flaten(self.get_input_tree())
@staticmethod
def form_prefix(input_param, prefix=None, option_prefix=None):
"""Compute parameter prefix. We need to be able from the flatten
submitted values in UI, to be able to re-compose the tree of parameters,
and to make sure all submitted names are uniquely identified."""
new_prefix = ""
if prefix is not None and prefix != '':
new_prefix = prefix
if prefix is not None and prefix != '' and not new_prefix.endswith(ABCAdapter.KEYWORD_SEPARATOR):
new_prefix += ABCAdapter.KEYWORD_SEPARATOR
new_prefix += input_param + ABCAdapter.KEYWORD_PARAMS
if option_prefix is not None:
new_prefix += ABCAdapter.KEYWORD_OPTION + option_prefix + ABCAdapter.KEYWORD_SEPARATOR
return new_prefix
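    # E.g. (sketch): form_prefix("surface", None, "GID") produces a prefix of
    # the form "surface_parameters_option_GID_", matching the flat submitted
    # names handled by __find_field_submitted_name above.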
def key_parameters(self, parameters_for):
""" Return the keyword expected for holding parameters
for argument 'parameters_for'."""
return parameters_for + self.KEYWORD_PARAMS[0:11]
@staticmethod
def fill_defaults(adapter_interface, data, fill_unselected_branches=False):
""" Change the default values in the Input Interface Tree."""
result = []
for param in adapter_interface:
new_p = copy(param)
if param[ABCAdapter.KEY_NAME] in data:
new_p[ABCAdapter.KEY_DEFAULT] = data[param[ABCAdapter.KEY_NAME]]
if (ABCAdapter.KEY_ATTRIBUTES in param) and (param[ABCAdapter.KEY_ATTRIBUTES] is not None):
new_p[ABCAdapter.KEY_ATTRIBUTES] = ABCAdapter.fill_defaults(param[ABCAdapter.KEY_ATTRIBUTES], data,
fill_unselected_branches)
if (ABCAdapter.KEY_OPTIONS in param) and (param[ABCAdapter.KEY_OPTIONS] is not None):
new_options = param[ABCAdapter.KEY_OPTIONS]
if param[ABCAdapter.KEY_NAME] in data or fill_unselected_branches:
selected_values = []
if param[ABCAdapter.KEY_NAME] in data:
if param[ABCAdapter.KEY_TYPE] == ABCAdapter.TYPE_MULTIPLE:
selected_values = data[param[ABCAdapter.KEY_NAME]]
else:
selected_values = [data[param[ABCAdapter.KEY_NAME]]]
for i, option in enumerate(new_options):
if option[ABCAdapter.KEY_VALUE] in selected_values or fill_unselected_branches:
new_options[i] = ABCAdapter.fill_defaults([option], data, fill_unselected_branches)[0]
new_p[ABCAdapter.KEY_OPTIONS] = new_options
result.append(new_p)
return result
def _flaten(self, params_list, prefix=None):
""" Internal method, to be used recursively, on parameters POST. """
result = []
for param in params_list:
new_param = copy(param)
new_param[self.KEY_ATTRIBUTES] = None
new_param[self.KEY_OPTIONS] = None
if (prefix is not None) and (self.KEY_TYPE in param):
new_param[ABCAdapter.KEY_NAME] = prefix + param[self.KEY_NAME]
result.append(new_param)
if (self.KEY_OPTIONS in param) and (param[self.KEY_OPTIONS] is not None):
for option in param[self.KEY_OPTIONS]:
### SELECT or SELECT_MULTIPLE attributes
if (self.KEY_ATTRIBUTES in option) and (option[self.KEY_ATTRIBUTES] is not None):
new_prefix = ABCAdapter.form_prefix(param[ABCAdapter.KEY_NAME], prefix, option[self.KEY_VALUE])
extra_list = self._flaten(option[self.KEY_ATTRIBUTES], new_prefix)
result.extend(extra_list)
if (self.KEY_ATTRIBUTES in param) and (param[self.KEY_ATTRIBUTES] is not None):
### DATATYPE attributes
new_prefix = ABCAdapter.form_prefix(param[ABCAdapter.KEY_NAME], prefix, None)
extra_list = self._flaten(param[self.KEY_ATTRIBUTES], new_prefix)
result.extend(extra_list)
return result
@staticmethod
def prepare_param_names(attributes_list, prefix=None, add_option_prefix=False):
"""
For a given attribute list, change the name of the attributes where needed.
Changes refer to adding a prefix, to identify groups.
Will be used on parameters page GET.
"""
result = []
for param in attributes_list:
prepared_param = copy(param)
new_name = param[ABCAdapter.KEY_NAME]
if (prefix is not None) and (ABCAdapter.KEY_TYPE in param):
new_name = prefix + param[ABCAdapter.KEY_NAME]
prepared_param[ABCAdapter.KEY_NAME] = new_name
if (((ABCAdapter.KEY_TYPE not in param) or param[ABCAdapter.KEY_TYPE] in ABCAdapter.STATIC_ACCEPTED_TYPES)
and (ABCAdapter.KEY_OPTIONS in param) and (param[ABCAdapter.KEY_OPTIONS] is not None)):
add_prefix_option = ((ABCAdapter.KEY_TYPE in param) and
(param[ABCAdapter.KEY_TYPE] == xml_reader.TYPE_MULTIPLE
or param[ABCAdapter.KEY_TYPE] == xml_reader.TYPE_SELECT))
new_prefix = ABCAdapter.form_prefix(param[ABCAdapter.KEY_NAME], prefix)
prepared_param[ABCAdapter.KEY_OPTIONS] = ABCAdapter.prepare_param_names(param[ABCAdapter.KEY_OPTIONS],
new_prefix, add_prefix_option)
if (ABCAdapter.KEY_ATTRIBUTES in param) and (param[ABCAdapter.KEY_ATTRIBUTES] is not None):
new_prefix = prefix
is_dict = (ABCAdapter.KEY_TYPE in param) and (param[ABCAdapter.KEY_TYPE] == 'dict')
if add_option_prefix:
new_prefix = prefix + ABCAdapter.KEYWORD_OPTION
new_prefix = new_prefix + param[ABCAdapter.KEY_VALUE]
new_prefix += ABCAdapter.KEYWORD_SEPARATOR
if is_dict:
new_prefix = new_name + ABCAdapter.KEYWORD_PARAMS
prepared_param[ABCAdapter.KEY_ATTRIBUTES] = ABCAdapter.prepare_param_names(
param[ABCAdapter.KEY_ATTRIBUTES], new_prefix)
result.append(prepared_param)
return result
class ABCGroupAdapter(ABCAdapter):
"""
Still Abstract class.
Acts as a notifier that a given adapter has a group of sub-algorithms.
It is used for multiple simple methods interfaced in TVB through an XML description.
"""
def __init__(self, xml_file_path):
ABCAdapter.__init__(self)
if not os.path.isabs(xml_file_path):
xml_file_path = os.path.join(cfg.CURRENT_DIR, xml_file_path)
### Find the XML reader (it loads only once in the system per XML file).
self.xml_reader = xml_reader.XMLGroupReader.get_instance(xml_file_path)
def get_input_tree(self):
""" Overwrite empty method from super."""
interface_result = []
if self.algorithm_group is None:
return interface_result
tree_root = dict()
tree_root[self.KEY_NAME] = self.xml_reader.get_group_name()
tree_root[self.KEY_LABEL] = self.xml_reader.get_group_label()
tree_root[self.KEY_REQUIRED] = True
tree_root[self.KEY_TYPE] = self.TYPE_SELECT
tree_root[ELEM_OPTIONS] = self._compute_options_for_group()
interface_result.append(tree_root)
return interface_result
def _compute_options_for_group(self):
"""Sub-Algorithms"""
result = []
algorithms = self.xml_reader.get_algorithms_dictionary()
for identifier in algorithms.keys():
option = dict()
option[self.KEY_VALUE] = identifier
option[self.KEY_NAME] = algorithms[identifier][self.KEY_NAME]
algorithm = dao.get_algorithm_by_group(self.algorithm_group.id, identifier)
option['description'] = algorithm.description
inputs = algorithms[identifier][INPUTS_KEY]
option[self.KEY_ATTRIBUTES] = [inputs[key] for key in inputs.keys()]
option[ELEM_OUTPUTS] = self.xml_reader.get_outputs(identifier)
result.append(option)
return result
def get_input_for_algorithm(self, algorithm_identifier=None):
"""For a group, we will return input tree on algorithm base."""
inputs = self.xml_reader.get_inputs(algorithm_identifier)
prefix = ABCAdapter.form_prefix(self.get_algorithm_param(), option_prefix=algorithm_identifier)
result = ABCAdapter.prepare_param_names(inputs, prefix)
return result
def get_output(self):
"""For a group, we will return outputs of all sub-algorithms."""
real_outputs = []
for output_description in self.xml_reader.get_all_outputs():
full_type = output_description[xml_reader.ATT_TYPE]
real_outputs.append(self._import_type(full_type))
return real_outputs
def get_output_for_algorithm(self, algorithm_identifier):
"""For this group, we will return input tree on algorithm base."""
return self.xml_reader.get_outputs(algorithm_identifier)
def get_algorithms_dictionary(self):
"""Return the list of sub-algorithms in current group"""
return self.xml_reader.get_algorithms_dictionary()
def get_algorithm_param(self):
"""
This string, represents the argument name,
where the algorithms selection is submitted.
"""
return self.xml_reader.root_name
def get_call_code(self, algorithm_identifier):
"""From the XML interface, read the code for call method."""
return self.xml_reader.get_code(algorithm_identifier)
def get_matlab_file(self, algorithm_identifier):
"""From the XML interface read the name of the file that contains the code."""
return self.xml_reader.get_matlab_file(algorithm_identifier)
def get_import_code(self, algorithm_identifier):
"""From the XML interface, read the code for Python import. Optional"""
return self.xml_reader.get_import(algorithm_identifier)
def _import_type(self, full_type_string):
""" Execute a dynamic import and return class reverence"""
module = full_type_string[0: full_type_string.rfind(".")]
class_name = full_type_string[full_type_string.rfind(".") + 1:]
reference = __import__(module, globals(), locals(), [class_name])
self.log.debug("Imported: " + reference.__name__)
return eval("reference." + class_name)
def build_result(self, algorithm, result, inputs={}):
"""
Build an actual Python object, based on the XML interface description.
Put inside the resulting Python object, the call result.
"""
final_result = []
self.log.debug("Received results:" + str(result))
self.log.debug("Received inputs:" + str(inputs))
python_out_references = self.get_output_for_algorithm(algorithm)
for output in python_out_references:
# First prepare output attributes
kwa = {}
for field in output[xml_reader.ELEM_FIELD]:
if xml_reader.ATT_VALUE in field:
kwa[field[xml_reader.ATT_NAME]] = field[xml_reader.ATT_VALUE]
else:
expression = field[xml_reader.ATT_REFERENCE]
expression = expression.replace("$", 'result[')
expression = expression.replace("#", ']')
kwa[field[xml_reader.ATT_NAME]] = eval(expression)
kwa["storage_path"] = self.storage_path
# Import Output type and call constructor
out_class = self._import_type(output[xml_reader.ATT_TYPE])
self.log.warning("Executing INIT with parameters:" + str(kwa))
final_result.append(out_class(**kwa))
final_result.append(None)
return final_result
def get_algorithm_and_attributes(self, **kwargs):
"""
Read selected Algorithm identifier, from input arguments.
From the original full dictionary, split Algorithm name,
and actual algorithms arguments.
"""
algorithm = kwargs[self.xml_reader.root_name]
key_real_args = self.key_parameters(self.xml_reader.root_name)
algorithm_arguments = {}
if key_real_args in kwargs:
algorithm_arguments = kwargs[key_real_args]
return algorithm, algorithm_arguments
def prepare_ui_inputs(self, kwargs, validation_required=True):
"""
Overwrite the method from ABCAdapter to only append the required defaults for
the selected subalgorithm.
"""
algorithm_name = self.get_algorithm_param()
algorithm_inputs = self.get_input_for_algorithm(kwargs[algorithm_name])
self._append_required_defaults(kwargs, algorithm_inputs)
return self.convert_ui_inputs(kwargs, validation_required=validation_required)
def review_operation_inputs(self, parameters):
"""
Returns a list with the inputs from the parameters list that are instances of DataType.
"""
algorithm_name = parameters[self.get_algorithm_param()]
flat_interface = self.get_input_for_algorithm(algorithm_name)
return self._review_operation_inputs(parameters, flat_interface)
class ABCAsynchronous(ABCAdapter):
"""
Abstract class, for marking adapters that are prone to be executed
on Cluster.
"""
__metaclass__ = ABCMeta
class ABCSynchronous(ABCAdapter):
"""
Abstract class, for marking adapters that are prone to be NOT executed
on Cluster.
"""
__metaclass__ = ABCMeta
| gpl-2.0 | -6,758,727,984,573,712,000 | 45.379116 | 120 | 0.590974 | false |
mathturtle/tomviz | tomviz/python/Recon_WBP.py | 1 | 5137 | import numpy as np
from scipy.interpolate import interp1d
import tomviz.operators
import time
class ReconWBPOperator(tomviz.operators.CancelableOperator):
def transform_scalars(self, dataset, Nrecon=None, filter=None, interp=None):
"""
3D Reconstruct from a tilt series using Weighted Back-projection Method
"""
self.progress.maximum = 1
from tomviz import utils
interpolation_methods = ('linear', 'nearest', 'spline', 'cubic')
filter_methods = ('none', 'ramp', 'shepp-logan',
'cosine', 'hamming', 'hann')
# Get Tilt angles
tilt_angles = utils.get_tilt_angles(dataset)
tiltSeries = utils.get_array(dataset)
if tiltSeries is None:
raise RuntimeError("No scalars found!")
Nslice = tiltSeries.shape[0]
self.progress.maximum = Nslice
step = 0
recon = np.empty([Nslice, Nrecon, Nrecon], dtype=float, order='F')
t0 = time.time()
counter = 1
etcMessage = 'Estimated time to complete: n/a'
child = utils.make_child_dataset(dataset) #create child for recon
utils.mark_as_volume(child)
for i in range(Nslice):
if self.canceled:
return
self.progress.message = 'Slice No.%d/%d. ' % (
i + 1, Nslice) + etcMessage
recon[i, :, :] = wbp2(tiltSeries[i, :, :], tilt_angles, Nrecon,
filter_methods[filter],
interpolation_methods[interp])
step += 1
self.progress.value = step
timeLeft = (time.time() - t0) / counter * (Nslice - counter)
counter += 1
timeLeftMin, timeLeftSec = divmod(timeLeft, 60)
timeLeftHour, timeLeftMin = divmod(timeLeftMin, 60)
etcMessage = 'Estimated time to complete: %02d:%02d:%02d' % (
timeLeftHour, timeLeftMin, timeLeftSec)
# Update only once every so many steps
if (i + 1) % 40 == 0:
utils.set_array(child, recon) #add recon to child
# This copies data to the main thread
self.progress.data = child
# One last update of the child data.
utils.set_array(child, recon) #add recon to child
self.progress.data = child
returnValues = {}
returnValues["reconstruction"] = child
return returnValues
def wbp2(sinogram, angles, N=None, filter="ramp", interp="linear"):
if sinogram.ndim != 2:
raise ValueError('Sinogram must be 2D')
(Nray, Nproj) = sinogram.shape
if Nproj != angles.size:
raise ValueError('Sinogram does not match angles!')
interpolation_methods = ('linear', 'nearest', 'spline', 'cubic')
if interp not in interpolation_methods:
raise ValueError("Unknown interpolation: %s" % interp)
if not N: # if ouput size is not given
N = int(np.floor(np.sqrt(Nray**2 / 2.0)))
ang = np.double(angles) * np.pi / 180.0
# Create Fourier filter
F = makeFilter(Nray, filter)
# Pad sinogram for filtering
s = np.lib.pad(sinogram, ((0, F.size - Nray), (0, 0)),
'constant', constant_values=(0, 0))
# Apply Fourier filter
s = np.fft.fft(s, axis=0) * F
s = np.real(np.fft.ifft(s, axis=0))
# Change back to original
s = s[:Nray, :]
# Back projection
recon = np.zeros((N, N))
center_proj = Nray // 2 # Index of center of projection
[X, Y] = np.mgrid[0:N, 0:N]
xpr = X - int(N) // 2
ypr = Y - int(N) // 2
for j in range(Nproj):
t = ypr * np.cos(ang[j]) - xpr * np.sin(ang[j])
x = np.arange(Nray) - center_proj
if interp == 'linear':
bp = np.interp(t, x, s[:, j], left=0, right=0)
elif interp == 'spline':
interpolant = interp1d(
x, s[:, j], kind='slinear', bounds_error=False, fill_value=0)
bp = interpolant(t)
else:
interpolant = interp1d(
x, s[:, j], kind=interp, bounds_error=False, fill_value=0)
bp = interpolant(t)
recon = recon + bp
# Normalize
recon = recon * np.pi / 2 / Nproj
return recon
# Filter (1D) projections.
def makeFilter(Nray, filterMethod="ramp"):
# Calculate next power of 2
N2 = 2**np.ceil(np.log2(Nray))
# Make a ramp filter.
freq = np.fft.fftfreq(int(N2)).reshape(-1, 1)
omega = 2 * np.pi * freq
filter = 2 * np.abs(freq)
if filterMethod == "ramp":
pass
elif filterMethod == "shepp-logan":
filter[1:] = filter[1:] * np.sin(omega[1:]) / omega[1:]
elif filterMethod == "cosine":
filter[1:] = filter[1:] * np.cos(filter[1:])
elif filterMethod == "hamming":
filter[1:] = filter[1:] * (0.54 + 0.46 * np.cos(omega[1:] / 2))
elif filterMethod == "hann":
filter[1:] = filter[1:] * (1 + np.cos(omega[1:] / 2)) / 2
elif filterMethod == "none":
filter[:] = 1
else:
raise ValueError("Unknown filter: %s" % filterMethod)
return filter
| bsd-3-clause | -8,241,763,846,337,076,000 | 33.246667 | 80 | 0.559081 | false |
coddingtonbear/d-rats | d_rats/gps.py | 1 | 33132 | import re
import time
import tempfile
import platform
import datetime
import subst
import threading
import serial
import socket
from math import pi, cos, acos, sin, atan2
import utils
if __name__ == "__main__":
import gettext
gettext.install("D-RATS")
TEST = "$GPGGA,180718.02,4531.3740,N,12255.4599,W,1,07,1.4,50.6,M,-21.4,M,,*63 KE7JSS ,440.350+ PL127.3"
EARTH_RADIUS = 3963.1
EARTH_UNITS = "mi"
DEGREE = u"\u00b0"
DPRS_TO_APRS = {}
# The DPRS to APRS mapping is pretty horrific, but the following
# attempts to create a mapping based on looking at the javascript
# for DPRSCalc and a list of regular APRS symbols
#
# http://ham-shack.com/aprs_pri_symbols.html
# http://www.aprs-is.net/DPRSCalc.aspx
for i in range(0, 26):
asciival = ord("A") + i
char = chr(asciival)
pri = "/"
sec = "\\"
DPRS_TO_APRS["P%s" % char] = pri + char
DPRS_TO_APRS["L%s" % char] = pri + char.lower()
DPRS_TO_APRS["A%s" % char] = sec + char
DPRS_TO_APRS["S%s" % char] = sec + char.lower()
if i <= 15:
pchar = chr(ord(" ") + i)
DPRS_TO_APRS["B%s" % char] = pri + pchar
DPRS_TO_APRS["O%s" % char] = sec + pchar
elif i >= 17:
pchar = chr(ord(" ") + i + 9)
DPRS_TO_APRS["M%s" % char] = pri + pchar
DPRS_TO_APRS["N%s" % char] = sec + pchar
if i <= 5:
char = chr(ord("S") + i)
pchar = chr(ord("[") + i)
DPRS_TO_APRS["H%s" % char] = pri + pchar
DPRS_TO_APRS["D%s" % char] = sec + pchar
#for k in sorted(DPRS_TO_APRS.keys()):
# print "%s => %s" % (k, DPRS_TO_APRS[k])
APRS_TO_DPRS = {}
for k,v in DPRS_TO_APRS.items():
APRS_TO_DPRS[v] = k
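# Illustration of the two tables (both entries follow from the loops above):
#   DPRS_TO_APRS["PA"] == "/A"    and    APRS_TO_DPRS["/A"] == "PA"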
def dprs_to_aprs(symbol):
if len(symbol) < 2:
print "Invalid DPRS symbol: `%s'" % symbol
return None
else:
return DPRS_TO_APRS.get(symbol[0:2], None)
def parse_dms(string):
string = string.replace(u"\u00b0", " ")
string = string.replace('"', ' ')
string = string.replace("'", ' ')
string = string.replace(' ', ' ')
string = string.strip()
try:
(d, m, s) = string.split(' ', 3)
deg = int(d)
min = int(m)
sec = float(s)
except Exception, e:
deg = min = sec = 0
if deg < 0:
mul = -1
else:
mul = 1
deg = abs(deg)
return (deg + (min / 60.0) + (sec / 3600.0)) * mul
def set_units(units):
global EARTH_RADIUS
global EARTH_UNITS
if units == _("Imperial"):
EARTH_RADIUS = 3963.1
EARTH_UNITS = "mi"
elif units == _("Metric"):
EARTH_RADIUS = 6380.0
EARTH_UNITS = "km"
print "Set GPS units to %s" % units
def value_with_units(value):
if value < 0.5:
if EARTH_UNITS == "km":
scale = 1000
units = "m"
elif EARTH_UNITS == "mi":
scale = 5280
units = "ft"
else:
scale = 1
units = EARTH_UNITS
else:
scale = 1
units = EARTH_UNITS
return "%.2f %s" % (value * scale, units)
def NMEA_checksum(string):
checksum = 0
for i in string:
checksum ^= ord(i)
return "*%02x" % checksum
def GPSA_checksum(string):
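    # This is a reflected CRC-16 (polynomial 0x8408, initial value 0xFFFF,
    # bit-inverted result), the framing checksum used for D-STAR GPS-A lines.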
def calc(buf):
icomcrc = 0xffff
for _char in buf:
char = ord(_char)
for i in range(0, 8):
xorflag = (((icomcrc ^ char) & 0x01) == 0x01)
icomcrc = (icomcrc >> 1) & 0x7fff
if xorflag:
icomcrc ^= 0x8408
char = (char >> 1) & 0x7f
return (~icomcrc) & 0xffff
return calc(string)
def DPRS_checksum(callsign, msg):
csum = 0
string = "%-8s,%s" % (callsign, msg)
for i in string:
csum ^= ord(i)
return "*%02X" % csum
def deg2rad(deg):
return deg * (pi / 180)
def rad2deg(rad):
return rad / (pi / 180)
def dm2deg(deg, min):
return deg + (min / 60.0)
def deg2dm(decdeg):
deg = int(decdeg)
min = (decdeg - deg) * 60.0
return deg, min
def nmea2deg(nmea, dir="N"):
deg = int(nmea) / 100
try:
min = nmea % (deg * 100)
except ZeroDivisionError, e:
min = int(nmea)
if dir == "S" or dir == "W":
m = -1
else:
m = 1
return dm2deg(deg, min) * m
def deg2nmea(deg):
deg, min = deg2dm(deg)
return (deg * 100) + min
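# nmea2deg()/deg2nmea() are inverses for well-formed ddmm.mmmm values, e.g.
# nmea2deg(4531.5, "N") gives 45.525 and deg2nmea(45.525) gives back 4531.5
# (up to floating point rounding).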
def meters2feet(meters):
return meters * 3.2808399
def feet2meters(feet):
return feet * 0.3048
def distance(lat_a, lon_a, lat_b, lon_b):
lat_a = deg2rad(lat_a)
lon_a = deg2rad(lon_a)
lat_b = deg2rad(lat_b)
lon_b = deg2rad(lon_b)
earth_radius = EARTH_RADIUS
#print "cos(La)=%f cos(la)=%f" % (cos(lat_a), cos(lon_a))
#print "cos(Lb)=%f cos(lb)=%f" % (cos(lat_b), cos(lon_b))
#print "sin(la)=%f" % sin(lon_a)
#print "sin(lb)=%f" % sin(lon_b)
#print "sin(La)=%f sin(Lb)=%f" % (sin(lat_a), sin(lat_b))
#print "cos(lat_a) * cos(lon_a) * cos(lat_b) * cos(lon_b) = %f" % (\
# cos(lat_a) * cos(lon_a) * cos(lat_b) * cos(lon_b))
#print "cos(lat_a) * sin(lon_a) * cos(lat_b) * sin(lon_b) = %f" % (\
# cos(lat_a) * sin(lon_a) * cos(lat_b) * sin(lon_b))
#print "sin(lat_a) * sin(lat_b) = %f" % (sin(lat_a) * sin(lat_b))
tmp = (cos(lat_a) * cos(lon_a) * \
cos(lat_b) * cos(lon_b)) + \
(cos(lat_a) * sin(lon_a) * \
cos(lat_b) * sin(lon_b)) + \
(sin(lat_a) * sin(lat_b))
# Correct round-off error (which is just *silly*)
if tmp > 1:
tmp = 1
elif tmp < -1:
tmp = -1
distance = acos(tmp)
return distance * earth_radius
def parse_date(string, fmt):
try:
return datetime.datetime.strptime(string, fmt)
except AttributeError, e:
print "Enabling strptime() workaround for Python <= 2.4.x"
vals = {}
for c in "mdyHMS":
i = fmt.index(c)
vals[c] = int(string[i-1:i+1])
if len(vals.keys()) != (len(fmt) / 2):
raise Exception("Not all date bits converted")
return datetime.datetime(vals["y"] + 2000,
vals["m"],
vals["d"],
vals["H"],
vals["M"],
vals["S"])
class GPSPosition(object):
"""Represents a position on the globe, either from GPS data or a static
positition"""
def _from_coords(self, lat, lon, alt=0):
try:
self.latitude = float(lat)
except ValueError:
self.latitude = parse_dms(lat)
try:
self.longitude = float(lon)
except ValueError:
self.longitude = parse_dms(lon)
self.altitude = float(alt)
self.satellites = 3
self.valid = True
def _parse_dprs_comment(self):
symbol = self.comment[0:4].strip()
astidx = self.comment.rindex("*")
checksum = self.comment[astidx:]
_checksum = DPRS_checksum(self.station, self.comment[:astidx])
if int(_checksum[1:], 16) != int(checksum[1:], 16):
print "CHECKSUM(%s): %s != %s" % (self.station,
int(_checksum[1:], 16),
int(checksum[1:], 16))
#print "Failed to parse DPRS comment:"
#print " Comment: |%s|" % self.comment
#print " Check: %s %s (%i)" % (checksum, _checksum, astidx)
raise Exception("DPRS checksum failed")
self.APRSIcon = dprs_to_aprs(symbol)
self.comment = self.comment[4:astidx].strip()
def __init__(self, lat=0, lon=0, station="UNKNOWN"):
self.valid = False
self.altitude = 0
self.satellites = 0
self.station = station
self.comment = ""
self.current = None
self.date = datetime.datetime.now()
self.speed = None
self.direction = None
self.APRSIcon = None
self._original_comment = ""
self._from_coords(lat, lon)
def __iadd__(self, update):
self.station = update.station
if not update.valid:
return self
if update.satellites:
self.satellites = update.satellites
if update.altitude:
self.altitude = update.altitude
self.latitude = update.latitude
self.longitude = update.longitude
self.date = update.date
if update.speed:
self.speed = update.speed
if update.direction:
self.direction = update.direction
if update.comment:
self.comment = update.comment
self._original_comment = update._original_comment
if update.APRSIcon:
self.APRSIcon = update.APRSIcon
return self
def __str__(self):
if self.valid:
if self.current:
dist = self.distance_from(self.current)
bear = self.current.bearing_to(self)
distance = " - %.1f %s " % (dist, EARTH_UNITS) + \
_("away") + \
" @ %.1f " % bear + \
_("degrees")
else:
distance = ""
if self.comment:
comment = " (%s)" % self.comment
else:
comment = ""
if self.speed and self.direction:
if EARTH_UNITS == "mi":
speed = "%.1f mph" % (float(self.speed) * 1.15077945)
elif EARTH_UNITS == "m":
speed = "%.1f km/h" % (float(self.speed) * 1.852)
else:
speed = "%.2f knots" % float(self.speed)
dir = " (" + _("Heading") +" %.0f at %s)" % (self.direction,
speed)
else:
dir = ""
if EARTH_UNITS == "mi":
alt = "%i ft" % meters2feet(self.altitude)
else:
alt = "%i m" % self.altitude
return "%s " % self.station + \
_("reporting") + \
" %.4f,%.4f@%s at %s%s%s%s" % ( \
self.latitude,
self.longitude,
alt,
self.date.strftime("%H:%M:%S"),
subst.subst_string(comment),
distance,
dir)
else:
return "(" + _("Invalid GPS data") + ")"
def _NMEA_format(self, val, latitude):
if latitude:
if val > 0:
d = "N"
else:
d = "S"
else:
if val > 0:
d = "E"
else:
d = "W"
return "%.3f,%s" % (deg2nmea(abs(val)), d)
def station_format(self):
if " " in self.station:
call, extra = self.station.split(" ", 1)
sta = "%-7.7s%1.1s" % (call.strip(),
extra.strip())
else:
sta = self.station
return sta
def to_NMEA_GGA(self, ssid=" "):
"""Returns an NMEA-compliant GPGGA sentence"""
date = time.strftime("%H%M%S")
lat = self._NMEA_format(self.latitude, True)
lon = self._NMEA_format(self.longitude, False)
data = "GPGGA,%s,%s,%s,1,%i,0,%i,M,0,M,," % ( \
date,
lat,
lon,
self.satellites,
self.altitude)
sta = self.station_format()
# If we had an original comment (with some encoding), use that instead
if self._original_comment:
com = self._original_comment
else:
com = self.comment
return "$%s%s\r\n%-8.8s,%-20.20s\r\n" % (data,
NMEA_checksum(data),
sta,
com)
def to_NMEA_RMC(self):
"""Returns an NMEA-compliant GPRMC sentence"""
tstamp = time.strftime("%H%M%S")
dstamp = time.strftime("%d%m%y")
lat = self._NMEA_format(self.latitude, True)
lon = self._NMEA_format(self.longitude, False)
if self.speed:
speed = "%03.1f" % self.speed
else:
speed = "000.0"
if self.direction:
dir = "%03.1f" % self.direction
else:
dir = "000.0"
data = "GPRMC,%s,A,%s,%s,%s,%s,%s,000.0,W" % ( \
tstamp,
lat,
lon,
speed,
dir,
dstamp)
sta = self.station_format()
return "$%s%s\r\n%-8.8s,%-20.20s\r\n" % (data,
NMEA_checksum(data),
sta,
self.comment)
def to_APRS(self, dest="APRATS", symtab="/", symbol=">"):
"""Returns a GPS-A (APRS-compliant) string"""
stamp = time.strftime("%H%M%S", time.gmtime())
if " " in self.station:
sta = self.station.replace(" ", "-")
else:
sta = self.station
s = "%s>%s,DSTAR*:/%sh" % (sta, dest, stamp)
if self.latitude > 0:
ns = "N"
Lm = 1
else:
ns = "S"
Lm = -1
if self.longitude > 0:
ew = "E"
lm = 1
else:
ew = "W"
lm = -1
s += "%07.2f%s%s%08.2f%s%s" % (deg2nmea(self.latitude * Lm), ns,
symtab,
deg2nmea(self.longitude * lm), ew,
symbol)
if self.speed and self.direction:
s += "%03.0f/%03.0f" % (float(self.direction), float(self.speed))
if self.altitude:
s += "/A=%06i" % meters2feet(float(self.altitude))
else:
s += "/"
if self.comment:
l = 43
if self.altitude:
l -= len("/A=xxxxxx")
s += "%s" % self.comment[:l]
s += "\r"
return "$$CRC%04X,%s\n" % (GPSA_checksum(s), s)
def set_station(self, station, comment="D-RATS"):
self.station = station
self.comment = comment
self._original_comment = comment
if len(self.comment) >=7 and "*" in self.comment[-3:-1]:
self._parse_dprs_comment()
def distance_from(self, pos):
return distance(self.latitude, self.longitude,
pos.latitude, pos.longitude)
def bearing_to(self, pos):
lat_me = deg2rad(self.latitude)
lon_me = deg2rad(self.longitude)
lat_u = deg2rad(pos.latitude)
lon_u = deg2rad(pos.longitude)
lat_d = deg2rad(pos.latitude - self.latitude)
lon_d = deg2rad(pos.longitude - self.longitude)
y = sin(lon_d) * cos(lat_u)
x = cos(lat_me) * sin(lat_u) - \
sin(lat_me) * cos(lat_u) * cos(lon_d)
bearing = rad2deg(atan2(y, x))
return (bearing + 360) % 360
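    # bearing_to() above implements the standard great-circle forward azimuth,
    #   atan2(sin(dlon) * cos(lat2),
    #         cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dlon)),
    # normalized from (-180, 180] into the [0, 360) range.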
def set_relative_to_current(self, current):
self.current = current
def coordinates(self):
return "%.4f,%.4f" % (self.latitude, self.longitude)
def fuzzy_to(self, pos):
dir = self.bearing_to(pos)
dirs = ["N", "NNE", "NE", "ENE", "E",
"ESE", "SE", "SSE", "S",
"SSW", "SW", "WSW", "W",
"WNW", "NW", "NNW"]
delta = 22.5
angle = 0
direction = "?"
for i in dirs:
if dir > angle and dir < (angle + delta):
direction = i
angle += delta
return "%.1f %s %s" % (self.distance_from(pos),
EARTH_UNITS,
direction)
class NMEAGPSPosition(GPSPosition):
"""A GPSPosition initialized from a NMEA sentence"""
def _test_checksum(self, string, csum):
try:
idx = string.index("*")
except:
print "String does not contain '*XY' checksum"
return False
segment = string[1:idx]
csum = csum.upper()
_csum = NMEA_checksum(segment).upper()
if csum != _csum:
print "Failed checksum: %s != %s" % (csum, _csum)
return csum == _csum
def _parse_GPGGA(self, string):
elements = string.split(",", 14)
if len(elements) < 15:
raise Exception("Unable to split GPGGA" % len(elements))
t = time.strftime("%m%d%y") + elements[1]
if "." in t:
t = t.split(".")[0]
self.date = parse_date(t, "%m%d%y%H%M%S")
self.latitude = nmea2deg(float(elements[2]), elements[3])
self.longitude = nmea2deg(float(elements[4]), elements[5])
print "%f,%f" % (self.latitude, self.longitude)
self.satellites = int(elements[7])
self.altitude = float(elements[9])
m = re.match("^([0-9]*)(\*[A-z0-9]{2})\r?\n?(.*)$", elements[14])
if not m:
raise Exception("No checksum (%s)" % elements[14])
csum = m.group(2)
if "," in m.group(3):
sta, com = m.group(3).split(",", 1)
if not sta.strip().startswith("$"):
self.station = utils.filter_to_ascii(sta.strip()[0:8])
self.comment = utils.filter_to_ascii(com.strip()[0:20])
self._original_comment = self.comment
if len(self.comment) >=7 and "*" in self.comment[-3:-1]:
self._parse_dprs_comment()
self.valid = self._test_checksum(string, csum)
def _parse_GPRMC(self, string):
if "\r\n" in string:
nmea, station = string.split("\r\n", 1)
else:
nmea = string
station = ""
elements = nmea.split(",", 12)
if len(elements) < 12:
raise Exception("Unable to split GPRMC (%i)" % len(elements))
t = elements[1]
d = elements[9]
if "." in t:
t = t.split(".", 2)[0]
self.date = parse_date(d+t, "%d%m%y%H%M%S")
self.latitude = nmea2deg(float(elements[3]), elements[4])
self.longitude = nmea2deg(float(elements[5]), elements[6])
self.speed = float(elements[7])
self.direction = float(elements[8])
if "*" in elements[11]:
end = 11 # NMEA <=2.0
elif "*" in elements[12]:
end = 12 # NMEA 2.3
else:
raise Exception("GPRMC has no checksum in 12 or 13")
m = re.match("^.?(\*[A-z0-9]{2})", elements[end])
if not m:
print "Invalid end: %s" % elements[end]
return
csum = m.group(1)
if "," in station:
sta, com = station.split(",", 1)
self.station = utils.filter_to_ascii(sta.strip())
self.comment = utils.filter_to_ascii(com.strip())
self._original_comment = self.comment
if len(self.comment) >= 7 and "*" in self.comment[-3:-1]:
self._parse_dprs_comment()
if elements[2] != "A":
self.valid = False
print "GPRMC marked invalid by GPS (%s)" % elements[2]
else:
print "GPRMC is valid"
self.valid = self._test_checksum(string, csum)
def _from_NMEA_GPGGA(self, string):
string = string.replace('\r', ' ')
string = string.replace('\n', ' ')
try:
self._parse_GPGGA(string)
except Exception, e:
import traceback
import sys
traceback.print_exc(file=sys.stdout)
print "Invalid GPS data: %s" % e
self.valid = False
def _from_NMEA_GPRMC(self, string):
try:
self._parse_GPRMC(string)
except Exception, e:
import traceback
import sys
traceback.print_exc(file=sys.stdout)
print "Invalid GPS data: %s" % e
self.valid = False
def __init__(self, sentence, station=_("UNKNOWN")):
GPSPosition.__init__(self)
if sentence.startswith("$GPGGA"):
self._from_NMEA_GPGGA(sentence)
elif sentence.startswith("$GPRMC"):
self._from_NMEA_GPRMC(sentence)
else:
print "Unsupported GPS sentence type: %s" % sentence
class APRSGPSPosition(GPSPosition):
def _parse_date(self, string):
prefix = string[0]
suffix = string[-1]
digits = string[1:-1]
if suffix == "z":
ds = digits[0:2] + \
time.strftime("%m%y", time.gmtime()) + \
digits[2:] + "00"
elif suffix == "/":
ds = digits[0:2] + time.strftime("%m%y") + digits[2:] + "00"
elif suffix == "h":
ds = time.strftime("%d%m%y", time.gmtime()) + digits
else:
print "Unknown APRS date suffix: `%s'" % suffix
return datetime.datetime.now()
d = parse_date(ds, "%d%m%y%H%M%S")
if suffix in "zh":
delta = datetime.datetime.utcnow() - datetime.datetime.now()
else:
delta = datetime.timedelta(0)
return d - delta
def _parse_GPSA(self, string):
m = re.match("^\$\$CRC([A-Z0-9]{4}),(.*)$", string)
if not m:
return
crc = m.group(1)
_crc = "%04X" % GPSA_checksum(m.group(2))
if crc != _crc:
print "APRS CRC mismatch: %s != %s (%s)" % (crc, _crc, m.group(2))
return
elements = string.split(",")
if not elements[0].startswith("$$CRC"):
print "Missing $$CRC..."
return
self.station, dst = elements[1].split(">")
path, data = elements[2].split(":")
# 1 = Entire stamp or ! or =
# 2 = stamp prefix
# 3 = stamp suffix
# 4 = latitude
# 5 = N/S
# 6 = symbol table
# 7 = longitude
# 8 = E/W
# 9 = symbol
#10 = comment
#11 = altitude string
expr = "^(([@/])[0-9]{6}([/hz])|!|=)" + \
"([0-9]{1,4}\.[0-9]{2})([NS])(.)?" + \
"([0-9]{5}\.[0-9]{2})([EW])(.)" + \
"([^/]*)(/A=[0-9]{6})?"
m = re.search(expr, data)
if not m:
print "Did not match GPS-A: `%s'" % data
return
if m.group(1) in "!=":
self.date = datetime.datetime.now()
elif m.group(2) in "@/":
self.date = self._parse_date(m.group(1))
else:
print "Unknown timestamp prefix: %s" % m.group(1)
self.date = datetime.datetime.now()
self.latitude = nmea2deg(float(m.group(4)), m.group(5))
self.longitude = nmea2deg(float(m.group(7)), m.group(8))
self.comment = m.group(10).strip()
self._original_comment = self.comment
self.APRSIcon = m.group(6) + m.group(9)
if len(m.groups()) == 11 and m.group(11):
_, alt = m.group(11).split("=")
self.altitude = feet2meters(int(alt))
self.valid = True
def _from_APRS(self, string):
self.valid = False
try:
self._parse_GPSA(string)
except Exception, e:
print "Invalid APRS: %s" % e
return False
return self.valid
def __init__(self, message):
GPSPosition.__init__(self)
self._from_APRS(message)
class MapImage(object):
def __init__(self, center):
self.key = "ABQIAAAAWot3KuWpenfCAGfQ65FdzRTaP0xjRaMPpcw6bBbU2QUEXQBgHBR5Rr2HTGXYVWkcBFNkPvxtqV4VLg"
self.center = center
self.markers = [center]
def add_markers(self, markers):
self.markers += markers
def get_image_url(self):
el = [ "key=%s" % self.key,
"center=%s" % self.center.coordinates(),
"size=400x400"]
mstr = "markers="
index = ord("a")
for m in self.markers:
mstr += "%s,blue%s|" % (m.coordinates(), chr(index))
index += 1
el.append(mstr)
return "http://maps.google.com/staticmap?%s" % ("&".join(el))
def station_table(self):
table = ""
index = ord('A')
for m in self.markers:
table += "<tr><td>%s</td><td>%s</td><td>%s</td>\n" % (\
chr(index),
m.station,
m.coordinates())
index += 1
return table
def make_html(self):
return """
<html>
<head>
<title>Known stations</title>
</head>
<body>
<h1> Known Stations </h1>
<img src="%s"/><br/><br/>
<table border="1">
%s
</table>
</body>
</html>
""" % (self.get_image_url(), self.station_table())
def display_in_browser(self):
f = tempfile.NamedTemporaryFile(suffix=".html")
name = f.name
f.close()
f = file(name, "w")
f.write(self.make_html())
f.flush()
f.close()
p = platform.get_platform()
p.open_html_file(f.name)
class GPSSource(object):
def __init__(self, port, rate=4800):
self.port = port
self.enabled = False
self.broken = None
try:
self.serial = serial.Serial(port=port, baudrate=rate, timeout=1)
except Exception, e:
print "Unable to open port `%s': %s" % (port, e)
self.broken = _("Unable to open GPS port")
self.thread = None
self.last_valid = False
self.position = GPSPosition()
def start(self):
if self.broken:
print "Not starting broken GPSSource"
return
self.invalid = 100
self.enabled = True
self.thread = threading.Thread(target=self.gpsthread)
self.thread.setDaemon(True)
self.thread.start()
def stop(self):
if self.thread and self.enabled:
self.enabled = False
self.thread.join()
self.serial.close()
def gpsthread(self):
while self.enabled:
data = self.serial.read(1024)
lines = data.split("\r\n")
for line in lines:
if line.startswith("$GPGGA") or \
line.startswith("$GPRMC"):
position = NMEAGPSPosition(line)
if position.valid and line.startswith("$GPRMC"):
self.invalid = 0
elif self.invalid < 10:
self.invalid += 1
if position.valid and self.position.valid:
self.position += position
print _("ME") + ": %s" % self.position
elif position.valid:
self.position = position
else:
print "Could not parse: %s" % line
def get_position(self):
return self.position
def status_string(self):
if self.broken:
return self.broken
elif self.invalid < 10 and self.position.satellites >= 3:
return _("GPS Locked") + " (%i sats)" % self.position.satellites
else:
return _("GPS Not Locked")
class NetworkGPSSource(GPSSource):
def __init__(self, port):
self.port = port
self.enabled = False
self.thread = None
self.position = GPSPosition()
self.last_valid = False
self.sock = None
self.broken = None
def start(self):
self.enabled = True
self.thread = threading.Thread(target=self.gpsthread)
self.thread.setDaemon(True)
self.thread.start()
def stop(self):
if self.thread and self.enabled:
self.enabled = False
self.thread.join()
def connect(self):
try:
_, host, port = self.port.split(":", 3)
port = int(port)
except ValueError, e:
print "Unable to parse %s (%s)" % (self.port, e)
self.broken = _("Unable to parse address")
return False
print "Connecting to %s:%i" % (host, port)
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.sock.settimeout(10)
except Exception, e:
print "Unable to connect: %s" % e
self.broken = _("Unable to connect") + ": %s" % e
self.sock = None
return False
self.sock.send("r\n")
return True
def gpsthread(self):
while self.enabled:
if not self.sock:
if not self.connect():
time.sleep(1)
continue
try:
data = self.sock.recv(1024)
except Exception, e:
self.sock.close()
self.sock = None
print _("GPSd Socket closed")
continue
line = data.strip()
if not (line.startswith("$GPGGA") or \
line.startswith("$GPRMC")):
continue
pos = NMEAGPSPosition(line)
self.last_valid = pos.valid
if pos.valid and self.position.valid:
self.position += pos
elif pos.valid:
self.position = pos
else:
print "Could not parse: %s" % line
def get_position(self):
return self.position
def status_string(self):
if self.broken:
return self.broken
elif self.last_valid and self.position.satellites >= 3:
return _("GPSd Locked") + " (%i sats)" % self.position.satellites
else:
return _("GPSd Not Locked")
class StaticGPSSource(GPSSource):
def __init__(self, lat, lon, alt=0):
self.lat = lat
self.lon = lon
self.alt = alt
self.position = GPSPosition(self.lat, self.lon)
self.position.altitude = int(float(alt))
if EARTH_UNITS == "mi":
# This is kinda ugly, but assume we're given altitude in the same
# type of units as we've been asked to display
self.position.altitude = feet2meters(self.position.altitude)
def start(self):
pass
def stop(self):
pass
def get_position(self):
return self.position
def status_string(self):
return _("Static position")
def parse_GPS(string):
fixes = []
while "$" in string:
try:
if "$GPGGA" in string:
fixes.append(NMEAGPSPosition(string[string.index("$GPGGA"):]))
string = string[string.index("$GPGGA")+6:]
elif "$GPRMC" in string:
fixes.append(NMEAGPSPosition(string[string.index("$GPRMC"):]))
string = string[string.index("$GPRMC")+6:]
elif "$$CRC" in string:
return APRSGPSPosition(string[string.index("$$CRC"):])
else:
string = string[string.index("$")+1:]
except Exception, e:
print "Exception during GPS parse: %s" % e
string = string[string.index("$")+1:]
if not fixes:
return None
fix = fixes[0]
fixes = fixes[1:]
for extra in fixes:
print "Appending fix: %s" % extra
fix += extra
return fix
if __name__ == "__main__":
nmea_strings = [
"$GPRMC,010922,A,4603.6695,N,07307.3033,W,0.6,66.8,060508,16.1,W,A*1D\r\nVE2SE 9,MV VE2SE@RAC.CA*32",
"$GPGGA,203008.78,4524.9729,N,12246.9580,W,1,03,3.8,00133,M,,,,*39",
"$GPGGA,183324.518,4533.0875,N,12254.5939,W,2,04,3.4,48.6,M,-19.6,M,1.2,0000*74",
"$GPRMC,215348,A,4529.3672,N,12253.2060,W,0.0,353.8,030508,17.5,E,D*3C",
"$GPGGA,075519,4531.254,N,12259.400,W,1,3,0,0.0,M,0,M,,*55\r\nK7HIO ,GPS Info",
"$GPRMC,074919.04,A,4524.9698,N,12246.9520,W,00.0,000.0,260508,19.,E*79",
"$GPRMC,123449.089,A,3405.1123,N,08436.4301,W,000.0,000.0,021208,,,A*71",
"$GPRMC,123449.089,A,3405.1123,N,08436.4301,W,000.0,000.0,021208,,,A*71\r\nKK7DS M,LJ DAN*C",
"$GPRMC,230710,A,2748.1414,N,08238.5556,W,000.0,033.1,111208,004.3,W*77",
]
print "-- NMEA --"
for s in nmea_strings:
p = NMEAGPSPosition(s)
if p.valid:
print "Pass: %s" % str(p)
else:
print "** FAIL: %s" % s
aprs_strings = [
"$$CRCCE3E,AE5PL-T>API282,DSTAR*:!3302.39N/09644.66W>/\r",
"$$CRC1F72,KI4IFW-1>APRATS,DSTAR*:@291930/4531.50N/12254.98W>APRS test beacon /A=000022",
"$$CRC80C3,VA2PBI>APU25N,DSTAR*:=4539.33N/07330.28W-73 de Pierre D-Star Montreal {UIV32N}",
"$$CRCA31F,VA2PBI>API282,DSTAR*:/221812z4526.56N07302.34W/\r",
'$$CRCF471,AB9FT-ML>APRATS,DSTAR*:@214235h0.00S/00000.00W>ON D-RATS at Work\r',
]
print "\n-- GPS-A --"
for s in aprs_strings:
p = APRSGPSPosition(s)
if p.valid:
print "Pass: %s" % str(p)
else:
print "** FAIL: %s" % s
| gpl-3.0 | 4,689,796,423,073,490,000 | 28.063158 | 111 | 0.489165 | false |
KevinOConnor/klipper | klippy/extras/query_endstops.py | 1 | 2160 | # Utility for querying the current state of all endstops
#
# Copyright (C) 2018-2019 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
class QueryEndstops:
def __init__(self, config):
self.printer = config.get_printer()
self.endstops = []
self.last_state = []
# Register webhook if server is available
webhooks = self.printer.lookup_object('webhooks')
webhooks.register_endpoint(
"query_endstops/status", self._handle_web_request)
gcode = self.printer.lookup_object('gcode')
gcode.register_command("QUERY_ENDSTOPS", self.cmd_QUERY_ENDSTOPS,
desc=self.cmd_QUERY_ENDSTOPS_help)
gcode.register_command("M119", self.cmd_QUERY_ENDSTOPS)
def register_endstop(self, mcu_endstop, name):
self.endstops.append((mcu_endstop, name))
def get_status(self, eventtime):
return {'last_query': {name: value for name, value in self.last_state}}
def _handle_web_request(self, web_request):
gc_mutex = self.printer.lookup_object('gcode').get_mutex()
toolhead = self.printer.lookup_object('toolhead')
with gc_mutex:
print_time = toolhead.get_last_move_time()
self.last_state = [(name, mcu_endstop.query_endstop(print_time))
for mcu_endstop, name in self.endstops]
web_request.send({name: ["open", "TRIGGERED"][not not t]
for name, t in self.last_state})
cmd_QUERY_ENDSTOPS_help = "Report on the status of each endstop"
def cmd_QUERY_ENDSTOPS(self, gcmd):
# Query the endstops
print_time = self.printer.lookup_object('toolhead').get_last_move_time()
self.last_state = [(name, mcu_endstop.query_endstop(print_time))
for mcu_endstop, name in self.endstops]
# Report results
msg = " ".join(["%s:%s" % (name, ["open", "TRIGGERED"][not not t])
for name, t in self.last_state])
gcmd.respond_raw(msg)
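        # Example response for a typical cartesian setup (endstop names come
        # from the registered steppers, so they vary by config):
        #   "x:open y:open z:TRIGGERED"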
def load_config(config):
return QueryEndstops(config)
| gpl-3.0 | -2,217,313,421,855,998,500 | 47 | 80 | 0.615278 | false |
tantalishe/object_detection | classificator/make_sklearn_dataset.py | 1 | 1853 | import cv2
import numpy as np
import math
import features as ft
NUMBER_TRAINING_EXAMPLES = 150
NUMBER_TEST_EXAMPLES = 10
NUMBER_CLASSES = 4
FEATURE_TYPE = "humoments"
file_path_list = ["data/dataset1/scew_test/", "data/dataset1/nut/", "data/dataset1/profile_20/", "data/dataset1/profile_40/"]
file_saving_path = "data/"
data_list = [] # MAKE TRAIN DATASET
target_list = []
for target_number in range(NUMBER_CLASSES):
file_path = file_path_list[target_number]
for n in range(NUMBER_TRAINING_EXAMPLES):
number = n + 1 # LOADING CONTOUR
filename = file_path + str(number) + ".npz"
data = np.load(filename)
cnt = data['arr_0']
feature_vector = ft.finding_features(cnt, ftype = FEATURE_TYPE) # FINDING FEATURES AND APPEND IT TO SET
data_list.append(feature_vector)
target_list.append(target_number)
data_list = np.asarray(data_list) # TRANSFORMING INTO NP FORMAT
target_list = np.asarray(target_list)
np.save(file_saving_path + 'cnt_data', data_list) # SAVING
np.save(file_saving_path + 'cnt_targets', target_list)
print("Train dataset done")
test_data_list = [] # MAKE TEST DATASET
test_target_list = []
for target_number in range(NUMBER_CLASSES):
file_path = file_path_list[target_number]
for n in range(NUMBER_TEST_EXAMPLES):
number = NUMBER_TRAINING_EXAMPLES + n + 1 # LOADING CONTOUR
filename = file_path + str(number) + ".npz"
data = np.load(filename)
cnt = data['arr_0']
feature_vector = ft.finding_features(cnt, ftype = FEATURE_TYPE) # FINDING FEATURES AND APPEND IT TO SET
test_data_list.append(feature_vector)
test_target_list.append(target_number)
test_data_list = np.asarray(test_data_list)
test_target_list = np.asarray(test_target_list)
np.save(file_saving_path + 'cnt_test_data', test_data_list)
np.save(file_saving_path + 'cnt_test_targets', test_target_list)
print("Test dataset done")
| gpl-3.0 | -1,925,831,615,177,840,600 | 28.887097 | 125 | 0.716136 | false |
chutsu/robotics | prototype/tests/calibration/test_gimbal.py | 1 | 7395 | import unittest
from os.path import join
from math import pi
import numpy as np
import cv2
# NOTE: the next import path is an assumption; adjust it if the Chessboard
# helper used by the tests below lives elsewhere in the package.
from prototype.calibration.chessboard import Chessboard
import prototype.tests as test
from prototype.models.gimbal import GimbalModel
from prototype.calibration.camera import CameraIntrinsics
from prototype.calibration.gimbal import DataLoader
from prototype.calibration.gimbal import PreprocessData
from prototype.calibration.gimbal import GimbalDataGenerator
from prototype.calibration.gimbal import GimbalCalibrator
class GimbalCalibratorTest(unittest.TestCase):
def setUp(self):
self.data_path = join(test.TEST_DATA_PATH, "calib_data")
self.calib = GimbalCalibrator(
data_path=self.data_path,
image_dirs=["static_camera", "gimbal_camera"],
intrinsic_files=["static_camera.yaml", "gimbal_camera.yaml"],
joint_file="joint.csv",
nb_rows=6,
nb_cols=7,
square_size=0.0285
)
def test_setup_problem(self):
x, Z, K_s, K_d, D_s, D_d = self.calib.setup_problem()
# self.assertEqual((28, ), x.shape)
# self.assertEqual(5, len(Z))
def test_reprojection_error(self):
x, Z, K_s, K_d, D_s, D_d = self.calib.setup_problem()
args = [Z, K_s, K_d, D_s, D_d]
result = self.calib.reprojection_error(x, *args)
print(result)
def test_optimize(self):
self.calib.optimize()
def test_optimize_preprocessed(self):
gimbal_model = GimbalModel(
tau_s=np.array([0.045, 0.075, -0.085, 0.0, 0.0, pi / 2.0]),
tau_d=np.array([0.0, 0.015, 0.0, 0.0, 0.0, -pi / 2.0]),
w1=np.array([0.0, 0.0, 0.075]),
w2=np.array([0.0, 0.0, 0.0])
)
# gimbal_model.set_attitude([deg2rad(0), deg2rad(0)])
# plot_gimbal = PlotGimbal(gimbal=gimbal_model)
# plot_gimbal.plot()
# plt.show()
data_path = "/home/chutsu/Dropbox/calib_data"
calib = GimbalCalibrator(
preprocessed=True,
gimbal_model=gimbal_model,
data_path=data_path,
data_dirs=["cam0", "cam1"],
intrinsic_files=["static_camera.yaml", "gimbal_camera.yaml"],
joint_file="joint.csv"
)
calib.optimize()
class GimbalDataGeneratorTest(unittest.TestCase):
def setUp(self):
self.data_path = join(test.TEST_DATA_PATH, "calib_data2")
self.intrinsics_file = join(self.data_path, "camera.yaml")
self.data = GimbalDataGenerator(self.intrinsics_file)
def test_generate(self):
Z, K_s, K_d, D_s, D_d, joint_data = self.data.generate()
self.data.gimbal.set_attitude([0.0, 0.0])
self.data.plot()
calibrator = GimbalCalibrator(sim_mode=True,
Z=Z,
K_s=K_s,
K_d=K_d,
D_s=D_s,
D_d=D_d,
joint_data=joint_data)
# calibrator.gimbal_model.tau_s[0] += 0.1
# calibrator.gimbal_model.tau_s[1] += 0.2
# calibrator.gimbal_model.tau_s[2] += 0.2
#
# calibrator.gimbal_model.tau_d[0] += 0.1
# calibrator.gimbal_model.tau_d[1] += 0.2
# calibrator.gimbal_model.tau_d[2] += 0.2
# x, Z, K_s, K_d, D_s, D_d = calibrator.setup_problem()
# args = [Z, K_s, K_d, D_s, D_d]
# result = calibrator.reprojection_error(x, *args)
# print(result)
# calibrator.optimize()
class DataLoaderTest(unittest.TestCase):
def setUp(self):
self.data_path = join(test.TEST_DATA_PATH, "calib_data")
def test_load_imu_data(self):
loader = DataLoader(
data_path=self.data_path,
image_dirs=["static_camera", "gimbal_camera"],
intrinsic_files=["static_camera.yaml", "gimbal_camera.yaml"],
joint_file="joint.csv",
nb_rows=6,
nb_cols=7,
square_size=0.29
)
joint_data = loader.load_joint_data()
self.assertEqual(5, joint_data.shape[0])
self.assertEqual(3, joint_data.shape[1])
def test_load(self):
loader = DataLoader(
data_path=self.data_path,
inspect_data=True,
image_dirs=["static_camera", "gimbal_camera"],
intrinsic_files=["static_camera.yaml", "gimbal_camera.yaml"],
joint_file="joint.csv",
nb_rows=6,
nb_cols=7,
square_size=0.29
)
loader.load()
def test_load_preprocessed(self):
data_path = "/home/chutsu/Dropbox/calib_data/"
loader = DataLoader(
preprocessed=True,
data_path=data_path,
data_dirs=["cam0", "cam1"],
intrinsic_files=["static_camera.yaml", "gimbal_camera.yaml"],
joint_file="joint.csv"
)
loader.load()
class PreprocessDataTest(unittest.TestCase):
def test_ideal2pixels(self):
# Setup
self.data_path = join(test.TEST_DATA_PATH, "calib_data")
images_dir = join(self.data_path, "gimbal_camera")
intrinsics_file = join(self.data_path, "static_camera.yaml")
intrinsics = CameraIntrinsics(intrinsics_file)
chessboard = Chessboard(nb_rows=6, nb_cols=7, square_size=0.29)
self.data = PreprocessData("IMAGES",
images_dir=images_dir,
chessboard=chessboard,
intrinsics=intrinsics)
# Load test image
image_path = join(self.data_path, "static_camera", "img_0.jpg")
image = cv2.imread(image_path)
# cv2.imshow("Image", image)
# cv2.waitKey()
# Detect chessboard corners
chessboard = Chessboard(nb_rows=6, nb_cols=7, square_size=0.29)
corners = chessboard.find_corners(image)
# Convert points from ideal to pixel coordinates
corners_ud = self.data.intrinsics.undistort_points(corners)
self.data.intrinsics.undistort_image(image)
K_new = self.data.intrinsics.K_new
self.data.ideal2pixel(corners_ud, K_new)
def test_load(self):
# Setup
self.data_path = join(test.TEST_DATA_PATH, "calib_data")
images_dir = join(self.data_path, "gimbal_camera")
intrinsics_file = join(self.data_path, "static_camera.yaml")
intrinsics = CameraIntrinsics(intrinsics_file)
chessboard = Chessboard(nb_rows=6, nb_cols=7, square_size=0.29)
self.data = PreprocessData("IMAGES",
images_dir=images_dir,
chessboard=chessboard,
intrinsics=intrinsics)
# load data
self.data.preprocess()
self.assertTrue(len(self.data.images) > 0)
self.assertTrue(len(self.data.images_ud) > 0)
# def test_load_preprocessed(self):
# self.data_path = "/home/chutsu/Dropbox/calib_data"
# intrinsics_file = join(self.data_path, "static_camera.yaml")
# intrinsics = CameraIntrinsics(intrinsics_file)
# self.data = PreprocessData("PREPROCESSED",
# data_path=join(self.data_path, "cam0"),
# intrinsics=intrinsics)
# self.data.load()
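# Hypothetical entry point (not in the original file), assuming the tests
# are run directly rather than through a separate test runner:
#
#   if __name__ == '__main__':
#       unittest.main()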
| gpl-3.0 | -4,398,889,337,649,794,600 | 35.791045 | 76 | 0.561055 | false |
cycladesnz/chambersAndCreatures | src/effects/av_effects.py | 1 | 3684 | from pdcglobal import *
from .effect import Effect
from .dv_effects import DazzleEffect
class StunEffect(Effect):
def __init__(self, host, owner):
dur = d(3)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Stuns the enemy'
def tick(self):
self.host.timer += self.host.speed * d(3)
if self.host == self.host.game.player:
self.host.game.shout('You are stunned')
else:
self.host.game.shout('%s is stunned' % (self.host.name))
Effect.tick(self)
class BleedEffect(Effect):
def __init__(self, host, owner):
dur = d(10)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Makes the enemy bleed'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_GENERIC, self.owner)
if self.host == self.host.game.player:
self.host.game.shout('You are bleeding')
else:
self.host.game.shout('%s bleeds' % (self.host.name))
Effect.tick(self)
class BugPoisonEffect(Effect):
def __init__(self, host, owner):
dur = d(25)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Poisons the enemy'
def tick(self):
if d(100) < 5:
self.host.timer += self.host.speed * d(5)
if self.host == self.host.game.player:
                self.host.game.shout('You suddenly fall asleep')
            else:
                self.host.game.shout('%s suddenly falls asleep' % (self.host.name))
Effect.tick(self)
class YumuraPoisonEffect(Effect):
def __init__(self, host, owner):
dur = d(10)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Poisons the enemy'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_POISON, self.owner)
notice = False
if d(100) < 10:
StunEffect(self.host, self.owner)
notice = True
if d(100) < 10:
DazzleEffect(self.host, self.owner)
notice = True
if d(100) < 10:
self.host.game.do_damage(self.host, d(3), D_POISON, self.owner)
notice = True
if d(100) < 2:
self.host.game.do_damage(self.host, d(25), D_POISON, self.owner)
notice = True
if notice:
if self.host == self.host.game.player:
self.host.game.shout('You are poisoned')
else:
self.host.game.shout('%s is poisoned' % (self.host.name))
Effect.tick(self)
class KillerbeePoisonEffect(Effect):
def __init__(self, host, owner):
dur = d(10)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Poisons the enemy'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_POISON, self.owner)
if d(100) < 35:
StunEffect(self.host, self.owner)
if d(100) < 35:
DazzleEffect(self.host, self.owner)
if self.host == self.host.game.player:
self.host.game.shout('You are poisoned')
else:
self.host.game.shout('%s is poisoned' % (self.host.name))
Effect.tick(self)
class StrokingEffect(Effect):
def __init__(self, host, owner):
dur = 1
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Strokes the enemy'
def tick(self):
if self.host == self.host.game.player:
self.host.game.shout('You are getting stroked by %s' % (self.owner.name))
else:
self.host.game.shout('%s is getting stroked' % (self.host.name))
Effect.tick(self)
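# Illustrative usage sketch (an assumption, not part of this module):
# constructing an effect binds it to its host, and the game loop advances
# it via tick(). A hit that may poison the target could look like:
#
#   if d(100) < 35:
#       YumuraPoisonEffect(defender, attacker)
#
# where `defender` and `attacker` are hypothetical actor objects.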
| gpl-2.0 | 3,554,105,722,189,327,400 | 33.754717 | 85 | 0.551031 | false |
jhuttner/flake8-import-order | flake8_import_order/checker.py | 1 | 1888 | import ast
import pycodestyle
from flake8_import_order import ImportVisitor
from flake8_import_order.styles import (
Cryptography, Google, PEP8, Smarkets,
)
DEFAULT_IMPORT_ORDER_STYLE = 'cryptography'
class ImportOrderChecker(object):
visitor_class = ImportVisitor
options = None
def __init__(self, filename, tree):
self.tree = tree
self.filename = filename
self.lines = None
def load_file(self):
if self.filename in ("stdin", "-", None):
self.filename = "stdin"
self.lines = pycodestyle.stdin_get_value().splitlines(True)
else:
self.lines = pycodestyle.readlines(self.filename)
if not self.tree:
self.tree = ast.parse("".join(self.lines))
def error(self, error):
        raise NotImplementedError()
def check_order(self):
if not self.tree or not self.lines:
self.load_file()
visitor = self.visitor_class(
self.options.get('application_import_names', []),
self.options.get('application_package_names', []),
)
visitor.visit(self.tree)
imports = []
for import_ in visitor.imports:
if not pycodestyle.noqa(self.lines[import_.lineno - 1]):
imports.append(import_)
style_option = self.options.get(
'import_order_style', DEFAULT_IMPORT_ORDER_STYLE,
)
if style_option == 'cryptography':
style = Cryptography(imports)
elif style_option == 'google':
style = Google(imports)
elif style_option == 'pep8':
style = PEP8(imports)
elif style_option == 'smarkets':
style = Smarkets(imports)
else:
raise AssertionError("Unknown style {}".format(style_option))
for error in style.check():
yield self.error(error)
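    # Usage sketch (hypothetical, not part of the package): a concrete
    # subclass only needs to implement `error()`; `check_order()` then
    # yields one value per detected style violation.
    #
    #   class PrintingChecker(ImportOrderChecker):
    #       def error(self, error):
    #           return error
    #
    #   checker = PrintingChecker("module.py", None)
    #   checker.options = {'import_order_style': 'pep8'}
    #   for err in checker.check_order():
    #       print(err)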
| lgpl-3.0 | -7,491,054,521,117,362,000 | 28.046154 | 73 | 0.587924 | false |
ActiveState/code | recipes/Python/578369_War_Game_Version_5/recipe-578369.py | 1 | 7275 | from random import randint, seed
from time import time
# region: change
# from window import *
from Zpaw import *
from cards import *
card_list = [card_0, card_1, card_2, card_3, card_4, card_5, card_6, card_7, card_8, card_9]
# endregion
def game():
print 'Welcome to WAR V5!'
print
asking = True
while asking:
try:
players = int(raw_input('How many players are there? '))
if players < 2:
print 'There must be at least two players.'
else:
asking = False
except:
print 'You must enter a number.'
print
names = []
# region: change
longest_name = 0
for name in range(players):
names.append(raw_input('What is the name of player ' + str(name + 1) + '? '))
if len(names[-1]) > longest_name:
longest_name = len(names[-1])
# endregion
deck = []
for card in range(10):
for player in range(players):
deck.append(card)
hands = []
seed(time())
for player in range(players):
hand = ([], [])
for card in range(10):
index = randint(0, len(deck) - 1)
hand[0].append(deck[index])
del deck[index]
hand[0].sort()
hands.append(hand)
for round in range(1, 11):
table = []
will_play = []
high_card = 0
for player in range(players):
will_play.append(player)
for turn in range(players):
for line in range(50):
print
index = randint(0, len(will_play) - 1)
now_play = will_play[index]
del will_play[index]
print 'Round', round
raw_input('It is ' + names[now_play] + "'s turn to play.")
print
# region: change
if len(table) == 0:
print 'There are no cards on the table.\n'
else:
table_window = window(len(table) * 6, longest_name + 13)
for card in range(len(table)):
# name_page = page(1, len(names[table[card][0]]) + 9)
# name_page.mutate(0, 0, names[table[card][0]] + ' played')
# table_window.append(name_page, [card * 6, 0])
# table_window.append(card_list[table[card][1]], [card * 6, len(names[table[card][0]]) + 8])
# table_window += struct(True, card * 6, 0, name_page)
# table_window += struct(True, card * 6, len(names[table[card][0]]) + 8, card_list[table[card][1]])
table_window += page(1, len(names[table[card][0]]) + 9) \
.mutate(0, 0, names[table[card][0]] + ' played').y(card * 6)
table_window += page(0, 0).link(card_list[table[card][1]]) \
.x(len(names[table[card][0]]) + 8).y(card * 6)
print table_window
print 'These are your playing cards:'
playing_window = window(7, len(hands[now_play][0]) * 6)
for index in range(len(hands[now_play][0])):
# playing_window.append(card_list[hands[now_play][0][index]], [1, index * 6 + 1])
# playing_window += struct(True, 1, index * 6 + 1, card_list[hands[now_play][0][index]])
playing_window += page(0, 0).link(card_list[hands[now_play][0][index]]).x(index * 6 + 1).y(1)
print playing_window
if len(hands[now_play][1]) > 0:
hands[now_play][1].sort()
print 'These are your captured cards:'
capture_window = window(7, len(hands[now_play][1]) * 6)
for index in range(len(hands[now_play][1])):
# capture_window.append(card_list[hands[now_play][1][index]], [1, index * 6 + 1])
# capture_window += struct(True, 1, index * 6 + 1, card_list[hands[now_play][1][index]])
capture_window += page(0, 0).link(card_list[hands[now_play][1][index]]).x(index * 6 + 1).y(1)
print capture_window
# endregion
asking = True
while asking:
try:
card = int(raw_input('What card do you want to play? '))
if card >= 0 and card <= 9:
try:
hands[now_play][0].remove(card)
table.append((now_play, card))
if card > high_card:
high_card = card
asking = False
except:
print 'You do not have that card.'
else:
                        print 'You must enter a value between 0 and 9.'
except:
print 'You must enter a number.'
for line in range(50):
print
            # region: change
table_window = window(len(table) * 6, longest_name + 13)
for card in range(len(table)):
# name_page = page(1, len(names[table[card][0]]) + 9)
# name_page.mutate(0, 0, names[table[card][0]] + ' played')
# table_window.append(name_page, [card * 6, 0])
# table_window.append(card_list[table[card][1]], [card * 6, len(names[table[card][0]]) + 8])
# table_window += struct(True, card * 6, 0, name_page)
# table_window += struct(True, card * 6, len(names[table[card][0]]) + 8, card_list[table[card][1]])
table_window += page(1, len(names[table[card][0]]) + 9) \
.mutate(0, 0, names[table[card][0]] + ' played').y(card * 6)
table_window += page(0, 0).link(card_list[table[card][1]]) \
.x(len(names[table[card][0]]) + 8).y(card * 6)
print table_window
# endregion
hand_out = []
for index in range(players):
if table[index][1] == high_card:
hand_out.append(table[index][0])
while len(table) > 0:
hands[hand_out[randint(0, len(hand_out) - 1)]][1].append(table[0][1])
del table[0]
for player in range(players):
if len(hands[player][1]) > 0:
print names[player] + ' has captured ' + str(len(hands[player][1])) + ' cards.'
print
raw_input('End Of Round ' + str(round))
for line in range(50):
print
high_score = 0
scores = []
for player in range(players):
total = 0
for card in range(len(hands[player][1])):
total += hands[player][1][card]
if total > high_score:
high_score = total
if len(scores) == 0 or scores[len(scores) - 1][1] <= total:
scores.append((player, total))
else:
for index in range(len(scores)):
if total > scores[index][1]:
                    scores.insert(index, (player, total))
break
for player in range(players):
print names[scores[player][0]] + ' received ' + str(scores[player][1]) + ' points.'
print
for index in range(10):
raw_input('GAME OVER ... ' + str(9 - index))
if __name__ == '__main__':
game()
| mit | -4,348,524,934,949,283,000 | 43.090909 | 119 | 0.485636 | false |
AugustoLD/SearchAlgorithms-IA | graph_search.py | 1 | 4000 | #*************************************************************************
# Copyright (C) 2015
#
# Augusto Lopez Dantas - augustold42@gmail.com
# Daniel Yang Chow - danielyc95@gmail.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#*************************************************************************
import sys
from graph_file import GraphFile
from a_star import AStar
from dijkstra import Dijkstra
graph_file = None
graph = {}
heuristic = {}
begin = None
end = None
def setup(filename):
global graph_file, graph, begin, end, heuristic
graph_file = GraphFile()
while not graph_file.read_file(filename):
filename = str(input("New file path: ")).rstrip()
graph = graph_file.construct_graph()
heuristic = graph_file.construct_heuristic_table()
begin = graph_file.begin
end = graph_file.end
def alter_graph_file():
new_file = str(input('New file path: ')).rstrip()
setup(new_file)
def alter_end():
global end, heuristic
target = str(input('New target node: '))
if target in graph:
end = target
heuristic = graph_file.construct_heuristic_table(end)
else:
print('Error: Invalid node!')
input('Press Enter...')
def alter_begin():
global begin
start = str(input('New starting node: '))
if start in graph:
begin = start
else:
print('Error: Invalid node!')
input('Press Enter...')
def show_graph():
graph_file.print_graph(graph)
input('Press Enter...')
def show_heuristic():
if graph_file.is_heuristic_complete(heuristic):
graph_file.print_heuristic(heuristic, end)
else:
print('Error: heuristic is incomplete for the target {}!'.format(end))
input('Press Enter...')
def run_a_star():
if graph_file.is_heuristic_complete(heuristic):
AStar(graph).search_path(begin, end, heuristic)
else:
print('Error: heuristic is incomplete for the target {}!'.format(end))
input('Press Enter...')
def run_dijkstra():
Dijkstra(graph).search_path(begin, end)
input('Press Enter...')
def run_search_algorithms():
menu = {
'1': run_dijkstra,
'2': run_a_star,
'3': alter_begin,
'4': alter_end
}
menu_opt = ""
while menu_opt != '0':
print('-'*70, '\n', 'Search Algorithms'.center(70))
print('-'*70)
print('1 - Dijkstra')
print('2 - A*')
print('3 - Change Starting Node (current: {})'.format(begin))
print('4 - Change Target Node (current: {})'.format(end))
print('0 - Back')
menu_opt = input()
if menu_opt in menu:
menu[menu_opt]()
def run():
menu = {
'1': run_search_algorithms,
'2': show_graph,
'3': show_heuristic,
'4': alter_graph_file
}
menu_opt = ""
while menu_opt != '0':
print('-'*70, '\n', 'Graph Search'.center(70))
print('-'*70)
print('1 - Run Search Algorithms')
print('2 - Show Graph')
print('3 - Show Heuristic Table')
print('4 - Change Graph File')
print('0 - Quit')
menu_opt = input()
if menu_opt in menu:
menu[menu_opt]()
if __name__ == '__main__':
try:
filename = sys.argv[1]
except IndexError:
filename = ""
setup(filename)
run()
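# Example invocation (the file path is illustrative only):
#
#   python graph_search.py my_graph.txt
#
# When no argument is given, setup() keeps prompting for a valid file path.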
| gpl-2.0 | 18,317,909,692,897,096 | 28.62963 | 78 | 0.59075 | false |
black-knight/magic_lamp | Server/src/board/markers/marker.py | 1 | 2739 | import cv2
from board.board_descriptor import BoardDescriptor
class Marker(object):
def __init__(self, marker_id):
"""
:param marker_id: Marker ID
"""
self.marker_id = marker_id
def preferred_input_image_resolution(self):
"""
Returns the preferred input resolution for this marker detector. Defaults to medium.
:return: Input resolution (of type BoardDescriptor.SnapshotSize)
"""
return BoardDescriptor.SnapshotSize.MEDIUM
def find_markers_in_image(self, image):
"""
Find all markers in image.
:param image: Image
:return: List of markers each in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
return []
def find_markers_in_thresholded_image(self, image):
"""
Find all markers in image which has already been thresholded.
:param image: Image
:return: List of markers each in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
return []
def find_marker_in_image(self, image):
"""
Find marker in image.
:param image: Image
:return: Marker in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
return None
def find_marker_in_thresholded_image(self, image):
"""
Find marker in image which has already been thresholded.
:param image: Thresholded image
:return: Marker in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
return None
def contour_to_marker_result(self, image, contour):
"""
Extracts marker result from contour.
:param image: Image
:param contour: Contour
:return: Result in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
image_height, image_width = image.shape[:2]
box = cv2.minAreaRect(contour)
return {"markerId": self.marker_id,
"x": float(box[0][0]) / float(image_width),
"y": float(box[0][1]) / float(image_height),
"width": float(box[1][0]) / float(image_width),
"height": float(box[1][1]) / float(image_height),
"angle": box[2],
"contour": contour}
def contours_to_marker_result(self, image, contours):
"""
Extracts marker results from contours.
:param image: Image
:param contours: Contours
:return: List of markers each in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
return [self.contour_to_marker_result(image, contour) for contour in contours]
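# A minimal subclass sketch (an assumption, not part of this module) showing
# how the contour helpers above are typically fed: detect contours in a
# thresholded image with OpenCV, then convert them to marker results
# (OpenCV >= 4 return signature assumed for cv2.findContours).
#
#   class ThresholdMarker(Marker):
#       def find_markers_in_thresholded_image(self, image):
#           contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL,
#                                          cv2.CHAIN_APPROX_SIMPLE)
#           return self.contours_to_marker_result(image, contours)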
| apache-2.0 | 6,870,595,627,794,209,000 | 31.607143 | 107 | 0.56517 | false |
jhanley634/testing-tools | problem/covid/sd_cases_deaths.py | 1 | 2150 | #! /usr/bin/env streamlit run
# Copyright 2020 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
import datetime as dt
from altair import datum
from covid.us_cases_deaths import delta, get_cases_and_deaths, get_chart, smooth
import altair as alt
import streamlit as st
def _get_annotation(df):
# https://en.wikipedia.org/wiki/Sturgis_Motorcycle_Rally
rally = 1e3 * dt.datetime.strptime('2020-08-07', '%Y-%m-%d').timestamp()
ten_days = 10 * 1e3 * 86400
annotation = alt.Chart(df).mark_text(
align='left',
baseline='middle',
fontSize=20,
dx=7
).encode(
x='date',
y='val',
text='label'
).transform_filter(
(rally <= datum.date) & (datum.date < rally + ten_days)
)
return annotation
def main():
df = get_cases_and_deaths('us-states.csv', 'South Dakota')
df['label'] = '.'
st.altair_chart(get_chart(df) + _get_annotation(df))
st.altair_chart(get_chart(df, 'log') + _get_annotation(df))
delta(df)
smooth(df, span=7)
st.altair_chart(get_chart(df) + _get_annotation(df))
if __name__ == '__main__':
main()
| mit | -4,698,819,997,443,697,000 | 35.440678 | 80 | 0.695349 | false |
zitouni/ieee_802-15-4_868-900 | python/ieee802_15_4.py | 1 | 3940 | #!/usr/bin/env python
# O-QPSK modulation and demodulation.
#
#
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# Derived from gmsk.py
#
# Modified by: Thomas Schmid, Leslie Choong, Sanna Leidelof
#
from gnuradio import gr, digital, ucla
from math import pi
class ieee802_15_4_mod(gr.hier_block2):
def __init__(self, *args, **kwargs):
"""
        Hierarchical block for IEEE 802.15.4 O-QPSK modulation.
The input is a byte stream (unsigned char) and the
output is the complex modulated signal at baseband.
@param spb: samples per baud >= 2
@type spb: integer
"""
try:
self.spb = kwargs.pop('spb')
except KeyError:
pass
gr.hier_block2.__init__(self, "ieee802_15_4_mod",
gr.io_signature(1, 1, 1), # Input
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output
if not isinstance(self.spb, int) or self.spb < 2:
raise TypeError, "spb must be an integer >= 2"
self.symbolsToChips = ucla.symbols_to_chips_bi()
self.chipsToSymbols = gr.packed_to_unpacked_ii(2, gr.GR_MSB_FIRST)
self.symbolsToConstellation = gr.chunks_to_symbols_ic((-1-1j, -1+1j, 1-1j, 1+1j))
self.pskmod = ucla.qpsk_modulator_cc()
self.delay = ucla.delay_cc(self.spb)
# Connect
self.connect(self, self.symbolsToChips, self.chipsToSymbols, self.symbolsToConstellation, self.pskmod, self.delay, self)
#self.connect(self, self.symbolsToChips, self.chipsToSymbols, self.symbolsToConstellation, self.pskmod, self)
class ieee802_15_4_demod(gr.hier_block2):
def __init__(self, *args, **kwargs):
"""
Hierarchical block for O-QPSK demodulation.
The input is the complex modulated signal at baseband
and the output is a stream of bytes.
@param sps: samples per symbol
@type sps: integer
"""
try:
self.sps = kwargs.pop('sps')
except KeyError:
pass
gr.hier_block2.__init__(self, "ieee802_15_4_demod",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input
gr.io_signature(1, 1, gr.sizeof_float)) # Output
# Demodulate FM
sensitivity = (pi / 2) / self.sps
#self.fmdemod = gr.quadrature_demod_cf(1.0 / sensitivity)
self.fmdemod = gr.quadrature_demod_cf(1)
# Low pass the output of fmdemod to allow us to remove
# the DC offset resulting from frequency offset
alpha = 0.0008/self.sps
self.freq_offset = gr.single_pole_iir_filter_ff(alpha)
self.sub = gr.sub_ff()
self.connect(self, self.fmdemod)
self.connect(self.fmdemod, (self.sub, 0))
self.connect(self.fmdemod, self.freq_offset, (self.sub, 1))
# recover the clock
omega = self.sps
gain_mu=0.03
mu=0.5
omega_relative_limit=0.0002
freq_error=0.0
gain_omega = .25*gain_mu*gain_mu # critically damped
self.clock_recovery = digital.clock_recovery_mm_ff(omega, gain_omega, mu, gain_mu,
omega_relative_limit)
# Connect
self.connect(self.sub, self.clock_recovery, self)
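# Usage sketch (hypothetical wiring, GNU Radio 3.x): the two hier blocks
# above are meant to sit between a byte source and further O-QPSK
# processing in a flowgraph, e.g.:
#
#   mod = ieee802_15_4_mod(spb=2)
#   demod = ieee802_15_4_demod(sps=2)
#   tb = gr.top_block()
#   tb.connect(byte_src, mod, channel, demod, sink)  # placeholder blocks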
| gpl-3.0 | 8,383,000,823,689,001,000 | 32.675214 | 128 | 0.636294 | false |
sanja7s/SR_Twitter | src_graph/degree_assort_study.py | 1 | 1406 | import networkx as nx
from scipy import stats
from operator import mul # or mul=lambda x,y:x*y
from fractions import Fraction
import sys
# Calculates binomial coefficient (n over k)
def nCk(n,k):
return int( reduce(mul, (Fraction(n-i, i+1) for i in range(k)), 1) )
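# For example (illustrative): nCk(5, 2) == 10 and nCk(4, 2) == 6. Below,
# nCk(degree, 2) counts the 2-paths centered on each node.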
# Read the network in the form of an edge list, unweighted and undirected
net=nx.read_edgelist(sys.argv[1], nodetype=int)
# calculate the transitivity of the network
C=nx.transitivity(net)
# Make dictionary nodeID:degree
d=dict(nx.degree(net))
# The branching is calculated as P2/P1
# The intermodular connectivity as P3/P2
suma1=0
P2=0
for key in d:
suma1+=int(d[key])
P2+=nCk(int(d[key]),2)
P1=suma1*0.5
C3=C*P2/3.0
suma=0
for u,v in net.edges():
suma=suma+(d[u]-1)*(d[v]-1)
P3=suma-3*C3
P21=float(P2)/float(P1)
P32=float(P3)/float(P2)
# Conditions for assortativity and disassortativity
if P32 + C > P21:
print("The network is assortative with r = "+str(nx.degree_assortativity_coefficient(net)))
elif P32 + C < P21:
print("The network is disassortative with r = "+str(nx.degree_assortativity_coefficient(net)))
else:
print("The network is neutral with r = "+str(nx.degree_assortativity_coefficient(net)))
print("The relative branching is: " + str(P21))
print("The intermodular connectivity is: " + str(P32))
print("The transitivity is: " + str(C))
"""
awk '$3 > threshold {print $1, $2}' SR_0x > SRUNW
""" | mit | 628,720,276,494,182,900 | 28.3125 | 98 | 0.704836 | false |
sigmavirus24/pip | tasks/vendoring/__init__.py | 1 | 3688 | """"Vendoring script, python 3.5 needed"""
from pathlib import Path
import re
import shutil
import invoke
TASK_NAME = 'update'
FILE_WHITE_LIST = (
'Makefile',
'vendor.txt',
'__init__.py',
'README.rst',
)
def drop_dir(path):
shutil.rmtree(str(path))
def remove_all(paths):
for path in paths:
if path.is_dir():
drop_dir(path)
else:
path.unlink()
def log(msg):
print('[vendoring.%s] %s' % (TASK_NAME, msg))
def clean_vendor(ctx, vendor_dir):
# Old _vendor cleanup
remove_all(vendor_dir.glob('*.pyc'))
log('Cleaning %s' % vendor_dir)
for item in vendor_dir.iterdir():
if item.is_dir():
shutil.rmtree(str(item))
elif item.name not in FILE_WHITE_LIST:
item.unlink()
else:
log('Skipping %s' % item)
def rewrite_imports(package_dir, vendored_libs):
for item in package_dir.iterdir():
if item.is_dir():
rewrite_imports(item, vendored_libs)
elif item.name.endswith('.py'):
rewrite_file_imports(item, vendored_libs)
def rewrite_file_imports(item, vendored_libs):
"""Rewrite 'import xxx' and 'from xxx import' for vendored_libs"""
text = item.read_text()
# Revendor pkg_resources.extern first
text = re.sub(r'pkg_resources.extern', r'pip._vendor', text)
for lib in vendored_libs:
text = re.sub(
r'(\n\s*)import %s' % lib,
r'\1from pip._vendor import %s' % lib,
text,
)
text = re.sub(
r'(\n\s*)from %s' % lib,
r'\1from pip._vendor.%s' % lib,
text,
)
item.write_text(text)
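# Illustrative effect of the rewrite above (example lines, with `six` as a
# sample vendored library; not actual pip sources):
#
#   "import six"             ->  "from pip._vendor import six"
#   "from six import moves"  ->  "from pip._vendor.six import moves"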
def apply_patch(ctx, patch_file_path):
log('Applying patch %s' % patch_file_path.name)
ctx.run('git apply %s' % patch_file_path)
def vendor(ctx, vendor_dir):
log('Reinstalling vendored libraries')
ctx.run(
'pip install -t {0} -r {0}/vendor.txt --no-compile'.format(
str(vendor_dir),
)
)
remove_all(vendor_dir.glob('*.dist-info'))
remove_all(vendor_dir.glob('*.egg-info'))
# Cleanup setuptools unneeded parts
(vendor_dir / 'easy_install.py').unlink()
drop_dir(vendor_dir / 'setuptools')
drop_dir(vendor_dir / 'pkg_resources' / '_vendor')
drop_dir(vendor_dir / 'pkg_resources' / 'extern')
# Drop interpreter and OS specific msgpack libs.
# Pip will rely on the python-only fallback instead.
remove_all(vendor_dir.glob('msgpack/*.so'))
# Detect the vendored packages/modules
vendored_libs = []
for item in vendor_dir.iterdir():
if item.is_dir():
vendored_libs.append(item.name)
elif item.name not in FILE_WHITE_LIST:
vendored_libs.append(item.name[:-3])
log("Detected vendored libraries: %s" % ", ".join(vendored_libs))
# Global import rewrites
log("Rewriting all imports related to vendored libs")
for item in vendor_dir.iterdir():
if item.is_dir():
rewrite_imports(item, vendored_libs)
elif item.name not in FILE_WHITE_LIST:
rewrite_file_imports(item, vendored_libs)
# Special cases: apply stored patches
log("Apply patches")
patch_dir = Path(__file__).parent / 'patches'
for patch in patch_dir.glob('*.patch'):
apply_patch(ctx, patch)
@invoke.task(name=TASK_NAME)
def main(ctx):
git_root = Path(
ctx.run('git rev-parse --show-toplevel', hide=True).stdout.strip()
)
vendor_dir = git_root / 'pip' / '_vendor'
log('Using vendor dir: %s' % vendor_dir)
clean_vendor(ctx, vendor_dir)
vendor(ctx, vendor_dir)
log('Revendoring complete')
| mit | 8,446,935,301,462,685,000 | 26.729323 | 74 | 0.594902 | false |
chungjjang80/FRETBursts | fretbursts/burstlib.py | 1 | 133746 | #
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2013-2016 The Regents of the University of California,
# Antonino Ingargiola <tritemio@gmail.com>
#
"""
This module contains all the main FRETBursts analysis functions.
`burstlib.py` defines the fundamental object `Data()` that contains both the
experimental data (attributes) and the high-level analysis routines (methods).
Furthermore it loads all the remaining **FRETBursts** modules (except for
`loaders.py`).
For usage example see the IPython Notebooks in sub-folder "notebooks".
"""
from __future__ import print_function, absolute_import, division
from future.utils import raise_from
from builtins import range, zip
import os
import hashlib
import numpy as np
import copy
from numpy import zeros, size, r_
import scipy.stats as SS
from .utils.misc import pprint, clk_to_s, deprecate
from .poisson_threshold import find_optimal_T_bga
from . import fret_fit
from . import bg_cache
from .ph_sel import Ph_sel
from .fretmath import gamma_correct_E, gamma_uncorrect_E
from .phtools import burstsearch as bslib
from .phtools.burstsearch import (
# Burst search function
bsearch,
# Photon counting function,
mch_count_ph_in_bursts
)
from .phtools import phrates
from . import background as bg
from . import select_bursts
from . import fit
from .fit.gaussian_fitting import (gaussian_fit_hist,
gaussian_fit_cdf,
two_gaussian_fit_hist,
two_gaussian_fit_hist_min,
two_gaussian_fit_hist_min_ab,
two_gaussian_fit_EM,
two_gauss_mix_pdf,
two_gauss_mix_ab,)
# Redefine some old functions that have been renamed so old scripts will not
# break but will print a warning
bg_calc_exp = deprecate(bg.exp_fit, 'bg_calc_exp', 'bg.exp_fit')
bg_calc_exp_cdf = deprecate(bg.exp_cdf_fit, 'bg_calc_exp_cdf', 'bg.exp_cdf_fit')
def _get_bsearch_func(pure_python=False):
if pure_python:
# return the python version
return bslib.bsearch_py
else:
# or what is available
return bsearch
def _get_mch_count_ph_in_bursts_func(pure_python=False):
if pure_python:
# return the python version
return bslib.mch_count_ph_in_bursts_py
else:
# or what is available
return mch_count_ph_in_bursts
def isarray(obj):
"""Test if the object support the array interface.
Returns True for numpy arrays and pandas sequences.
"""
return hasattr(obj, '__array__')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# BURST SELECTION FUNCTIONS
#
def Sel(d_orig, filter_fun, negate=False, nofret=False, **kwargs):
"""Uses `filter_fun` to select a sub-set of bursts from `d_orig`.
This function is deprecated. Use :meth:`Data.select_bursts` instead.
"""
d_sel = d_orig.select_bursts(filter_fun, negate=negate,
computefret=not nofret,
**kwargs)
return d_sel
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Bursts and Timestamps utilities
#
def get_alex_fraction(on_range, alex_period):
"""Get the fraction of period beween two numbers indicating a range.
"""
assert len(on_range) == 2
if on_range[0] < on_range[1]:
fraction = (on_range[1] - on_range[0]) / alex_period
else:
fraction = (alex_period + on_range[1] - on_range[0]) / alex_period
return fraction
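# For example (illustrative values): get_alex_fraction((100, 1900), 4000)
# returns 0.45, while a wrapped range such as (3900, 1900) covers the
# fraction (4000 + 1900 - 3900) / 4000 = 0.5.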
def top_tail(nx, a=0.1):
"""Return for each ch the mean size of the top `a` fraction.
nx is one of nd, na, nt from Data() (list of burst size in each ch).
"""
assert a > 0 and a < 1
return np.r_[[n[n > n.max() * (1 - a)].mean() for n in nx]]
##
# Per-burst quantities from ph-data arrays (timestamps, lifetime, etc..)
#
def _excitation_width(excitation_range, alex_period):
"""Returns duration of alternation period outside selected excitation.
"""
if excitation_range[1] > excitation_range[0]:
return alex_period - excitation_range[1] + excitation_range[0]
elif excitation_range[1] < excitation_range[0]:
return excitation_range[0] - excitation_range[1]
def _ph_times_compact(ph_times_sel, alex_period, excitation_width):
"""Compact ph_times inplace by removing gaps between alternation periods.
Arguments:
ph_times_sel (array): array of timestamps from one alternation period.
alex_period (scalar): period of alternation in timestamp units.
excitation_width (float): fraction of `alex_period` covered by
current photon selection.
    Returns the array of compacted timestamps (the input is not modified).
"""
# The formula is
#
# gaps = (ph_times_sel // alex_period)*excitation_width
# ph_times_sel = ph_times_sel - gaps
#
# As a memory optimization the `-gaps` array is reused inplace
times_minusgaps = (ph_times_sel // alex_period) * (-1 * excitation_width)
# The formula is ph_times_sel = ph_times_sel - "gaps"
times_minusgaps += ph_times_sel
return times_minusgaps
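# Worked example (illustrative numbers): with alex_period=100 and
# excitation_width=60, a timestamp t=250 falls in the third period, so
# (250 // 100) * 60 = 120 units of "gap" are removed and t becomes 130.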
def iter_bursts_start_stop(bursts):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
arr_istart = bursts.istart
arr_istop = bursts.istop + 1
for istart, istop in zip(arr_istart, arr_istop):
yield istart, istop
def iter_bursts_ph(ph_data, bursts, mask=None, compact=False,
alex_period=None, excitation_width=None):
"""Iterator over arrays of photon-data for each burst.
Arguments:
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
mask (boolean mask or None): if not None, is a boolean mask
to select photons in `ph_data` (for example Donor-ch photons).
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
alex_period (scalar): period of alternation in timestamp units.
excitation_width (float): fraction of `alex_period` covered by
current photon selection.
Yields an array with a selection of "photons" for each burst.
"""
if isinstance(mask, slice) and mask == slice(None):
mask = None
if compact:
assert alex_period is not None
assert excitation_width is not None
assert mask is not None
for start, stop in iter_bursts_start_stop(bursts):
ph = ph_data[start:stop]
if mask is not None:
ph = ph[mask[start:stop]]
if compact:
ph = _ph_times_compact(ph, alex_period, excitation_width)
yield ph
def bursts_ph_list(ph_data, bursts, mask=None):
"""Returna list of ph-data for each burst.
ph_data can be either the timestamp array on which the burst search
has been performed or any other array with same size (boolean array,
nanotimes, etc...)
"""
return [ph for ph in iter_bursts_ph(ph_data, bursts, mask=mask)]
def burst_ph_stats(ph_data, bursts, func=np.mean, func_kw=None, **kwargs):
"""Reduce burst photons (timestamps, nanotimes) to a scalar using `func`.
    Arguments:
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
func (callable): function that takes the burst photon timestamps
as first argument and returns a scalar.
        func_kw (dict): additional keyword arguments passed to `func`.
**kwargs: additional arguments passed to :func:`iter_bursts_ph`.
    Returns:
        Array with one element per burst.
"""
if func_kw is None:
func_kw = {}
burst_stats = []
for burst_ph in iter_bursts_ph(ph_data, bursts, **kwargs):
burst_stats.append(func(burst_ph, **func_kw))
return np.asfarray(burst_stats) # NOTE: asfarray converts None to nan
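# Usage sketch (assumes a Data object `d` with nanotimes loaded): the mean
# TCSPC nanotime of each burst in the first channel could be computed as
#
#   mean_nanot = burst_ph_stats(d.nanotimes[0], d.mburst[0], func=np.mean)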
def ph_in_bursts_mask(ph_data_size, bursts):
"""Return bool mask to select all "ph-data" inside any burst."""
mask = zeros(ph_data_size, dtype=bool)
for start, stop in iter_bursts_start_stop(bursts):
mask[start:stop] = True
return mask
def fuse_bursts_direct(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-seconds).
This function is a direct implementation using a single loop.
For a faster implementation see :func:`fuse_bursts_iter`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
fused_bursts_list = []
fused_burst = None
for burst1, burst2 in zip(bursts[:-1], bursts[1:]):
if fused_burst is not None:
burst1c = fused_burst
else:
burst1c = bslib.BurstGap.from_burst(burst1)
separation = burst2.start - burst1c.stop
if separation <= max_delay_clk:
gap = burst2.start - burst1c.stop
gap_counts = burst2.istart - burst1c.istop - 1
if burst1c.istop >= burst2.istart:
gap = 0
gap_counts = 0
fused_burst = bslib.BurstGap(
start = burst1c.start,
istart = burst1c.istart,
stop = burst2.stop,
istop = burst2.istop,
gap = burst1c.gap + gap,
gap_counts = burst1c.gap_counts + gap_counts)
else:
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
fused_burst = None
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst1c))
# Append the last bursts (either a fused or an isolated one)
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst2))
fused_bursts = bslib.BurstsGap.from_list(fused_bursts_list)
init_num_bursts = bursts.num_bursts
delta_b = init_num_bursts - fused_bursts.num_bursts
pprint(" --> END Fused %d bursts (%.1f%%)\n\n" %
(delta_b, 100 * delta_b / init_num_bursts), mute=not verbose)
return fused_bursts
def fuse_bursts_iter(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-secs).
This function calls iteratively :func:`b_fuse` until there are no more
bursts to fuse. For a slower but more readable version see
:func:`fuse_bursts_direct`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
init_nburst = bursts.num_bursts
bursts = bslib.BurstsGap(bursts.data)
z = 0
new_nburst, nburst = 0, 1 # starting condition
while new_nburst < nburst:
z += 1
nburst = bursts.num_bursts
bursts = b_fuse(bursts, ms=ms, clk_p=clk_p)
new_nburst = bursts.num_bursts
delta_b = init_nburst - nburst
pprint(" --> END Fused %d bursts (%.1f%%, %d iter)\n\n" %
(delta_b, 100 * delta_b / init_nburst, z), mute=not verbose)
return bursts
def b_fuse(bursts, ms=0, clk_p=12.5e-9):
"""Fuse bursts separated by less than `ms` (milli-secs).
This is a low-level function which fuses pairs of consecutive
bursts separated by less than `ms` millisec.
If there are 3 or more consecutive bursts separated by less than `ms`
only the first 2 are fused.
See :func:`fuse_bursts_iter` or :func:`fuse_bursts_direct` for
higher level functions.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
# Nearby bursts masks
delays_below_th = (bursts.separation <= max_delay_clk)
if not np.any(delays_below_th):
return bursts
buffer_mask = np.hstack([(False,), delays_below_th, (False,)])
first_bursts = buffer_mask[1:]
second_bursts = buffer_mask[:-1]
# Keep only the first pair in case of more than 2 consecutive bursts
first_bursts ^= (second_bursts * first_bursts)
# note that previous in-place operation also modifies `second_bursts`
both_bursts = first_bursts + second_bursts
# istart is from the first burst, istop is from the second burst
fused_bursts1 = bursts[first_bursts]
fused_bursts2 = bursts[second_bursts]
# Compute gap and gap_counts
gap = fused_bursts2.start - fused_bursts1.stop
gap_counts = fused_bursts2.istart - fused_bursts1.istop - 1 # yes it's -1
overlaping = fused_bursts1.istop >= fused_bursts2.istart
gap[overlaping] = 0
gap_counts[overlaping] = 0
# Assign the new burst data
    # fused_bursts1 has already the right start and istart
fused_bursts1.istop = fused_bursts2.istop
fused_bursts1.stop = fused_bursts2.stop
fused_bursts1.gap += gap
fused_bursts1.gap_counts += gap_counts
# Join fused bursts with the remaining bursts
new_burst = fused_bursts1.join(bursts[~both_bursts], sort=True)
return new_burst
def mch_fuse_bursts(MBurst, ms=0, clk_p=12.5e-9, verbose=True):
"""Multi-ch version of `fuse_bursts`. `MBurst` is a list of Bursts objects.
"""
mburst = [b.copy() for b in MBurst] # safety copy
new_mburst = []
ch = 0
for mb in mburst:
ch += 1
pprint(" - - - - - CHANNEL %2d - - - - \n" % ch, not verbose)
if mb.num_bursts == 0:
new_bursts = bslib.Bursts.empty()
else:
new_bursts = fuse_bursts_iter(mb, ms=ms, clk_p=clk_p,
verbose=verbose)
new_mburst.append(new_bursts)
return new_mburst
def burst_stats(mburst, clk_p):
"""Compute average duration, size and burst-delay for bursts in mburst.
"""
nans = [np.nan, np.nan]
width_stats = np.array([[b.width.mean(), b.width.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
height_stats = np.array([[b.counts.mean(), b.counts.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
mean_burst_delay = np.array([b.separation.mean() if b.num_bursts > 0
else np.nan for b in mburst])
return (clk_to_s(width_stats, clk_p) * 1e3, height_stats,
clk_to_s(mean_burst_delay, clk_p))
def print_burst_stats(d):
"""Print some bursts statistics."""
nch = len(d.mburst)
width_ms, height, delays = burst_stats(d.mburst, d.clk_p)
s = "\nNUMBER OF BURSTS: m = %d, L = %d" % (d.m, d.L)
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\n#: "+"%7d "*nch % tuple([b.num_bursts for b in d.mburst])
s += "\nT (us) [BS par] "+"%7d "*nch % tuple(np.array(d.T)*1e6)
s += "\nBG Rat T (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel('all')])
s += "\nBG Rat D (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Dem')])
s += "\nBG Rat A (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Aem')])
s += "\n\nBURST WIDTH STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (ms): "+"%7.3f "*nch % tuple(width_ms[0, :])
s += "\nStd.dev (ms): "+"%7.3f "*nch % tuple(width_ms[1, :])
s += "\n\nBURST SIZE STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (# ph): "+"%7.2f "*nch % tuple(height[0, :])
s += "\nStd.dev (# ph): "+"%7.2f "*nch % tuple(height[1, :])
s += "\n\nBURST MEAN DELAY"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nDelay (s): "+"%7.3f "*nch % tuple(delays)
return s
def ES_histog(E, S, bin_step=0.05, E_bins=None, S_bins=None):
"""Returns 2D (ALEX) histogram and bins of bursts (E,S).
"""
if E_bins is None:
E_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
if S_bins is None:
S_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
H, E_bins, S_bins = np.histogram2d(E, S, bins=[E_bins, S_bins])
return H, E_bins, S_bins
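# Example (illustrative, assumes burst-search results in a Data object `d`):
#
#   H, E_bins, S_bins = ES_histog(d.E[0], d.S[0], bin_step=0.025)
#
# H is the 2D E-S histogram for the first channel, ready for plotting.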
def delta(x):
"""Return x.max() - x.min()"""
return x.max() - x.min()
def mask_empty(mask):
"""Returns True if `mask` is empty, otherwise False.
`mask` can be a boolean array or a slice object.
"""
if isinstance(mask, slice):
is_slice_empty = (mask.stop == 0)
return is_slice_empty
else:
        # Boolean array
return not mask.any()
class DataContainer(dict):
"""
Generic class for storing data.
It's a dictionary in which each key is also an attribute d['nt'] or d.nt.
"""
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
for k in self:
dict.__setattr__(self, k, self[k])
def add(self, **kwargs):
"""Adds or updates elements (attributes and/or dict entries). """
self.update(**kwargs)
for k, v in kwargs.items():
setattr(self, k, v)
def delete(self, *args, **kwargs):
"""Delete an element (attribute and/or dict entry). """
warning = kwargs.get('warning', True)
for name in args:
try:
self.pop(name)
except KeyError:
if warning:
print(' WARNING: Name %s not found (dict).' % name)
try:
delattr(self, name)
except AttributeError:
if warning:
print(' WARNING: Name %s not found (attr).' % name)
class Data(DataContainer):
"""
Container for all the information (timestamps, bursts) of a dataset.
Data() contains all the information of a dataset (name, timestamps, bursts,
correction factors) and provides several methods to perform analysis
(background estimation, burst search, FRET fitting, etc...).
When loading a measurement file a Data() object is created by one
of the loader functions in `loaders.py`. Data() objects can be also
created with :meth:`Data.copy`, :meth:`Data.fuse_bursts()` or
:meth:`Data.select_bursts`.
To add or delete data-attributes use `.add()` or `.delete()` methods.
All the standard data-attributes are listed below.
Note:
Attributes of type "*list*" contain one element per channel.
Each element, in turn, can be an array. For example `.ph_times_m[i]`
is the array of timestamps for channel `i`; or `.nd[i]` is the array
of donor counts in each burst for channel `i`.
**Measurement attributes**
Attributes:
fname (string): measurements file name
nch (int): number of channels
clk_p (float): clock period in seconds for timestamps in `ph_times_m`
ph_times_m (list): list of timestamp arrays (int64). Each array
contains all the timestamps (donor+acceptor) in one channel.
A_em (list): list of boolean arrays marking acceptor timestamps. Each
array is a boolean mask for the corresponding ph_times_m array.
leakage (float or array of floats): leakage (or bleed-through) fraction.
May be scalar or same size as nch.
gamma (float or array of floats): gamma factor.
May be scalar or same size as nch.
D_em (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` for donor emission
D_ex, A_ex (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` during donor or acceptor
excitation
D_ON, A_ON (2-element tuples of int ): **[ALEX-only]**
start-end values for donor and acceptor excitation selection.
alex_period (int): **[ALEX-only]**
duration of the alternation period in clock cycles.
**Background Attributes**
The background is computed with :meth:`Data.calc_bg`
and is estimated in chunks of equal duration called *background periods*.
Estimations are performed in each spot and photon stream.
The following attributes contain the estimated background rate.
Attributes:
bg (dict): background rates for the different photon streams,
channels and background periods. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period) of background rates.
bg_mean (dict): mean background rates across the entire measurement
for the different photon streams and channels. Keys are `Ph_sel`
objects and values are lists (one element per channel) of
background rates.
nperiods (int): number of periods in which timestamps are split for
background calculation
bg_fun (function): function used to compute the background rates
Lim (list): each element of this list is a list of index pairs for
`.ph_times_m[i]` for **first** and **last** photon in each period.
Ph_p (list): each element in this list is a list of timestamps pairs
for **first** and **last** photon of each period.
bg_ph_sel (Ph_sel object): photon selection used by Lim and Ph_p.
See :mod:`fretbursts.ph_sel` for details.
Th_us (dict): thresholds in us used to select the tail of the
interphoton delay distribution. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period).
    Additionally, there are a few deprecated attributes (`bg_dd`, `bg_ad`,
`bg_da`, `bg_aa`, `rate_dd`, `rate_ad`, `rate_da`, `rate_aa` and `rate_m`)
which will be removed in a future version.
Please use :attr:`Data.bg` and :attr:`Data.bg_mean` instead.
**Burst search parameters (user input)**
These are the parameters used to perform the burst search
(see :meth:`burst_search`).
Attributes:
ph_sel (Ph_sel object): photon selection used for burst search.
See :mod:`fretbursts.ph_sel` for details.
m (int): number of consecutive timestamps used to compute the
local rate during burst search
L (int): min. number of photons for a burst to be identified and saved
P (float, probability): valid values [0..1].
Probability that a burst-start is due to a Poisson background.
The employed Poisson rate is the one computed by `.calc_bg()`.
F (float): `(F * background_rate)` is the minimum rate for burst-start
**Burst search data (available after burst search)**
When not specified, parameters marked as (list of arrays) contains arrays
with one element per bursts. `mburst` arrays contain one "row" per burst.
`TT` arrays contain one element per `period` (see above: background
attributes).
Attributes:
mburst (list of Bursts objects): list Bursts() one element per channel.
See :class:`fretbursts.phtools.burstsearch.Bursts`.
TT (list of arrays): list of arrays of *T* values (in sec.). A *T*
value is the maximum delay between `m` photons to have a
burst-start. Each channels has an array of *T* values, one for
each background "period" (see above).
T (array): per-channel mean of `TT`
nd, na (list of arrays): number of donor or acceptor photons during
donor excitation in each burst
nt (list of arrays): total number photons (nd+na+naa)
naa (list of arrays): number of acceptor photons in each burst
during acceptor excitation **[ALEX only]**
nar (list of arrays): number of acceptor photons in each burst
during donor excitation, not corrected for D-leakage and
A-direct-excitation. **[PAX only]**
bp (list of arrays): time period for each burst. Same shape as `nd`.
This is needed to identify the background rate for each burst.
bg_bs (list): background rates used for threshold computation in burst
search (is a reference to `bg`, `bg_dd` or `bg_ad`).
fuse (None or float): if not None, the burst separation in ms below
which bursts have been fused (see `.fuse_bursts()`).
E (list): FRET efficiency value for each burst:
E = na/(na + gamma*nd).
S (list): stoichiometry value for each burst:
S = (gamma*nd + na) /(gamma*nd + na + naa)
"""
# Attribute names containing per-photon data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per photon).
ph_fields = ['ph_times_m', 'nanotimes', 'particles',
'A_em', 'D_em', 'A_ex', 'D_ex']
# Attribute names containing background data.
# The attribute `bg` is a dict with photon-selections as keys and
# list of arrays as values. Each list contains one element per channel and
# each array one element per background period.
# The attributes `.Lim` and `.Ph_p` are lists with one element per channel.
# Each element is a lists-of-tuples (one tuple per background period).
# These attributes do not exist before computing the background.
bg_fields = ['bg', 'Lim', 'Ph_p']
# Attribute names containing per-burst data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per burst).
# They do not necessarly exist. For example 'naa' exists only for ALEX
# data. Also none of them exist before performing a burst search.
burst_fields = ['E', 'S', 'mburst', 'nd', 'na', 'nt', 'bp', 'nda', 'naa',
'max_rate', 'sbr', 'nar']
# Quantities (scalars or arrays) defining the current set of bursts
burst_metadata = ['m', 'L', 'T', 'TT', 'F', 'FF', 'P', 'PP', 'rate_th',
'bg_bs', 'ph_sel', 'bg_corrected', 'leakage_corrected',
'dir_ex_corrected', 'dithering', 'fuse', 'lsb']
# List of photon selections on which the background is computed
_ph_streams = [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'),
Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')]
@property
def ph_streams(self):
if self.alternated:
return self._ph_streams
else:
return [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
def __init__(self, leakage=0., gamma=1., dir_ex=0., **kwargs):
# Default values
init_kw = dict(ALEX=False, _leakage=float(leakage), _gamma=float(gamma),
_dir_ex=float(dir_ex), _beta=1., _chi_ch=1., s=[])
# Override with user data
init_kw.update(**kwargs)
DataContainer.__init__(self, **init_kw)
# def __getattr__(self, name):
# """Single-channel shortcuts for per-channel fields.
#
# Appending a '_' to a per-channel field avoids specifying the channel.
# For example use d.nd_ instead if d.nd[0].
# """
# msg_missing_attr = "'%s' object has no attribute '%s'" %\
# (self.__class__.__name__, name)
# if name.startswith('_') or not name.endswith('_'):
# raise AttributeError(msg_missing_attr)
#
# field = name[:-1]
# try:
# value = self.__getitem__(field)
# except KeyError:
# raise AttributeError(msg_missing_attr)
# else:
# # Support lists, tuples and object with array interface
# if isinstance(value, (list, tuple)) or isarray(value):
# if len(value) == self.nch:
# return value[0]
# raise ValueError('Name "%s" is not a per-channel field.' % field)
def copy(self, mute=False):
"""Copy data in a new object. All arrays copied except for ph_times_m
"""
pprint('Deep copy executed.\n', mute)
new_d = Data(**self) # this make a shallow copy (like a pointer)
# Deep copy (not just reference) or array data
for field in self.burst_fields + self.bg_fields:
# Making sure k is defined
if field in self:
# Make a deepcopy of the per-channel lists
new_d[field] = copy.deepcopy(self[field])
# Set the attribute: new_d.k = new_d[k]
setattr(new_d, field, new_d[field])
return new_d
##
# Methods for photon timestamps (ph_times_m) access
#
def ph_times_hash(self, hash_name='md5', hexdigest=True):
"""Return an hash for the timestamps arrays.
"""
m = hashlib.new(hash_name)
for ph in self.iter_ph_times():
if isinstance(ph, np.ndarray):
m.update(ph.data)
else:
# TODO Handle ph_times in PyTables files
raise NotImplementedError
if hexdigest:
return m.hexdigest()
else:
return m
@property
def ph_data_sizes(self):
"""Array of total number of photons (ph-data) for each channel.
"""
if not hasattr(self, '_ph_data_sizes'):
# This works both for numpy arrays and pytables arrays
self._ph_data_sizes = np.array([ph.shape[0] for ph in
self.ph_times_m])
return self._ph_data_sizes
def _fix_ph_sel(self, ph_sel):
"""For non-ALEX data fix Aex to allow stable comparison."""
msg = 'Photon selection must be of type `Ph_sel` (it was `%s` instead).'
assert isinstance(ph_sel, Ph_sel), (msg % type(ph_sel))
if self.alternated or ph_sel.Dex != 'DAem':
return ph_sel
else:
return Ph_sel(Dex=ph_sel.Dex, Aex='DAem')
def _is_allph(self, ph_sel):
"""Return whether a photon selection `ph_sel` covers all photon."""
if self.alternated:
return ph_sel == Ph_sel(Dex='DAem', Aex='DAem')
else:
return ph_sel.Dex == 'DAem'
def get_ph_mask(self, ich=0, ph_sel=Ph_sel('all')):
"""Returns a mask for `ph_sel` photons in channel `ich`.
The masks are either boolean arrays or slices (full or empty). In
both cases they can be used to index the timestamps of the
corresponding channel.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
assert isinstance(ich, int)
if self._is_allph(ph_sel):
# Note that slice(None) is equivalent to [:].
# Also, numpy arrays are not copied when sliced.
# So getting all photons with this mask is efficient
# Note: the drawback is that the slice cannot be indexed
# (where a normal boolean array would)
return slice(None)
# Handle the case when A_em contains slice objects
if isinstance(self.A_em[ich], slice):
if self.A_em[ich] == slice(None):
if ph_sel.Dex == 'Dem':
return slice(0)
if ph_sel.Dex == 'Aem':
return slice(None)
elif self.A_em[ich] == slice(0):
if ph_sel.Dex == 'Dem':
return slice(None)
if ph_sel.Dex == 'Aem':
return slice(0)
else:
msg = 'When a slice, A_em can only be slice(None) or slice(0).'
raise NotImplementedError(msg)
# Base selections
elif ph_sel == Ph_sel(Dex='Dem'):
return self.get_D_em_D_ex(ich)
elif ph_sel == Ph_sel(Dex='Aem'):
return self.get_A_em_D_ex(ich)
elif ph_sel == Ph_sel(Aex='Dem'):
return self.get_D_em(ich) * self.get_A_ex(ich)
elif ph_sel == Ph_sel(Aex='Aem'):
return self.get_A_em(ich) * self.get_A_ex(ich)
# Selection of all photon in one emission ch
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
return self.get_D_em(ich)
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
return self.get_A_em(ich)
# Selection of all photon in one excitation period
elif ph_sel == Ph_sel(Dex='DAem'):
return self.get_D_ex(ich)
elif ph_sel == Ph_sel(Aex='DAem'):
return self.get_A_ex(ich)
# Selection of all photons except for Dem during Aex
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
return self.get_D_ex(ich) + self.get_A_em(ich) * self.get_A_ex(ich)
else:
raise ValueError('Photon selection not implemented.')
def iter_ph_masks(self, ph_sel=Ph_sel('all')):
"""Iterator returning masks for `ph_sel` photons.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
for ich in range(self.nch):
yield self.get_ph_mask(ich, ph_sel=ph_sel)
def get_ph_times(self, ich=0, ph_sel=Ph_sel('all'), compact=False):
"""Returns the timestamps array for channel `ich`.
This method always returns in-memory arrays, even when ph_times_m
is a disk-backed list of arrays.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
"""
ph = self.ph_times_m[ich]
        # If `ph` is not an in-memory array (i.e. it is on disk), load it
if not isinstance(ph, np.ndarray):
if hasattr(self, '_ph_cache') and self._ph_cache_ich == ich:
ph = self._ph_cache
else:
ph = ph.read()
self._ph_cache = ph
self._ph_cache_ich = ich
ph = ph[self.get_ph_mask(ich, ph_sel=ph_sel)]
if compact:
ph = self._ph_times_compact(ph, ph_sel)
return ph
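    # Illustrative sketch (not part of the API; assumes `d` is a loaded
    # Data instance). Select the DexDem stream of the first channel, or a
    # Dex-only selection compacted to remove the Aex gaps (ALEX data):
    #
    #     ph_dd = d.get_ph_times(ich=0, ph_sel=Ph_sel(Dex='Dem'))
    #     ph_dex = d.get_ph_times(0, ph_sel=Ph_sel(Dex='DAem'), compact=True)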
def iter_ph_times(self, ph_sel=Ph_sel('all'), compact=False):
"""Iterator that returns the arrays of timestamps in `.ph_times_m`.
Arguments:
Same arguments as :meth:`get_ph_mask` except for `ich`.
"""
for ich in range(self.nch):
yield self.get_ph_times(ich, ph_sel=ph_sel, compact=compact)
def _get_ph_mask_single(self, ich, mask_name, negate=False):
"""Get the bool array `mask_name` for channel `ich`.
If the internal "bool array" is a scalar return a slice (full or empty)
"""
mask = np.asarray(getattr(self, mask_name)[ich])
if negate:
mask = np.logical_not(mask)
if len(mask.shape) == 0:
# If mask is a boolean scalar, select all or nothing
mask = slice(None) if mask else slice(0)
return mask
def get_A_em(self, ich=0):
"""Returns a mask to select photons detected in the acceptor ch."""
return self._get_ph_mask_single(ich, 'A_em')
def get_D_em(self, ich=0):
"""Returns a mask to select photons detected in the donor ch."""
return self._get_ph_mask_single(ich, 'A_em', negate=True)
def get_A_ex(self, ich=0):
"""Returns a mask to select photons in acceptor-excitation periods."""
return self._get_ph_mask_single(ich, 'A_ex')
def get_D_ex(self, ich=0):
"""Returns a mask to select photons in donor-excitation periods."""
if self.alternated:
return self._get_ph_mask_single(ich, 'D_ex')
else:
return slice(None)
def get_D_em_D_ex(self, ich=0):
"""Returns a mask of donor photons during donor-excitation."""
if self.alternated:
return self.get_D_em(ich) * self.get_D_ex(ich)
else:
return self.get_D_em(ich)
def get_A_em_D_ex(self, ich=0):
"""Returns a mask of acceptor photons during donor-excitation."""
if self.alternated:
return self.get_A_em(ich) * self.get_D_ex(ich)
else:
return self.get_A_em(ich)
def iter_ph_times_period(self, ich=0, ph_sel=Ph_sel('all')):
"""Iterate through arrays of ph timestamps in each background period.
"""
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
for period in range(self.nperiods):
yield self.get_ph_times_period(period, ich=ich, mask=mask)
def get_ph_times_period(self, period, ich=0, ph_sel=Ph_sel('all'),
mask=None):
"""Return the array of ph_times in `period`, `ich` and `ph_sel`.
"""
istart, iend = self.Lim[ich][period]
period_slice = slice(istart, iend + 1)
ph_times = self.get_ph_times(ich=ich)
if mask is None:
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
if isinstance(mask, slice) and mask == slice(None):
ph_times_period = ph_times[period_slice]
else:
ph_times_period = ph_times[period_slice][mask[period_slice]]
return ph_times_period
def _assert_compact(self, ph_sel):
msg = ('Option compact=True requires a photon selection \n'
'from a single excitation period (either Dex or Aex).')
if not self.alternated:
raise ValueError('Option compact=True requires ALEX data.')
if ph_sel.Dex is not None and ph_sel.Aex is not None:
raise ValueError(msg)
def _excitation_width(self, ph_sel, ich=0):
"""Returns duration of alternation period outside selected excitation.
"""
self._assert_compact(ph_sel)
if ph_sel.Aex is None:
excitation_range = self._D_ON_multich[ich]
elif ph_sel.Dex is None:
excitation_range = self._A_ON_multich[ich]
return _excitation_width(excitation_range, self.alex_period)
def _ph_times_compact(self, ph, ph_sel):
"""Return timestamps in one excitation period with "gaps" removed.
It takes timestamps in the specified alternation period and removes
gaps due to time intervals outside the alternation period selection.
        This allows correcting the photon-rate distortion due to alternation.
Arguments:
ph (array): timestamps array from which gaps have to be removed.
This array **is modified in-place**.
ph_sel (Ph_sel object): photon selection to be compacted.
Note that only one excitation must be specified, but the
emission can be 'Dem', 'Aem' or 'DAem'.
See :mod:`fretbursts.ph_sel` for details.
Returns:
Array of timestamps in one excitation periods with "gaps" removed.
"""
excitation_width = self._excitation_width(ph_sel)
return _ph_times_compact(ph, self.alex_period, excitation_width)
def _get_tuple_multich(self, name):
"""Get a n-element tuple field in multi-ch format (1 row per ch)."""
field = np.array(self[name])
if field.ndim == 1:
field = np.repeat([field], self.nch, axis=0)
return field
@property
def _D_ON_multich(self):
return self._get_tuple_multich('D_ON')
@property
def _A_ON_multich(self):
return self._get_tuple_multich('A_ON')
@property
def _det_donor_accept_multich(self):
return self._get_tuple_multich('det_donor_accept')
##
# Methods and properties for burst-data access
#
@property
def num_bursts(self):
"""Array of number of bursts in each channel."""
return np.array([bursts.num_bursts for bursts in self.mburst])
@property
def burst_widths(self):
"""List of arrays of burst duration in seconds. One array per channel.
"""
return [bursts.width * self.clk_p for bursts in self.mburst]
def burst_sizes_pax_ich(self, ich=0, gamma=1., add_aex=True,
beta=1., donor_ref=True, aex_corr=True):
r"""Return corrected burst sizes for channel `ich`. PAX-only.
When `donor_ref = False`, the formula for PAX-enhanced burst size is:
.. math::
\gamma(F_{D_{ex}D_{em}} + F_{DA_{ex}D_{em}}) +
\frac{1}{\alpha} F_{FRET}
where :math:`\alpha` is the Dex duty-cycle (0.5 if alternation
periods are equal) and :math:`F_{FRET}` is `na`, the AemAex
signal after leakage and direct-excitation corrections.
        If `add_aex = True`, we add the term:
.. math::
\tilde{F}_{A_{ex}A_{em}} / (\alpha\beta)
        where :math:`\tilde{F}_{A_{ex}A_{em}}` is the A emission due to
A excitation (and not due to FRET).
If `aex_corr = False`, then :math:`\alpha` is fixed to 1.
If `donor_ref = True`, the above burst size expression is divided by
:math:`\gamma`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
donor_ref (bool): True or False select different conventions
for burst size correction. For details see
:meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
add_aex (boolean): when True, the returned burst size also
includes photons detected during the DAex. Default is True.
aex_corr (bool): If True, and `add_aex == True`, then divide
the DAexAem term (naa) by the Dex duty cycle. For example,
if Dex and DAex alternation periods are equal, naa is
multiplied by 2. This correction makes the returned value
equal to the denominator of the stoichiometry ratio S_pax
(PAX-enhanced formula). If False, naa is not divided by
the Dex duty-cycle (gamma and beta corrections may still be
applied). If `add_aex == False`, `aex_corr` is ignored.
beta (float): beta correction factor used for the DAexAem term
(naa) of the burst size.
If `add_aex == False` this argument is ignored. Default 1.
Returns
Array of burst sizes for channel `ich`.
See also:
:meth:`Data.burst_sizes_ich`
"""
assert 'PAX' in self.meas_type
naa = self._get_naa_ich(ich) # nar-subtracted
aex_dex_ratio = self._aex_dex_ratio()
alpha = 1
if aex_corr:
alpha = 1 - self._aex_fraction() # Dex duty-cycle
burst_size_dex = self.nd[ich] * gamma + self.na[ich]
burst_size_aex = (self.nda[ich] * gamma +
self.na[ich] * aex_dex_ratio +
naa / (alpha * beta))
burst_size = burst_size_dex
if add_aex:
burst_size += burst_size_aex
if donor_ref:
burst_size /= gamma
return burst_size
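    # Illustrative sketch (assumes `d` holds PAX data after burst search;
    # the gamma value is arbitrary): PAX-enhanced burst sizes including
    # Aex counts and the duty-cycle correction.
    #
    #     sizes_pax = d.burst_sizes_pax_ich(ich=0, gamma=0.7, add_aex=True)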
def burst_sizes_ich(self, ich=0, gamma=1., add_naa=False,
beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for channel `ich`.
If `donor_ref == True` (default) the gamma corrected burst size is
computed according to::
1) nd + na / gamma
Otherwise, if `donor_ref == False`, the gamma corrected burst size is::
2) nd * gamma + na
With the definition (1) the corrected burst size is equal to the raw
        burst size for zero-FRET or D-only bursts (hence the name `donor_ref`).
With the definition (2) the corrected burst size is equal to the raw
burst size for 100%-FRET bursts.
In an ALEX measurement, use `add_naa = True` to add counts from
AexAem stream to the returned burst size. The argument `gamma` and
`beta` are used to correctly scale `naa` so that it become
commensurate with the Dex corrected burst size. In particular,
when using definition (1) (i.e. `donor_ref = True`), the total
burst size is::
(nd + na/gamma) + naa / (beta * gamma)
Conversely, when using definition (2) (`donor_ref = False`), the
total burst size is::
(nd * gamma + na) + naa / beta
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
add_naa (boolean): when True, add a term for AexAem photons when
computing burst size. Default False.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
beta (float): beta correction factor used for the AexAem term
of the burst size. Default 1. If `add_naa = False` or
measurement is not ALEX this argument is ignored.
For more info see explanation above.
donor_ref (bool): select the convention for burst size correction.
See details above in the function description.
Returns
Array of burst sizes for channel `ich`.
See also :meth:`fretbursts.burstlib.Data.get_naa_corrected`.
"""
if donor_ref:
burst_size = self.nd[ich] + self.na[ich] / gamma
else:
burst_size = self.nd[ich] * gamma + self.na[ich]
if add_naa and self.alternated:
kws = dict(ich=ich, gamma=gamma, beta=beta, donor_ref=donor_ref)
burst_size += self.get_naa_corrected(**kws)
return burst_size
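    # Illustrative sketch (assumes `d` is a Data object after burst search;
    # gamma=0.7 is an arbitrary example value). The two conventions differ
    # only by an overall 1/gamma factor:
    #
    #     sizes_dref = d.burst_sizes_ich(ich=0, gamma=0.7)   # nd + na/gamma
    #     sizes_aref = d.burst_sizes_ich(ich=0, gamma=0.7, donor_ref=False)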
def get_naa_corrected(self, ich=0, gamma=1., beta=1., donor_ref=True):
"""Return corrected naa array for channel `ich`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
gamma (floats): gamma-factor to use in computing the corrected naa.
beta (float): beta-factor to use in computing the corrected naa.
donor_ref (bool): Select the convention for `naa` correction.
If True (default), uses `naa / (beta * gamma)`. Otherwise,
uses `naa / beta`. A consistent convention should be used
for the corrected Dex burst size in order to make it
commensurable with naa.
See also :meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
"""
naa = self._get_naa_ich(ich) # with eventual duty-cycle correction
if donor_ref:
correction = (gamma * beta)
else:
correction = beta
return naa / correction
def _get_naa_ich(self, ich=0):
"""Return naa for `ich` both in ALEX and PAX measurements.
In case of PAX, returns naa using the duty-cycle correction::
naa = self.naa - aex_dex_ratio * self.nar
where `self.nar` is equal to `self.na` before leakage and direct
        excitation correction, and `aex_dex_ratio` is the ratio of the Aex
        to Dex alternation period durations.
"""
naa = self.naa[ich]
if 'PAX' in self.meas_type:
# ATTENTION: do not modify naa inplace
naa = naa - self._aex_dex_ratio() * self.nar[ich]
return naa
def burst_sizes(self, gamma=1., add_naa=False, beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for all the channel.
Compute burst sizes by calling, for each channel,
:meth:`burst_sizes_ich`.
See :meth:`burst_sizes_ich` for description of the arguments.
Returns
List of arrays of burst sizes, one array per channel.
"""
kwargs = dict(gamma=gamma, add_naa=add_naa, beta=beta,
donor_ref=donor_ref)
bsize_list = [self.burst_sizes_ich(ich, **kwargs) for ich in
range(self.nch)]
return np.array(bsize_list)
def iter_bursts_ph(self, ich=0):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
for istart, istop in iter_bursts_start_stop(self.mburst[ich]):
yield istart, istop
def bursts_slice(self, N1=0, N2=-1):
"""Return new Data object with bursts between `N1` and `N2`
`N1` and `N2` can be scalars or lists (one per ch).
"""
if np.isscalar(N1): N1 = [N1] * self.nch
if np.isscalar(N2): N2 = [N2] * self.nch
assert len(N1) == len(N2) == self.nch
d = Data(**self)
d.add(mburst=[b[n1:n2].copy() for b, n1, n2 in zip(d.mburst, N1, N2)])
d.add(nt=[nt[n1:n2] for nt, n1, n2 in zip(d.nt, N1, N2)])
d.add(nd=[nd[n1:n2] for nd, n1, n2 in zip(d.nd, N1, N2)])
d.add(na=[na[n1:n2] for na, n1, n2 in zip(d.na, N1, N2)])
for name in ('naa', 'nda', 'nar'):
if name in d:
d.add(**{name:
[x[n1:n2] for x, n1, n2 in zip(d[name], N1, N2)]})
d.calc_fret(pax=self.pax) # recalculate fret efficiency
return d
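    # Illustrative sketch (assumes `d` has bursts): keep only the first
    # 100 bursts in each channel.
    #
    #     d_first100 = d.bursts_slice(N1=0, N2=100)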
def delete_burst_data(self):
"""Erase all the burst data"""
for name in self.burst_fields + self.burst_metadata:
if name in self:
self.delete(name)
for name in ('E_fitter', 'S_fitter'):
if hasattr(self, name):
delattr(self, name)
##
# Methods for high-level data transformation
#
def slice_ph(self, time_s1=0, time_s2=None, s='slice'):
"""Return a new Data object with ph in [`time_s1`,`time_s2`] (seconds)
If ALEX, this method must be called right after
:func:`fretbursts.loader.alex_apply_periods` (with `delete_ph_t=True`)
and before any background estimation or burst search.
"""
if time_s2 is None:
time_s2 = self.time_max
if time_s2 >= self.time_max and time_s1 <= 0:
return self.copy()
assert time_s1 < self.time_max
t1_clk, t2_clk = int(time_s1 / self.clk_p), int(time_s2 / self.clk_p)
masks = [(ph >= t1_clk) * (ph < t2_clk) for ph in self.iter_ph_times()]
new_d = Data(**self)
for name in self.ph_fields:
if name in self:
new_d[name] = [a[mask] for a, mask in zip(self[name], masks)]
setattr(new_d, name, new_d[name])
new_d.delete_burst_data()
# Shift timestamps to start from 0 to avoid problems with BG calc
for ich in range(self.nch):
ph_i = new_d.get_ph_times(ich)
ph_i -= t1_clk
new_d.s.append(s)
# Delete eventual cached properties
for attr in ['_time_min', '_time_max']:
if hasattr(new_d, attr):
delattr(new_d, attr)
return new_d
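    # Illustrative sketch (assumes `d` holds timestamps and no burst search
    # was performed yet): keep only the first 10 minutes of the acquisition.
    #
    #     d_10min = d.slice_ph(time_s1=0, time_s2=600)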
def collapse(self, update_gamma=True, skip_ch=None):
"""Returns an object with 1-spot data joining the multi-spot data.
Arguments:
skip_ch (tuple of ints): list of channels to skip.
If None, keep all channels.
update_gamma (bool): if True, recompute gamma as mean of the
per-channel gamma. If False, do not update gamma.
If True, gamma becomes a single value and the update has the
side effect of recomputing E and S values, discarding
previous per-channel corrections. If False, gamma is not
updated (it stays with multi-spot values) and E and S are
not recomputed.
Note:
When using `update_gamma=False`, burst selections on the
collapsed `Data` object should be done with
`computefret=False`, otherwise any attempt to use multi-spot
gamma for single-spot data will raise an error.
"""
dc = Data(**self)
mch_bursts = self.mburst
if skip_ch is not None:
mch_bursts = [bursts for i, bursts in enumerate(mch_bursts)
if i not in skip_ch]
bursts = bslib.Bursts.merge(mch_bursts, sort=False)
# Sort by start times, and when equal by stop times
indexsort = np.lexsort((bursts.stop, bursts.start))
dc.add(mburst=[bursts[indexsort]])
ich_burst = [i * np.ones(nb) for i, nb in enumerate(self.num_bursts)]
dc.add(ich_burst=np.hstack(ich_burst)[indexsort])
for name in self.burst_fields:
            if name in self and name != 'mburst':
# Concatenate arrays along axis = 0
value = [np.concatenate(self[name])[indexsort]]
dc.add(**{name: value})
dc.add(nch=1)
dc.add(_chi_ch=1.)
# NOTE: Updating gamma has the side effect of recomputing E
# (and S if ALEX). We need to update gamma because, in general,
# gamma can be an array with a value for each ch.
# However, the per-channel gamma correction is lost once both
# gamma and chi_ch are made scalar.
if update_gamma:
dc._update_gamma(np.mean(self.get_gamma_array()))
return dc
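    # Illustrative sketch for multi-spot data (assumes `d.nch > 1`): merge
    # all spots except the first into a single virtual channel.
    #
    #     d_single = d.collapse(skip_ch=(0,))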
##
# Utility methods
#
def get_params(self):
"""Returns a plain dict containing only parameters and no arrays.
This can be used as a summary of data analysis parameters.
        Additional keys `name` and `Name` are added with values
        from `.name` and `.Name()`.
"""
p_names = ['fname', 'clk_p', 'nch', 'ph_sel', 'L', 'm', 'F', 'P',
'_leakage', '_dir_ex', '_gamma', 'bg_time_s',
'T', 'rate_th',
'bg_corrected', 'leakage_corrected', 'dir_ex_corrected',
'dithering', '_chi_ch', 's', 'ALEX']
p_dict = dict(self)
        # Iterate over a copy of the keys so items can be popped (Python 3)
        for name in list(p_dict.keys()):
if name not in p_names:
p_dict.pop(name)
p_dict.update(name=self.name, Name=self.Name(), bg_mean=self.bg_mean,
nperiods=self.nperiods)
return p_dict
def expand(self, ich=0, alex_naa=False, width=False):
"""Return per-burst D and A sizes (nd, na) and their background counts.
This method returns for each bursts the corrected signal counts and
background counts in donor and acceptor channels. Optionally, the
burst width is also returned.
Arguments:
ich (int): channel for the bursts (can be not 0 only in multi-spot)
            alex_naa (bool): if True and self.ALEX is True, also return
                burst sizes and background for acceptor photons during
                acceptor excitation.
            width (bool): whether to return the burst duration (in seconds).
Returns:
List of arrays: nd, na, donor bg, acceptor bg.
If `alex_naa` is True returns: nd, na, naa, bg_d, bg_a, bg_aa.
If `width` is True returns the bursts duration (in sec.) as last
element.
"""
period = self.bp[ich]
w = self.mburst[ich].width * self.clk_p
bg_a = self.bg[Ph_sel(Dex='Aem')][ich][period] * w
bg_d = self.bg[Ph_sel(Dex='Dem')][ich][period] * w
res = [self.nd[ich], self.na[ich]]
if self.alternated and alex_naa:
bg_aa = self.bg[Ph_sel(Aex='Aem')][ich][period] * w
res.extend([self.naa[ich], bg_d, bg_a, bg_aa])
else:
res.extend([bg_d, bg_a])
if width:
res.append(w)
return res
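    # Illustrative sketch (assumes `d` has bursts): per-burst signal and
    # background counts plus burst durations for channel 0.
    #
    #     nd, na, bg_d, bg_a, widths = d.expand(ich=0, width=True)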
def burst_data_ich(self, ich):
"""Return a dict of burst data for channel `ich`."""
bursts = {}
bursts['size_raw'] = self.mburst[ich].counts
bursts['t_start'] = self.mburst[ich].start * self.clk_p
bursts['t_stop'] = self.mburst[ich].stop * self.clk_p
bursts['i_start'] = self.mburst[ich].istart
bursts['i_stop'] = self.mburst[ich].istop
period = bursts['bg_period'] = self.bp[ich]
width = self.mburst[ich].width * self.clk_p
bursts['width_ms'] = width * 1e3
bursts['bg_ad'] = self.bg[Ph_sel(Dex='Aem')][ich][period] * width
bursts['bg_dd'] = self.bg[Ph_sel(Dex='Dem')][ich][period] * width
if self.alternated:
bursts['bg_aa'] = self.bg[Ph_sel(Aex='Aem')][ich][period] * width
bursts['bg_da'] = self.bg[Ph_sel(Aex='Dem')][ich][period] * width
burst_fields = self.burst_fields[:]
burst_fields.remove('mburst')
burst_fields.remove('bp')
for field in burst_fields:
if field in self:
bursts[field] = self[field][ich]
return bursts
@property
def time_max(self):
"""The last recorded time in seconds."""
if not hasattr(self, '_time_max'):
self._time_max = self._time_reduce(last=True, func=max)
return self._time_max
@property
def time_min(self):
"""The first recorded time in seconds."""
if not hasattr(self, '_time_min'):
self._time_min = self._time_reduce(last=False, func=min)
return self._time_min
def _time_reduce(self, last=True, func=max):
"""Return first or last timestamp per-ch, reduced with `func`.
"""
idx = -1 if last else 0
# Get either ph_times_m or ph_times_t
ph_times = None
for ph_times_name in ['ph_times_m', 'ph_times_t']:
try:
ph_times = self[ph_times_name]
except KeyError:
pass
else:
break
if ph_times is not None:
# This works with both numpy arrays and pytables arrays
time = func(t[idx] for t in ph_times if t.shape[0] > 0)
elif 'mburst' in self:
if last:
time = func(bursts[idx].stop for bursts in self.mburst)
else:
time = func(bursts[idx].start for bursts in self.mburst)
else:
raise ValueError("No timestamps or bursts found.")
return time * self.clk_p
def ph_in_bursts_mask_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return mask of all photons inside bursts for channel `ich`.
Returns
Boolean array for photons in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
bursts_mask = ph_in_bursts_mask(self.ph_data_sizes[ich],
self.mburst[ich])
if self._is_allph(ph_sel):
return bursts_mask
else:
ph_sel_mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
return ph_sel_mask * bursts_mask
def ph_in_bursts_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return timestamps of photons inside bursts for channel `ich`.
Returns
Array of photon timestamps in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
ph_all = self.get_ph_times(ich=ich)
bursts_mask = self.ph_in_bursts_mask_ich(ich, ph_sel)
return ph_all[bursts_mask]
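    # Illustrative sketch (assumes `d` has bursts): timestamps of DexAem
    # photons falling inside any burst of channel 0.
    #
    #     ph_in_b = d.ph_in_bursts_ich(0, ph_sel=Ph_sel(Dex='Aem'))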
##
# Background analysis methods
#
def _obsolete_bg_attr(self, attrname, ph_sel):
print('The Data.%s attribute is deprecated. Please use '
'Data.bg(%s) instead.' % (attrname, repr(ph_sel)))
bg_attrs = ('bg_dd', 'bg_ad', 'bg_da', 'bg_aa')
bg_mean_attrs = ('rate_m', 'rate_dd', 'rate_ad', 'rate_da', 'rate_aa')
assert attrname in bg_attrs or attrname in bg_mean_attrs
if attrname in bg_attrs:
bg_field = 'bg'
elif attrname in bg_mean_attrs:
bg_field = 'bg_mean'
try:
value = getattr(self, bg_field)[ph_sel]
except AttributeError as e:
# This only happens when trying to access 'bg' because
# 'bg_mean' raises RuntimeError when missing.
msg = 'No attribute `%s` found. Please compute background first.'
raise_from(RuntimeError(msg % bg_field), e)
return value
@property
def rate_m(self):
return self._obsolete_bg_attr('rate_m', Ph_sel('all'))
@property
def rate_dd(self):
return self._obsolete_bg_attr('rate_dd', Ph_sel(Dex='Dem'))
@property
def rate_ad(self):
return self._obsolete_bg_attr('rate_ad', Ph_sel(Dex='Aem'))
@property
def rate_da(self):
return self._obsolete_bg_attr('rate_da', Ph_sel(Aex='Dem'))
@property
def rate_aa(self):
return self._obsolete_bg_attr('rate_aa', Ph_sel(Aex='Aem'))
@property
def bg_dd(self):
return self._obsolete_bg_attr('bg_dd', Ph_sel(Dex='Dem'))
@property
def bg_ad(self):
return self._obsolete_bg_attr('bg_ad', Ph_sel(Dex='Aem'))
@property
def bg_da(self):
return self._obsolete_bg_attr('bg_da', Ph_sel(Aex='Dem'))
@property
def bg_aa(self):
return self._obsolete_bg_attr('bg_aa', Ph_sel(Aex='Aem'))
def calc_bg_cache(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True,
recompute=False):
"""Compute time-dependent background rates for all the channels.
This version is the cached version of :meth:`calc_bg`.
This method tries to load the background data from a cache file.
If a saved background data is not found, it computes
the background and stores it to disk.
The arguments are the same as :meth:`calc_bg` with the only addition
of `recompute` (bool) to force a background recomputation even if
a cached version is found.
        For more details on the other arguments see :meth:`calc_bg`.
"""
bg_cache.calc_bg_cache(self, fun, time_s=time_s,
tail_min_us=tail_min_us, F_bg=F_bg,
error_metrics=error_metrics, fit_allph=fit_allph,
recompute=recompute)
def _get_auto_bg_th_arrays(self, F_bg=2, tail_min_us0=250):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
are 1-D arrays of size nch.
"""
Th_us = {}
for ph_sel in self.ph_streams:
th_us = np.zeros(self.nch)
for ich, ph in enumerate(self.iter_ph_times(ph_sel=ph_sel)):
if ph.size > 0:
bg_rate, _ = bg.exp_fit(ph, tail_min_us=tail_min_us0)
th_us[ich] = 1e6 * F_bg / bg_rate
Th_us[ph_sel] = th_us
# Save the input used to generate Th_us
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
return Th_us
def _get_bg_th_arrays(self, tail_min_us, nperiods):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
        are 1-D arrays of size `nperiods`.
"""
n_streams = len(self.ph_streams)
if np.size(tail_min_us) == 1:
tail_min_us = np.repeat(tail_min_us, n_streams)
elif np.size(tail_min_us) == n_streams:
tail_min_us = np.asarray(tail_min_us)
        else:
raise ValueError('Wrong tail_min_us length (%d).' %
len(tail_min_us))
th_us = {}
for i, key in enumerate(self.ph_streams):
th_us[key] = np.ones(nperiods) * tail_min_us[i]
# Save the input used to generate Th_us
self.add(bg_th_us_user=tail_min_us)
return th_us
def _clean_bg_data(self):
"""Remove background fields specific of only one fit type.
Computing background with manual or 'auto' threshold results in
different sets of attributes being saved. This method removes these
attributes and should be called before recomputing the background
to avoid having old stale attributes of a previous background fit.
"""
# Attributes specific of manual or 'auto' bg fit
field_list = ['bg_auto_th_us0', 'bg_auto_F_bg', 'bg_th_us_user']
for field in field_list:
if field in self:
self.delete(field)
if hasattr(self, '_bg_mean'):
delattr(self, '_bg_mean')
def _get_num_periods(self, time_s):
"""Return the number of periods using `time_s` as period duration.
"""
duration = self.time_max - self.time_min
        # Take the ceil to have at least 1 period
nperiods = np.ceil(duration / time_s)
# Discard last period if negligibly small to avoid problems with
# background fit with very few photons.
if nperiods > 1:
last_period = self.time_max - time_s * (nperiods - 1)
# Discard last period if smaller than 3% of the bg period
if last_period < time_s * 0.03:
nperiods -= 1
return int(nperiods)
def calc_bg(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True):
"""Compute time-dependent background rates for all the channels.
Compute background rates for donor, acceptor and both detectors.
        The rates are computed every `time_s` seconds, making it possible
        to track variations during the measurement.
Arguments:
fun (function): function for background estimation (example
`bg.exp_fit`)
            time_s (float, seconds): compute background every `time_s` seconds
tail_min_us (float, tuple or string): min threshold in us for
photon waiting times to use in background estimation.
                If a float, the same threshold is used for 'all', DD, AD
                and AA photons and for all the channels.
If a 3 or 4 element tuple, each value is used for 'all', DD, AD
or AA photons, same value for all the channels.
If 'auto', the threshold is computed for each stream ('all',
DD, DA, AA) and for each channel as `bg_F * rate_ml0`.
`rate_ml0` is an initial estimation of the rate performed using
:func:`bg.exp_fit` and a fixed threshold (default 250us).
            F_bg (float): when `tail_min_us` is 'auto', the factor by which
                the initial background estimate is multiplied to compute the
                threshold.
error_metrics (string): Specifies the error metric to use.
See :func:`fretbursts.background.exp_fit` for more details.
            fit_allph (bool): if True (default) the background for the
                all-photons stream is fitted. If False it is computed as
                the sum of the backgrounds of all the other streams.
The background estimation functions are defined in the module
`background` (conventionally imported as `bg`).
Example:
Compute background with `bg.exp_fit` (inter-photon delays MLE
tail fitting), every 30s, with automatic tail-threshold::
                d.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto')
Returns:
None, all the results are saved in the object itself.
"""
pprint(" - Calculating BG rates ... ")
self._clean_bg_data()
kwargs = dict(clk_p=self.clk_p, error_metrics=error_metrics)
nperiods = self._get_num_periods(time_s)
streams_noall = [s for s in self.ph_streams if s != Ph_sel('all')]
bg_auto_th = tail_min_us == 'auto'
if bg_auto_th:
tail_min_us0 = 250
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
auto_th_kwargs = dict(clk_p=self.clk_p, tail_min_us=tail_min_us0)
th_us = {}
for key in self.ph_streams:
th_us[key] = np.zeros(nperiods)
else:
th_us = self._get_bg_th_arrays(tail_min_us, nperiods)
Lim, Ph_p = [], []
BG, BG_err = [], []
Th_us = []
for ich, ph_ch in enumerate(self.iter_ph_times()):
masks = {sel: self.get_ph_mask(ich, ph_sel=sel)
for sel in self.ph_streams}
bins = ((np.arange(nperiods + 1) * time_s + self.time_min) /
self.clk_p)
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
bg = {sel: np.zeros(nperiods) for sel in self.ph_streams}
bg_err = {sel: np.zeros(nperiods) for sel in self.ph_streams}
i1 = 0
for ip in range(nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1 - 1]))
ph_i = ph_ch[i0:i1]
if fit_allph:
sel = Ph_sel('all')
if bg_auto_th:
_bg, _ = fun(ph_i, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i, tail_min_us=th_us[sel][ip], **kwargs)
for sel in streams_noall:
# This supports cases of D-only or A-only timestamps
# where self.A_em[ich] is a bool and not a bool-array
# In this case, the mask of either DexDem or DexAem is
# slice(None) (all-elements selection).
if isinstance(masks[sel], slice):
if masks[sel] == slice(None):
bg[sel][ip] = bg[Ph_sel('all')][ip]
bg_err[sel][ip] = bg_err[Ph_sel('all')][ip]
continue
else:
ph_i_sel = ph_i[masks[sel][i0:i1]]
if ph_i_sel.size > 0:
if bg_auto_th:
_bg, _ = fun(ph_i_sel, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i_sel, tail_min_us=th_us[sel][ip], **kwargs)
if not fit_allph:
bg[Ph_sel('all')] += sum(bg[s] for s in streams_noall)
bg_err[Ph_sel('all')] += sum(bg_err[s] for s in streams_noall)
Lim.append(lim)
Ph_p.append(ph_p)
BG.append(bg)
BG_err.append(bg_err)
Th_us.append(th_us)
# Make Dict Of Lists (DOL) from Lists of Dicts
BG_dol, BG_err_dol, Th_us_dol = {}, {}, {}
for sel in self.ph_streams:
BG_dol[sel] = [bg_ch[sel] for bg_ch in BG]
BG_err_dol[sel] = [err_ch[sel] for err_ch in BG_err]
Th_us_dol[sel] = [th_ch[sel] for th_ch in Th_us]
self.add(bg=BG_dol, bg_err=BG_err_dol, bg_th_us=Th_us_dol,
Lim=Lim, Ph_p=Ph_p,
bg_fun=fun, bg_fun_name=fun.__name__,
bg_time_s=time_s, bg_ph_sel=Ph_sel('all'),
                 bg_auto_th=bg_auto_th,  # bool, True if using auto-threshold
)
pprint("[DONE]\n")
@property
def nperiods(self):
return len(self.bg[Ph_sel('all')][0])
@property
def bg_mean(self):
if 'bg' not in self:
raise RuntimeError('No background found, compute it first.')
if not hasattr(self, '_bg_mean'):
self._bg_mean = {k: [bg_ch.mean() for bg_ch in bg_ph_sel]
for k, bg_ph_sel in self.bg.items()}
return self._bg_mean
def recompute_bg_lim_ph_p(self, ph_sel, mute=False):
"""Recompute self.Lim and selp.Ph_p relative to ph selection `ph_sel`
`ph_sel` is a Ph_sel object selecting the timestamps in which self.Lim
and self.Ph_p are being computed.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if self.bg_ph_sel == ph_sel:
return
pprint(" - Recomputing background limits for %s ... " %
str(ph_sel), mute)
bg_time_clk = self.bg_time_s / self.clk_p
Lim, Ph_p = [], []
        for ph_ch in self.iter_ph_times(ph_sel):
bins = np.arange(self.nperiods + 1) * bg_time_clk
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
i1 = 0
for ip in range(self.nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1-1]))
Lim.append(lim)
Ph_p.append(ph_p)
self.add(Lim=Lim, Ph_p=Ph_p, bg_ph_sel=ph_sel)
pprint("[DONE]\n", mute)
##
# Burst analysis methods
#
def _calc_burst_period(self):
"""Compute for each burst the "background period" `bp`.
Background periods are the time intervals on which the BG is computed.
"""
P = []
for b, lim in zip(self.mburst, self.Lim):
p = zeros(b.num_bursts, dtype=np.int16)
if b.num_bursts > 0:
istart = b.istart
for i, (l0, l1) in enumerate(lim):
p[(istart >= l0) * (istart <= l1)] = i
P.append(p)
self.add(bp=P)
def _param_as_mch_array(self, par):
"""Regardless of `par` size, return an arrays with size == nch.
if `par` is scalar the arrays repeats the calar multiple times
if `par is a list/array must be of length `nch`.
"""
assert size(par) == 1 or size(par) == self.nch
return np.repeat(par, self.nch) if size(par) == 1 else np.asarray(par)
def bg_from(self, ph_sel):
"""Return the background rates for the specified photon selection.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if ph_sel in self.ph_streams:
return self.bg[ph_sel]
elif ph_sel == Ph_sel(Dex='DAem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Aex='DAem'):
sel = Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Aex='Dem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
sel = Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
sel = (Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem'))
bg = [b1 + b2 + b3 for b1, b2, b3 in
zip(self.bg[sel[0]], self.bg[sel[1]], self.bg[sel[2]])]
else:
raise NotImplementedError('Photon selection %s not implemented.' %
str(ph_sel))
return bg
def _calc_T(self, m, P, F=1., ph_sel=Ph_sel('all'), c=-1):
"""If P is None use F, otherwise uses both P *and* F (F defaults to 1).
When P is None, compute the time lag T for burst search according to::
T = (m - 1 - c) / (F * bg_rate)
"""
# Regardless of F and P sizes, FF and PP are arrays with size == nch
FF = self._param_as_mch_array(F)
PP = self._param_as_mch_array(P)
if P is None:
# NOTE: the following lambda ignores Pi
find_T = lambda m, Fi, Pi, bg: (m - 1 - c) / (bg * Fi)
else:
if F != 1:
print("WARNING: BS prob. th. with modified BG rate (F=%.1f)"
% F)
find_T = lambda m, Fi, Pi, bg: find_optimal_T_bga(bg*Fi, m, 1-Pi)
TT, T, rate_th = [], [], []
bg_bs = self.bg_from(ph_sel)
for bg_ch, F_ch, P_ch in zip(bg_bs, FF, PP):
# All "T" are in seconds
Tch = find_T(m, F_ch, P_ch, bg_ch)
TT.append(Tch)
T.append(Tch.mean())
rate_th.append(np.mean(m / Tch))
self.add(TT=TT, T=T, bg_bs=bg_bs, FF=FF, PP=PP, F=F, P=P,
rate_th=rate_th)
def _burst_search_rate(self, m, L, min_rate_cps, c=-1, ph_sel=Ph_sel('all'),
compact=False, index_allph=True, verbose=True,
pure_python=False):
"""Compute burst search using a fixed minimum photon rate.
The burst starts when, for `m` consecutive photons::
(m - 1 - c) / (t[last] - t[first]) >= min_rate_cps
Arguments:
min_rate_cps (float or array): minimum photon rate for burst start.
If array is one value per channel.
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
Min_rate_cps = self._param_as_mch_array(min_rate_cps)
mburst = []
T_clk = (m - 1 - c) / Min_rate_cps / self.clk_p
for ich, t_clk in enumerate(T_clk):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
label = '%s CH%d' % (ph_sel, ich + 1) if verbose else None
burstarray = bsearch(ph_bs, L, m, t_clk, label=label, verbose=verbose)
if burstarray.size > 1:
bursts = bslib.Bursts(burstarray)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
mburst.append(bursts)
self.add(mburst=mburst, rate_th=Min_rate_cps, T=T_clk * self.clk_p)
if ph_sel != Ph_sel('all') and index_allph:
self._fix_mburst_from(ph_sel=ph_sel)
def _burst_search_TT(self, m, L, ph_sel=Ph_sel('all'), verbose=True,
compact=False, index_allph=True, pure_python=False,
mute=False):
"""Compute burst search with params `m`, `L` on ph selection `ph_sel`
Requires the list of arrays `self.TT` with the max time-thresholds in
the different burst periods for each channel (use `._calc_T()`).
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
self.recompute_bg_lim_ph_p(ph_sel=ph_sel, mute=mute)
MBurst = []
label = ''
for ich, T in enumerate(self.TT):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
burstarray_ch_list = []
Tck = T / self.clk_p
for ip, (l0, l1) in enumerate(self.Lim[ich]):
if verbose:
label = '%s CH%d-%d' % (ph_sel, ich + 1, ip)
burstarray = bsearch(ph_bs, L, m, Tck[ip], slice_=(l0, l1 + 1),
label=label, verbose=verbose)
if burstarray.size > 1:
burstarray_ch_list.append(burstarray)
if len(burstarray_ch_list) > 0:
data = np.vstack(burstarray_ch_list)
bursts = bslib.Bursts(data)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
MBurst.append(bursts)
self.add(mburst=MBurst)
if ph_sel != Ph_sel('all') and index_allph:
# Convert the burst data to be relative to ph_times_m.
# Convert both Lim/Ph_p and mburst, as they are both needed
# to compute `.bp`.
self.recompute_bg_lim_ph_p(ph_sel=Ph_sel('all'), mute=mute)
self._fix_mburst_from(ph_sel=ph_sel, mute=mute)
def _fix_mburst_from(self, ph_sel, mute=False):
"""Convert burst data from any ph_sel to 'all' timestamps selection.
"""
assert isinstance(ph_sel, Ph_sel) and not self._is_allph(ph_sel)
pprint(' - Fixing burst data to refer to ph_times_m ... ', mute)
for bursts, mask in zip(self.mburst,
self.iter_ph_masks(ph_sel=ph_sel)):
bursts.recompute_index_expand(mask, out=bursts)
pprint('[DONE]\n', mute)
def burst_search(self, L=None, m=10, F=6., P=None, min_rate_cps=None,
ph_sel=Ph_sel('all'), compact=False, index_allph=True,
c=-1, computefret=True, max_rate=False, dither=False,
pure_python=False, verbose=False, mute=False, pax=False):
"""Performs a burst search with specified parameters.
This method performs a sliding-window burst search without
binning the timestamps. The burst starts when the rate of `m`
photons is above a minimum rate, and stops when the rate falls below
the threshold. The result of the burst search is stored in the
`mburst` attribute (a list of Bursts objects, one per channel)
containing start/stop times and indexes. By default, after burst
search, this method computes donor and acceptor counts, it applies
burst corrections (background, leakage, etc...) and computes
E (and S in case of ALEX). You can skip these steps by passing
`computefret=False`.
The minimum rate can be explicitly specified with the `min_rate_cps`
argument, or computed as a function of the background rate with the
`F` argument.
Parameters:
m (int): number of consecutive photons used to compute the
photon rate. Typical values 5-20. Default 10.
L (int or None): minimum number of photons in burst. If None
(default) L = m is used.
F (float): defines how many times higher than the background rate
is the minimum rate used for burst search
(`min rate = F * bg. rate`), assuming that `P = None` (default).
Typical values are 3-9. Default 6.
P (float): threshold for burst detection expressed as a
probability that a detected bursts is not due to a Poisson
background. If not None, `P` overrides `F`. Note that the
background process is experimentally super-Poisson so this
probability is not physically very meaningful. Using this
argument is discouraged.
min_rate_cps (float or list/array): minimum rate in cps for burst
start. If not None, it has the precedence over `P` and `F`.
If non-scalar, contains one rate per each multispot channel.
Typical values range from 20e3 to 100e3.
ph_sel (Ph_sel object): defines the "photon selection" (or stream)
to be used for burst search. Default: all photons.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
index_allph (bool): if True (default), the indexes of burst start
and stop (`istart`, `istop`) are relative to the full
timestamp array. If False, the indexes are relative to
timestamps selected by the `ph_sel` argument.
c (float): correction factor used in the rate vs time-lags relation.
`c` affects the computation of the burst-search parameter `T`.
When `F` is not None, `T = (m - 1 - c) / (F * bg_rate)`.
When using `min_rate_cps`, `T = (m - 1 - c) / min_rate_cps`.
computefret (bool): if True (default) compute donor and acceptor
counts, apply corrections (background, leakage, direct
excitation) and compute E (and S). If False, skip all these
steps and stop just after the initial burst search.
max_rate (bool): if True compute the max photon rate inside each
burst using the same `m` used for burst search. If False
(default) skip this step.
dither (bool): if True applies dithering corrections to burst
counts. Default False. See :meth:`Data.dither`.
pure_python (bool): if True, uses the pure python functions even
when optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
                Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Note:
when using `P` or `F` the background rates are needed, so
`.calc_bg()` must be called before the burst search.
Example:
d.burst_search(m=10, F=6)
Returns:
None, all the results are saved in the `Data` object.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if compact:
self._assert_compact(ph_sel)
pprint(" - Performing burst search (verbose=%s) ..." % verbose, mute)
# Erase any previous burst data
self.delete_burst_data()
if L is None:
L = m
if min_rate_cps is not None:
# Saves rate_th in self
self._burst_search_rate(m=m, L=L, min_rate_cps=min_rate_cps, c=c,
ph_sel=ph_sel, compact=compact,
index_allph=index_allph,
verbose=verbose, pure_python=pure_python)
else:
# Compute TT, saves P and F in self
self._calc_T(m=m, P=P, F=F, ph_sel=ph_sel, c=c)
# Use TT and compute mburst
self._burst_search_TT(L=L, m=m, ph_sel=ph_sel, compact=compact,
index_allph=index_allph, verbose=verbose,
pure_python=pure_python, mute=mute)
pprint("[DONE]\n", mute)
pprint(" - Calculating burst periods ...", mute)
self._calc_burst_period() # writes bp
pprint("[DONE]\n", mute)
# (P, F) or rate_th are saved in _calc_T() or _burst_search_rate()
self.add(m=m, L=L, ph_sel=ph_sel)
# The correction flags are both set here and in calc_ph_num() so that
# they are always consistent. Case 1: we perform only burst search
# (with no call to calc_ph_num). Case 2: we re-call calc_ph_num()
# without doing a new burst search
self.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
self._burst_search_postprocess(
computefret=computefret, max_rate=max_rate, dither=dither,
pure_python=pure_python, mute=mute, pax=pax)
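    # Illustrative sketches (assume the background was computed with
    # `d.calc_bg` beforehand):
    #
    #     d.burst_search(m=10, F=6)                    # rate = 6 x bg rate
    #     d.burst_search(m=10, min_rate_cps=50e3)      # fixed 50-kcps rate
    #     d.burst_search(m=10, F=6, ph_sel=Ph_sel(Dex='Dem'))  # DexDem only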
def _burst_search_postprocess(self, computefret, max_rate, dither,
pure_python, mute, pax):
if computefret:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
self.calc_fret(count_ph=True, corrections=True, dither=dither,
mute=mute, pure_python=pure_python, pax=pax)
pprint(" [DONE Counting D/A]\n", mute)
if max_rate:
pprint(" - Computing max rates in burst ...", mute)
self.calc_max_rate(m=self.m)
pprint("[DONE]\n", mute)
def calc_ph_num(self, alex_all=False, pure_python=False):
"""Computes number of D, A (and AA) photons in each burst.
Arguments:
            alex_all (bool): if True and self.ALEX is True, also compute the
                donor channel photons during acceptor excitation (`nda`).
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
Returns:
Saves `nd`, `na`, `nt` (and eventually `naa`, `nda`) in self.
Returns None.
"""
mch_count_ph_in_bursts = _get_mch_count_ph_in_bursts_func(pure_python)
if not self.alternated:
nt = [b.counts.astype(float) if b.num_bursts > 0 else np.array([])
for b in self.mburst]
A_em = [self.get_A_em(ich) for ich in range(self.nch)]
if isinstance(A_em[0], slice):
# This is to support the case of A-only or D-only data
n0 = [np.zeros(mb.num_bursts) for mb in self.mburst]
if A_em[0] == slice(None):
nd, na = n0, nt # A-only case
elif A_em[0] == slice(0):
nd, na = nt, n0 # D-only case
else:
# This is the usual case with photons in both D and A channels
na = mch_count_ph_in_bursts(self.mburst, A_em)
nd = [t - a for t, a in zip(nt, na)]
assert (nt[0] == na[0] + nd[0]).all()
else:
# The "new style" would be:
#Mask = [m for m in self.iter_ph_masks(Ph_sel(Dex='Dem'))]
Mask = [d_em * d_ex for d_em, d_ex in zip(self.D_em, self.D_ex)]
nd = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * d_ex for a_em, d_ex in zip(self.A_em, self.D_ex)]
na = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * a_ex for a_em, a_ex in zip(self.A_em, self.A_ex)]
naa = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(naa=naa)
if alex_all or 'PAX' in self.meas_type:
Mask = [d_em * a_ex for d_em, a_ex in zip(self.D_em, self.A_ex)]
nda = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(nda=nda)
if self.ALEX:
nt = [d + a + aa for d, a, aa in zip(nd, na, naa)]
assert (nt[0] == na[0] + nd[0] + naa[0]).all()
elif 'PAX' in self.meas_type:
nt = [d + a + da + aa for d, a, da, aa in zip(nd, na, nda, naa)]
assert (nt[0] == na[0] + nd[0] + nda[0] + naa[0]).all()
# This is a copy of na which will never be corrected
# (except for background). It is used to compute the
# equivalent of naa for PAX:
# naa~ = naa - nar
# where naa~ is the A emission due to direct excitation
# by A laser during D+A-excitation,
# nar is the uncorrected A-channel signal during D-excitation,
# and naa is the A-channel signal during D+A excitation.
nar = [a.copy() for a in na]
self.add(nar=nar)
self.add(nd=nd, na=na, nt=nt,
bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
def fuse_bursts(self, ms=0, process=True, mute=False):
"""Return a new :class:`Data` object with nearby bursts fused together.
Arguments:
ms (float): fuse all burst separated by less than `ms` millisecs.
If < 0 no burst is fused. Note that with ms = 0, overlapping
bursts are fused.
process (bool): if True (default), reprocess the burst data in
the new object applying corrections and computing FRET.
mute (bool): if True suppress any printed output.
"""
if ms < 0:
return self
mburst = mch_fuse_bursts(self.mburst, ms=ms, clk_p=self.clk_p)
new_d = Data(**self)
for k in ['E', 'S', 'nd', 'na', 'naa', 'nda', 'nar', 'nt', 'lsb', 'bp']:
if k in new_d:
new_d.delete(k)
new_d.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
new_d.add(mburst=mburst, fuse=ms)
if 'bg' in new_d:
new_d._calc_burst_period()
if process:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
new_d.calc_fret(count_ph=True, corrections=True,
dither=self.dithering, mute=mute, pax=self.pax)
pprint(" [DONE Counting D/A and FRET]\n", mute)
return new_d
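    # Illustrative sketch (assumes `d` has bursts): fuse bursts separated
    # by less than 1 ms (with ms=0 only overlapping bursts are fused).
    #
    #     d_fused = d.fuse_bursts(ms=1)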
##
# Burst selection and filtering
#
def select_bursts(self, filter_fun, negate=False, computefret=True,
args=None, **kwargs):
"""Return an object with bursts filtered according to `filter_fun`.
This is the main method to select bursts according to different
criteria. The selection rule is defined by the selection function
`filter_fun`. FRETBursts provides a several predefined selection
functions see :ref:`burst_selection`. New selection
functions can be defined and passed to this method to implement
arbitrary selection rules.
Arguments:
            filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
            of the new Data() point to the same arrays of the original
Data(). Conversely, all the bursts data (`mburst`, `nd`, `na`,
etc...) are new distinct objects.
"""
Masks, str_sel = self.select_bursts_mask(filter_fun, negate=negate,
return_str=True, args=args,
**kwargs)
d_sel = self.select_bursts_mask_apply(Masks, computefret=computefret,
str_sel=str_sel)
return d_sel
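    # Illustrative sketch, assuming the companion `select_bursts` module is
    # imported (as in `from fretbursts import select_bursts`): keep bursts
    # with more than 30 photons.
    #
    #     ds = d.select_bursts(select_bursts.size, th1=30)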
def select_bursts_mask(self, filter_fun, negate=False, return_str=False,
args=None, **kwargs):
"""Returns mask arrays to select bursts according to `filter_fun`.
The function `filter_fun` is called to compute the mask arrays for
each channel.
This method is useful when you want to apply a selection from one
object to a second object. Otherwise use :meth:`Data.select_bursts`.
Arguments:
            filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
return_str: if True return, for each channel, a tuple with
a bool array and a string that can be added to the measurement
name to indicate the selection. If False returns only
the bool array. Default False.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A list of boolean arrays (one per channel) that define the burst
selection. If `return_str` is True returns a list of tuples, where
each tuple is a bool array and a string.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_bursts_mask_apply`
"""
# Create the list of bool masks for the bursts selection
if args is None:
args = tuple()
M = [filter_fun(self, i, *args, **kwargs) for i in range(self.nch)]
# Make sure the selection function has the right return signature
msg = 'The second argument returned by `%s` must be a string.'
assert np.all([isinstance(m[1], str) for m in M]), msg % filter_fun
# Make sure all boolean masks have the right size
msg = ("The size of boolean masks returned by `%s` needs to match "
"the number of bursts.")
assert np.all([m[0].size == n for m, n in zip(M, self.num_bursts)]), (
msg % filter_fun)
        Masks = [~m[0] if negate else m[0] for m in M]
str_sel = M[0][1]
if return_str:
return Masks, str_sel
else:
return Masks
def select_bursts_mask_apply(self, masks, computefret=True, str_sel=''):
"""Returns a new Data object with bursts selected according to `masks`.
This method select bursts using a list of boolean arrays as input.
Since the user needs to create the boolean arrays first, this method
is useful when experimenting with new selection criteria that don't
have a dedicated selection function. Usually, however, it is easier
to select bursts through :meth:`Data.select_bursts` (using a
selection function).
Arguments:
masks (list of arrays): each element in this list is a boolean
array that selects bursts in a channel.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
            of the new Data() point to the same arrays of the original
Data(). Conversely, all the bursts data (`mburst`, `nd`, `na`,
etc...) are new distinct objects.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_mask`
"""
# Attributes of ds point to the same objects of self
ds = Data(**self)
        # Copy the per-burst fields that must be filtered
used_fields = [field for field in Data.burst_fields if field in self]
for name in used_fields:
# Recreate the current attribute as a new list to avoid modifying
# the old list that is also in the original object.
# The list is initialized with empty arrays because this is the
# valid value when a ch has no bursts.
empty = bslib.Bursts.empty() if name == 'mburst' else np.array([])
ds.add(**{name: [empty] * self.nch})
# Assign the new data
for ich, mask in enumerate(masks):
if self[name][ich].size == 0:
continue # -> no bursts in ch
# Note that boolean masking implies numpy array copy
# On the contrary slicing only makes a new view of the array
ds[name][ich] = self[name][ich][mask]
# Recompute E and S
if computefret:
ds.calc_fret(count_ph=False, pax=self.pax)
# Add the annotation about the filter function
ds.s = list(self.s + [str_sel]) # using append would modify also self
return ds
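    # Illustrative sketch (assumes `d1` and `d2` have the same bursts):
    # compute a selection mask on one object and apply it to another.
    #
    #     masks = d1.select_bursts_mask(select_bursts.size, th1=30)
    #     d2_sel = d2.select_bursts_mask_apply(masks, str_sel='size_th30')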
##
# Burst corrections
#
def background_correction(self, relax_nt=False, mute=False):
"""Apply background correction to burst sizes (nd, na,...)
"""
if self.bg_corrected:
return -1
pprint(" - Applying background correction.\n", mute)
self.add(bg_corrected=True)
for ich, bursts in enumerate(self.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
period = self.bp[ich]
nd, na, bg_d, bg_a, width = self.expand(ich, width=True)
nd -= bg_d
na -= bg_a
if 'nar' in self:
# Apply background correction to PAX field nar
self.nar[ich][:] = na
if relax_nt:
# This does not guarantee that nt = nd + na
self.nt[ich] -= self.bg_from(Ph_sel('all'))[ich][period] * width
else:
self.nt[ich] = nd + na
if self.alternated:
bg_aa = self.bg_from(Ph_sel(Aex='Aem'))
self.naa[ich] -= bg_aa[ich][period] * width
if 'nda' in self:
bg_da = self.bg_from(Ph_sel(Aex='Dem'))
self.nda[ich] -= bg_da[ich][period] * width
self.nt[ich] += self.naa[ich]
if 'PAX' in self.meas_type:
self.nt[ich] += self.nda[ich]
def leakage_correction(self, mute=False):
"""Apply leakage correction to burst sizes (nd, na,...)
"""
if self.leakage_corrected:
return -1
elif self.leakage != 0:
pprint(" - Applying leakage correction.\n", mute)
Lk = self.get_leakage_array()
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
self.na[i] -= self.nd[i] * Lk[i]
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(leakage_corrected=True)
def direct_excitation_correction(self, mute=False):
"""Apply direct excitation correction to bursts (ALEX-only).
The applied correction is: na -= naa*dir_ex
"""
if self.dir_ex_corrected:
return -1
elif self.dir_ex != 0:
pprint(" - Applying direct excitation correction.\n", mute)
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
naa = self.naa[i]
if 'PAX' in self.meas_type:
naa = naa - self.nar[i] # do not modify inplace
self.na[i] -= naa * self.dir_ex
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(dir_ex_corrected=True)
def dither(self, lsb=2, mute=False):
"""Add dithering (uniform random noise) to burst counts (nd, na,...).
The dithering amplitude is the range -0.5*lsb .. 0.5*lsb.
"""
if self.dithering:
return -1
pprint(" - Applying burst-size dithering.\n", mute)
self.add(dithering=True)
for nd, na in zip(self.nd, self.na):
nd += lsb * (np.random.rand(nd.size) - 0.5)
na += lsb * (np.random.rand(na.size) - 0.5)
if self.alternated:
for naa in self.naa:
naa += lsb * (np.random.rand(naa.size) - 0.5)
if 'nda' in self:
for nda in self.nda:
nda += lsb * (np.random.rand(nda.size) - 0.5)
self.add(lsb=lsb)
def calc_chi_ch(self, E):
"""Calculate the gamma correction prefactor factor `chi_ch` (array).
Computes `chi_ch`, a channel-dependent prefactor for gamma used
to correct dispersion of E across channels.
Returns:
array of `chi_ch` correction factors (one per spot).
To apply the correction assign the returned array to `Data.chi_ch`.
Upon assignment E values for all bursts will be corrected.
"""
chi_ch = (1 / E.mean() - 1) / (1 / E - 1)
return chi_ch
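    # Example (illustrative usage, assuming `d` is a Data object and
    # `E_spots` an array of per-spot mean E values):
    #     d.chi_ch = d.calc_chi_ch(E_spots)
    # Assigning to `chi_ch` triggers recomputation of E (see the setter).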
def corrections(self, mute=False):
"""Apply corrections on burst-counts: nd, na, nda, naa.
The corrections are: background, leakage (or bleed-through) and
direct excitation (dir_ex).
"""
self.background_correction(mute=mute)
self.leakage_correction(mute=mute)
if self.alternated:
self.direct_excitation_correction(mute=mute)
def _update_corrections(self):
"""Recompute corrections whose flag is True.
Checks the flags .bg_corrected, .leakage_corrected, .dir_ex_corrected,
.dithering and recomputes the correction if the corresponding flag
is True (i.e. if the correction was already applied).
Note that this method is not used for gamma and beta corrections
because these do not affect the `nd`, `na` and `naa` quantities but
are only applied when computing E, S and corrected size.
        Unlike :meth:`corrections`, this method allows recomputing
        corrections that have already been applied.
"""
if 'mburst' not in self:
return # no burst search performed yet
old_bg_corrected = self.bg_corrected
old_leakage_corrected = self.leakage_corrected
old_dir_ex_corrected = self.dir_ex_corrected
old_dithering = self.dithering
self.calc_ph_num() # recompute uncorrected na, nd, nda, naa
if old_bg_corrected:
self.background_correction()
if old_leakage_corrected:
self.leakage_correction()
if old_dir_ex_corrected:
self.direct_excitation_correction()
if old_dithering:
self.dither(self.lsb)
# Recompute E and S with no corrections (because already applied)
self.calc_fret(count_ph=False, corrections=False, pax=self.pax)
@property
def leakage(self):
"""Spectral leakage (bleed-through) of D emission in the A channel.
"""
return self._leakage
@leakage.setter
def leakage(self, leakage):
self._update_leakage(leakage)
def _update_leakage(self, leakage):
"""Apply/update leakage (or bleed-through) correction.
"""
assert (np.size(leakage) == 1) or (np.size(leakage) == self.nch)
self.add(_leakage=np.asfarray(leakage), leakage_corrected=True)
self._update_corrections()
@property
def dir_ex(self):
"""Direct excitation correction factor."""
return self._dir_ex
@dir_ex.setter
def dir_ex(self, value):
self._update_dir_ex(value)
def _update_dir_ex(self, dir_ex):
"""Apply/update direct excitation correction with value `dir_ex`.
"""
assert np.size(dir_ex) == 1
self.add(_dir_ex=float(dir_ex), dir_ex_corrected=True)
self._update_corrections()
@property
def beta(self):
"""Beta factor used to correct S (compensates Dex and Aex unbalance).
"""
return self._beta
@beta.setter
def beta(self, value):
self._update_beta(value)
def _update_beta(self, beta):
"""Change the `beta` value and recompute E and S."""
assert np.size(beta) == 1
self.add(_beta=float(beta))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def chi_ch(self):
"""Per-channel relative gamma factor."""
return self._chi_ch
@chi_ch.setter
def chi_ch(self, value):
self._update_chi_ch(value)
def _update_chi_ch(self, chi_ch):
"""Change the `chi_ch` value and recompute E and S."""
msg = 'chi_ch is a per-channel correction and must have size == nch.'
assert np.size(chi_ch) == self.nch, ValueError(msg)
self.add(_chi_ch=np.asfarray(chi_ch))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def gamma(self):
"""Gamma correction factor (compensates DexDem and DexAem unbalance).
"""
return self._gamma
@gamma.setter
def gamma(self, value):
self._update_gamma(value)
def _update_gamma(self, gamma):
"""Change the `gamma` value and recompute E and S."""
assert (np.size(gamma) == 1) or (np.size(gamma) == self.nch)
self.add(_gamma=np.asfarray(gamma))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
def get_gamma_array(self):
"""Get the array of gamma factors, one per ch.
It always returns an array of gamma factors regardless of
whether `self.gamma` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
gamma = self.gamma
G = np.repeat(gamma, self.nch) if np.size(gamma) == 1 else gamma
G *= self.chi_ch
return G
def get_leakage_array(self):
"""Get the array of leakage coefficients, one per ch.
It always returns an array of leakage coefficients regardless of
whether `self.leakage` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
leakage = self.leakage
Lk = np.r_[[leakage] * self.nch] if np.size(leakage) == 1 else leakage
Lk *= self.chi_ch
return Lk
##
# Methods to compute burst quantities: FRET, S, SBR, max_rate, etc ...
#
def calc_sbr(self, ph_sel=Ph_sel('all'), gamma=1.):
"""Return Signal-to-Background Ratio (SBR) for each burst.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection
for which to compute the sbr. Changes the photons used for
burst size and the corresponding background rate. Valid values
here are Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem').
See :mod:`fretbursts.ph_sel` for details.
gamma (float): gamma value used to compute corrected burst size
in the case `ph_sel` is Ph_sel('all'). Ignored otherwise.
Returns:
A list of arrays (one per channel) with one value per burst.
The list is also saved in `sbr` attribute.
"""
ph_sel = self._fix_ph_sel(ph_sel)
sbr = []
for ich, mb in enumerate(self.mburst):
if mb.num_bursts == 0:
sbr.append(np.array([]))
continue # if no bursts skip this ch
nd, na, bg_d, bg_a = self.expand(ich)
nt = self.burst_sizes_ich(ich=ich, gamma=gamma)
signal = {Ph_sel('all'): nt,
Ph_sel(Dex='Dem'): nd, Ph_sel(Dex='Aem'): na}
background = {Ph_sel('all'): bg_d + bg_a,
Ph_sel(Dex='Dem'): bg_d, Ph_sel(Dex='Aem'): bg_a}
sbr.append(signal[ph_sel] / background[ph_sel])
self.add(sbr=sbr)
return sbr
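    # Example (illustrative usage, assuming `d` is a Data object after
    # burst search):
    #     sbr = d.calc_sbr(ph_sel=Ph_sel(Dex='Dem'))
    #     good = [s > 3 for s in sbr]   # per-channel masks of high-SBR bursts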
def calc_burst_ph_func(self, func, func_kw, ph_sel=Ph_sel('all'),
compact=False, ich=0):
"""Evaluate a scalar function from photons in each burst.
        This method allows calling an arbitrary function on the photon
        timestamps of each burst. For example, if `func` is `np.mean` it
        computes the mean time in each burst.
Arguments:
func (callable): function that takes as first argument an array of
timestamps for one burst.
            func_kw (dict): additional keyword arguments to be passed to `func`.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
Returns:
            A list of arrays (one element per channel). Each array's size is
            equal to the number of bursts in the corresponding channel.
"""
if compact:
self._assert_compact(ph_sel)
kwargs = dict(func=func, func_kw=func_kw, compact=compact)
if self.alternated:
kwargs.update(alex_period=self.alex_period)
if compact:
kwargs.update(excitation_width=self._excitation_width(ph_sel))
results_mch = [burst_ph_stats(ph, bursts, mask=mask, **kwargs)
for ph, mask, bursts in
zip(self.iter_ph_times(),
self.iter_ph_masks(ph_sel=ph_sel),
self.mburst)]
return results_mch
def calc_max_rate(self, m, ph_sel=Ph_sel('all'), compact=False,
c=phrates.default_c):
"""Compute the max m-photon rate reached in each burst.
Arguments:
m (int): number of timestamps to use to compute the rate.
As for burst search, typical values are 5-20.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
c (float): this parameter is used in the definition of the
                rate estimator which is `(m - 1 - c) / (t[last] - t[first])`.
For more details see :func:`.phtools.phrates.mtuple_rates`.
"""
ph_sel = self._fix_ph_sel(ph_sel)
Max_Rate = self.calc_burst_ph_func(func=phrates.mtuple_rates_max,
func_kw=dict(m=m, c=c),
ph_sel=ph_sel, compact=compact)
Max_Rate = [mr / self.clk_p - bg[bp] for bp, bg, mr in
zip(self.bp, self.bg_from(ph_sel), Max_Rate)]
params = dict(m=m, ph_sel=ph_sel, compact=compact)
self.add(max_rate=Max_Rate, max_rate_params=params)
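    # Worked example (illustrative): with m=10, c=0 and 1 ms between the
    # first and last of the 10 timestamps, the raw estimate is
    # (10 - 1 - 0) / 1e-3 = 9000 counts/s; the background rate of the
    # burst's period is then subtracted from it.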
def calc_fret(self, count_ph=False, corrections=True, dither=False,
mute=False, pure_python=False, pax=False):
"""Compute FRET (and stoichiometry if ALEX) for each burst.
        This is a high-level function that can be run after burst search.
        Depending on the arguments, it counts Donor and Acceptor photons,
        applies corrections (background, leakage), and computes
        gamma-corrected FRET efficiencies (and stoichiometry if ALEX).
Arguments:
            count_ph (bool): if True, calls :meth:`calc_ph_num` to count
                Donor and Acceptor photons in each burst. Default False.
corrections (bool): if True (default), applies background and
bleed-through correction to burst data
dither (bool): whether to apply dithering to burst size.
Default False.
mute (bool): whether to mute all the printed output. Default False.
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
                Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Returns:
None, all the results are saved in the object.
"""
if count_ph:
self.calc_ph_num(pure_python=pure_python, alex_all=True)
if dither:
self.dither(mute=mute)
if corrections:
self.corrections(mute=mute)
self._calculate_fret_eff(pax=pax)
if self.alternated:
self._calculate_stoich(pax=pax)
#self._calc_alex_hist()
for attr in ('ES_binwidth', 'ES_hist', 'E_fitter', 'S_fitter'):
# E_fitter and S_fitter are only attributes
# so we cannot use the membership syntax (attr in self)
if hasattr(self, attr):
self.delete(attr, warning=False)
def _aex_fraction(self):
"""Proportion of Aex period versus Dex + Aex."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return ((A_ON[1] - A_ON[0]) /
(A_ON[1] - A_ON[0] + D_ON[1] - D_ON[0]))
def _aex_dex_ratio(self):
"""Ratio of Aex and Dex period durations."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return (A_ON[1] - A_ON[0]) / (D_ON[1] - D_ON[0])
def _calculate_fret_eff(self, pax=False):
"""Compute FRET efficiency (`E`) for each burst."""
G = self.get_gamma_array()
if not pax:
E = [na / (g * nd + na) for nd, na, g in zip(self.nd, self.na, G)]
else:
alpha = 1 - self._aex_fraction()
E = [(na / alpha) / (g * (nd + nda) + (na / alpha))
for nd, na, nda, g in zip(self.nd, self.na, self.nda, G)]
self.add(E=E, pax=pax)
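    # Worked example (illustrative): in the non-PAX case a burst with
    # nd=30, na=70 and gamma=1 gives E = 70 / (1 * 30 + 70) = 0.7.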
def _calculate_stoich(self, pax=False):
"""Compute "stoichiometry" (the `S` parameter) for each burst."""
G = self.get_gamma_array()
naa = self.naa
if 'PAX' in self.meas_type:
naa = [self._get_naa_ich(i) for i in range(self.nch)]
if not pax:
S = [(g * d + a) / (g * d + a + aa / self.beta)
for d, a, aa, g in zip(self.nd, self.na, naa, G)]
else:
# This is a PAX-enhanced formula which uses information
# from both alternation periods in order to compute S
alpha = 1 - self._aex_fraction()
S = [(g * (d + da) + a / alpha) /
(g * (d + da) + a / alpha + aa / (alpha * self.beta))
for d, a, da, aa, g in
zip(self.nd, self.na, self.nda, naa, G)]
self.add(S=S)
def _calc_alex_hist(self, binwidth=0.05):
"""Compute the ALEX histogram with given bin width `bin_step`"""
if 'ES_binwidth' in self and self.ES_binwidth == binwidth:
return
ES_hist_tot = [ES_histog(E, S, binwidth) for E, S in
zip(self.E, self.S)]
E_bins, S_bins = ES_hist_tot[0][1], ES_hist_tot[0][2]
ES_hist = [h[0] for h in ES_hist_tot]
E_ax = E_bins[:-1] + 0.5 * binwidth
S_ax = S_bins[:-1] + 0.5 * binwidth
self.add(ES_hist=ES_hist, E_bins=E_bins, S_bins=S_bins,
E_ax=E_ax, S_ax=S_ax, ES_binwidth=binwidth)
##
# Methods for measurement info
#
def status(self, add="", noname=False):
"""Return a string with burst search, corrections and selection info.
"""
name = "" if noname else self.name
s = name
if 'L' in self: # burst search has been done
if 'rate_th' in self:
s += " BS_%s L%d m%d MR%d" % (self.ph_sel, self.L, self.m,
np.mean(self.rate_th) * 1e-3)
else:
P_str = '' if self.P is None else ' P%s' % self.P
s += " BS_%s L%d m%d F%.1f%s" % \
(self.ph_sel, self.L, self.m, np.mean(self.F), P_str)
s += " G%.3f" % np.mean(self.gamma)
if 'bg_fun' in self: s += " BG%s" % self.bg_fun.__name__[:-4]
if 'bg_time_s' in self: s += "-%ds" % self.bg_time_s
if 'fuse' in self: s += " Fuse%.1fms" % self.fuse
if 'bg_corrected' in self and self.bg_corrected:
s += " bg"
if 'leakage_corrected' in self and self.leakage_corrected:
s += " Lk%.3f" % np.mean(self.leakage*100)
if 'dir_ex_corrected' in self and self.dir_ex_corrected:
s += " dir%.1f" % (self.dir_ex*100)
if 'dithering' in self and self.dithering:
s += " Dith%d" % self.lsb
if 's' in self: s += ' '.join(self.s)
return s + add
@property
def name(self):
"""Measurement name: last subfolder + file name with no extension."""
if not hasattr(self, '_name'):
basename = str(os.path.splitext(os.path.basename(self.fname))[0])
name = basename
last_dir = str(os.path.basename(os.path.dirname(self.fname)))
if len(last_dir) > 0:
name = '_'.join([last_dir, basename])
self.add(_name=name)
return self._name
@name.setter
def name(self, value):
self.add(_name=value)
def Name(self, add=""):
"""Return short filename + status information."""
n = self.status(add=add)
return n
def __repr__(self):
return self.status()
def stats(self, string=False):
"""Print common statistics (BG rates, #bursts, mean size, ...)"""
s = print_burst_stats(self)
if string:
return s
else:
print(s)
##
# FRET fitting methods
#
def fit_E_m(self, E1=-1, E2=2, weights='size', gamma=1.):
"""Fit E in each channel with the mean using bursts in [E1,E2] range.
Note:
            These two fits are equivalent (but the first is much faster)::
fit_E_m(weights='size')
fit_E_minimize(kind='E_size', weights='sqrt')
However `fit_E_minimize()` does not provide a model curve.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, 2)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
# Compute weighted mean
fit_res[ich, 0] = np.dot(w, E[mask])/w.sum()
# Compute weighted variance
fit_res[ich, 1] = np.sqrt(
np.dot(w, (E[mask] - fit_res[ich, 0])**2)/w.sum())
fit_model_F[ich] = mask.sum()/mask.size
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
self.add(fit_E_res=fit_res, fit_E_name='Moments',
E_fit=fit_res[:, 0], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_poiss(self, E1=-1, E2=2, method=1, **kwargs):
"""ML fit for E modeling size ~ Poisson, using bursts in [E1,E2] range.
"""
assert method in [1, 2, 3]
fit_fun = {1: fret_fit.fit_E_poisson_na, 2: fret_fit.fit_E_poisson_nt,
3: fret_fit.fit_E_poisson_nd}
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = zeros(self.nch)
for ich, mask in zip(range(self.nch), Mask):
nd, na, bg_d, bg_a = self.expand(ich)
bg_x = bg_d if method == 3 else bg_a
fit_res[ich] = fit_fun[method](nd[mask], na[mask],
bg_x[mask], **kwargs)
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Poisson',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_binom(self, E1=-1, E2=2, **kwargs):
"""ML fit for E modeling na ~ Binomial, using bursts in [E1,E2] range.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fret_fit.fit_E_binom(_d[mask], _a[mask], **kwargs)
for _d, _a, mask in zip(self.nd, self.na, Mask)])
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Binomial',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_minimize(self, kind='slope', E1=-1, E2=2, **kwargs):
"""Fit E using method `kind` ('slope' or 'E_size') and bursts in [E1,E2]
If `kind` is 'slope' the fit function is fret_fit.fit_E_slope()
If `kind` is 'E_size' the fit function is fret_fit.fit_E_E_size()
Additional arguments in `kwargs` are passed to the fit function.
"""
assert kind in ['slope', 'E_size']
# Build a dictionary fun_d so we'll call the function fun_d[kind]
fun_d = dict(slope=fret_fit.fit_E_slope,
E_size=fret_fit.fit_E_E_size)
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fun_d[kind](nd[mask], na[mask], **kwargs)
for nd, na, mask in
zip(self.nd, self.na, Mask)])
fit_name = dict(slope='Linear slope fit', E_size='E_size fit')
self.add(fit_E_res=fit_res, fit_E_name=fit_name[kind],
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_two_gauss_EM(self, fit_func=two_gaussian_fit_EM,
weights='size', gamma=1., **kwargs):
"""Fit the E population to a Gaussian mixture model using EM method.
Additional arguments in `kwargs` are passed to the fit_func().
"""
fit_res = zeros((self.nch, 5))
for ich, (nd, na, E) in enumerate(zip(self.nd, self.na, self.E)):
w = fret_fit.get_weights(nd, na, weights=weights, gamma=gamma)
fit_res[ich, :] = fit_func(E, weights=w, **kwargs)
self.add(fit_E_res=fit_res, fit_E_name=fit_func.__name__,
E_fit=fit_res[:, 2], fit_E_curve=True,
fit_E_model=two_gauss_mix_pdf,
fit_E_model_F=np.repeat(1, self.nch))
return self.E_fit
def fit_E_generic(self, E1=-1, E2=2, fit_fun=two_gaussian_fit_hist,
weights=None, gamma=1., **fit_kwargs):
"""Fit E in each channel with `fit_fun` using burst in [E1,E2] range.
All the fitting functions are defined in
:mod:`fretbursts.fit.gaussian_fitting`.
Parameters:
weights (string or None): specifies the type of weights
If not None `weights` will be passed to
`fret_fit.get_weights()`. `weights` can be not-None only when
using fit functions that accept weights (the ones ending in
`_hist` or `_EM`)
gamma (float): passed to `fret_fit.get_weights()` to compute
weights
All the additional arguments are passed to `fit_fun`. For example `p0`
or `mu_fix` can be passed (see `fit.gaussian_fitting` for details).
Note:
Use this method for CDF/PDF or hist fitting.
For EM fitting use :meth:`fit_E_two_gauss_EM()`.
"""
if fit_fun.__name__.startswith("gaussian_fit"):
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
if 'mu0' not in fit_kwargs: fit_kwargs.update(mu0=0.5)
if 'sigma0' not in fit_kwargs: fit_kwargs.update(sigma0=0.3)
iE, nparam = 0, 2
elif fit_fun.__name__ == "two_gaussian_fit_hist_min_ab":
fit_model = two_gauss_mix_ab
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.5, 0.6, 0.1, 0.5])
iE, nparam = 3, 6
elif fit_fun.__name__.startswith("two_gaussian_fit"):
fit_model = two_gauss_mix_pdf
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.6, 0.1, 0.5])
iE, nparam = 2, 5
else:
raise ValueError("Fitting function not recognized.")
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, nparam)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
if '_hist' in fit_fun.__name__ or '_EM' in fit_fun.__name__:
if weights is None:
w = None
else:
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
fit_res[ich, :] = fit_fun(E[mask], weights=w, **fit_kwargs)
else:
# Non-histogram fits (PDF/CDF) do not support weights
fit_res[ich, :] = fit_fun(E[mask], **fit_kwargs)
fit_model_F[ich] = mask.sum()/mask.size
# Save enough info to generate a fit plot (see hist_fret in burst_plot)
self.add(fit_E_res=fit_res, fit_E_name=fit_fun.__name__,
E_fit=fit_res[:, iE], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F, fit_E_weights=weights,
fit_E_gamma=gamma, fit_E_kwargs=fit_kwargs)
return self.E_fit
def fit_from(self, D):
"""Copy fit results from another Data() variable.
        Now that the fit methods accept E1, E2 parameters this is probably useless.
"""
# NOTE Are 'fit_guess' and 'fit_fix' still used ?
fit_data = ['fit_E_res', 'fit_E_name', 'E_fit', 'fit_E_curve',
                    'fit_E_E1', 'fit_E_E2', 'fit_E_model',
'fit_E_model_F', 'fit_guess', 'fit_fix']
for name in fit_data:
if name in D:
self[name] = D[name]
setattr(self, name, self[name])
# Deal with the normalization to the number of bursts
self.add(fit_model_F=r_[[old_E.size/new_E.size \
for old_E, new_E in zip(D.E, self.E)]])
def fit_E_calc_variance(self, weights='sqrt', dist='DeltaE',
E_fit=None, E1=-1, E2=2):
"""Compute several versions of WEIGHTED std.dev. of the E estimator.
        `weights` are multiplied *BEFORE* squaring the distance/error.
`dist` can be 'DeltaE' or 'SlopeEuclid'
Note:
This method is still experimental
"""
assert dist in ['DeltaE', 'SlopeEuclid']
if E_fit is None:
E_fit = self.E_fit
E1 = self.fit_E_E1 if 'fit_E_E1' in self else -1
E2 = self.fit_E_E2 if 'fit_E_E2' in self else 2
else:
# If E_fit is not None the specified E1,E2 range is used
if E1 < 0 and E2 > 1:
pprint('WARN: E1 < 0 and E2 > 1 (wide range of E eff.)\n')
if size(E_fit) == 1 and self.nch > 0:
E_fit = np.repeat(E_fit, self.nch)
assert size(E_fit) == self.nch
E_sel = [Ei[(Ei > E1)*(Ei < E2)] for Ei in self.E]
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
E_var, E_var_bu, E_var_ph = \
zeros(self.nch), zeros(self.nch), zeros(self.nch)
for i, (Ech, nt, mask) in enumerate(zip(E_sel, self.nt, Mask)):
nt_s = nt[mask]
nd_s, na_s = self.nd[i][mask], self.na[i][mask]
w = fret_fit.get_weights(nd_s, na_s, weights=weights)
info_ph = nt_s.sum()
info_bu = nt_s.size
if dist == 'DeltaE':
distances = (Ech - E_fit[i])
elif dist == 'SlopeEuclid':
distances = fret_fit.get_dist_euclid(nd_s, na_s, E_fit[i])
residuals = distances * w
var = np.mean(residuals**2)
var_bu = np.mean(residuals**2)/info_bu
var_ph = np.mean(residuals**2)/info_ph
#lvar = np.mean(log(residuals**2))
#lvar_bu = np.mean(log(residuals**2)) - log(info_bu)
#lvar_ph = np.mean(log(residuals**2)) - log(info_ph)
E_var[i], E_var_bu[i], E_var_ph[i] = var, var_bu, var_ph
            assert not np.isnan(E_var[i])  # check there is NO NaN
self.add(E_var=E_var, E_var_bu=E_var_bu, E_var_ph=E_var_ph)
return E_var
| gpl-2.0 | -7,185,521,470,680,014,000 | 41.513032 | 82 | 0.562813 | false |
stvstnfrd/edx-platform | common/lib/xmodule/xmodule/tests/test_poll.py | 1 | 2336 | # -*- coding: utf-8 -*-
"""Test for Poll Xmodule functional logic."""
from mock import Mock
from xmodule.poll_module import PollDescriptor
from . import LogicTest
from .test_import import DummySystem
class PollModuleTest(LogicTest):
"""Logic tests for Poll Xmodule."""
descriptor_class = PollDescriptor
raw_field_data = {
'poll_answers': {'Yes': 1, 'Dont_know': 0, 'No': 0},
'voted': False,
'poll_answer': ''
}
def test_bad_ajax_request(self):
        # Make sure that the answer for an incorrect request is an error json.
response = self.ajax_request('bad_answer', {})
self.assertDictEqual(response, {'error': 'Unknown Command!'})
def test_good_ajax_request(self):
# Make sure that ajax request works correctly.
response = self.ajax_request('No', {})
poll_answers = response['poll_answers']
total = response['total']
callback = response['callback']
self.assertDictEqual(poll_answers, {'Yes': 1, 'Dont_know': 0, 'No': 1})
assert total == 2
self.assertDictEqual(callback, {'objectName': 'Conditional'})
assert self.xmodule.poll_answer == 'No'
def test_poll_export_with_unescaped_characters_xml(self):
"""
Make sure that poll_module will export fine if its xml contains
unescaped characters.
"""
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
id_generator.target_course_id = self.xmodule.course_id
sample_poll_xml = '''
<poll_question display_name="Poll Question">
<p>How old are you?</p>
<answer id="less18">18</answer>
</poll_question>
'''
output = PollDescriptor.from_xml(sample_poll_xml, module_system, id_generator)
        # Update the answer with an invalid character.
invalid_characters_poll_answer = output.answers[0]
# Invalid less-than character.
invalid_characters_poll_answer['text'] = '< 18'
output.answers[0] = invalid_characters_poll_answer
output.save()
xml = output.definition_to_xml(None)
# Extract texts of all children.
child_texts = xml.xpath('//text()')
# Last index of child_texts contains text of answer tag.
        assert child_texts[-1] == '< 18'
| agpl-3.0 | -2,279,210,706,724,800,500 | 33.865672 | 86 | 0.61601 | false |
Swimlane/sw-python-client | swimlane/core/resources/base.py | 1 | 1831 | import six
from swimlane.core.resolver import SwimlaneResolver
class APIResourceMetaclass(type):
"""Metaclass for all APIResource classes"""
def __call__(cls, *args, **kwargs):
"""Hook __init__ call to push resource instance into Swimlane client ResourceCache after instantiation"""
resource_instance = type.__call__(cls, *args, **kwargs)
resource_instance._swimlane.resources_cache.cache(resource_instance)
return resource_instance
class APIResource(six.with_metaclass(APIResourceMetaclass, SwimlaneResolver)):
"""Base class for all API resources with an associated $type and/or raw data"""
_type = None
def __init__(self, swimlane, raw):
super(APIResource, self).__init__(swimlane)
self._raw = raw
raw_type = self._raw.get('$type')
if self._type and raw_type != self._type:
raise TypeError('Expected $type = "{}", received "{}"'.format(self._type, raw_type))
def __repr__(self):
return '<{self.__class__.__name__}: {self!s}>'.format(self=self)
def __str__(self):
return ''
def __hash__(self):
"""Added for py2+3 compat"""
return int(id(self) / 16)
def __eq__(self, other):
"""Determine if an APIResource is of the same type and has the same hash value"""
return isinstance(other, self.__class__) and hash(self) == hash(other)
def __ne__(self, other):
# Default __ne__ for python 2 compat
return not self == other
def get_cache_internal_key(self):
"""Return real internal cache key for resource instance"""
return hash(self)
def get_cache_index_keys(self):
"""Return dict of key/value pairs used by ResourceCache to map resource values to internal cache instance"""
raise NotImplementedError
| mit | 7,108,540,010,368,214,000 | 32.290909 | 116 | 0.629711 | false |
patrickshuff/artofmemory | artofmemory/pao.py | 1 | 2868 | import random
import textwrap
from configparser import ConfigParser
def explain() -> str:
"""Explain Person Action Object"""
return textwrap.dedent(
"""\
Person Action Object (PAO)
The PAO is a system of encoding where you attribute a specific Person with an
Action that includes an Object. This is a composite object which you can then use
in a variety of ways. The idea is that you develop a collection of PAOs and assign
each of them a number.
Examples:
15: Albert Einstein (person) writing (action) on a blackboard (object).
16: Molly Ringwald (person) blowing candles (action) on a cake (object).
23: Michael Jordan (person) shooting (action) a basketball (object).
Armed with such an inventory you can use it for encoding of other information. Say
you want to memorize a series of numbers and you had a PAO inventory from
00-99. You could then assign the first six digits with a special combination of
your PAO collection.
Example:
162315 => Molly Ringwald shooting a blackboard
By doing this, you're compressing six digits into a single, composite image.
"""
)
def flatten_pao(d):
"""Yield back (num, item) tuples for each PAO broken into items.
The PAO item will be prefixed with either 'p:', 'a:', 'o:' to help denote its part of
the overall PAO.
Args:
d (dict): dictionary-like object that supports .items()
Yields:
(str, str)
"""
for num, pao in d.items():
person, action, obj = pao.split(",")
yield (num, "p:" + person.strip())
yield (num, "a:" + action.strip())
yield (num, "o:" + obj.strip())
def basic_quiz(config_file: str):
"""Test out your Person Action Object (PAO) knowledge
It supports just testing your PAO + shuffling them up to test combos
"""
config = ConfigParser()
config.read(config_file)
# TODO -- add an option to limit the values to test
# e.g. if I only want to test PAO for 1 through 4
# TODO add support for properly mixing up the PAO and testing
if "pao" not in config.sections():
print("No PAO Config setup. See README")
return
# Randomize the PAO items
pao_pairs = list(flatten_pao(config["pao"]))
random.shuffle(pao_pairs)
correct = 0
total = 0
for number, item in pao_pairs:
try:
guess = input("{}\n=> ".format(item))
except (EOFError, KeyboardInterrupt):
break
if not guess:
continue
if guess == number:
print("CORRECT!")
correct += 1
else:
print("INCORRECT: {}".format(number))
total += 1
if total:
print("\n{:>2}% Correct".format(correct / float(total) * 100))
| mit | 4,104,524,509,316,503,600 | 30.516484 | 90 | 0.613668 | false |
L1NT/django-training-log | log/models.py | 1 | 5458 | from django.db import models
# Create your models here.
class Sport(models.Model):
"""
don't use models.choices because we want the list to be transactional data
example list: [
'bike',
'run',
'swim',
'measurements',
'yoga',
'weights',
# for multi-sport `Event`s:
'multisport', #EventType.sport
'transition', #Entry.sport
]
"""
sport = models.CharField(max_length=20)
class Meta:
ordering = ['sport']
def __unicode__(self):
return self.sport
def __str__(self):
return self.sport
class Measurements(models.Model):
id = models.AutoField(primary_key=True) #added by default
weight = models.FloatField(blank=True, null=True)
class Equipment(models.Model):
"""
this is for things such as bikes, shoes, wheelsets; i.e. things with a
determinable depreciation cost or maintenance periods
"""
name = models.CharField(max_length=50)
cost = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2)
acquired_date = models.DateField()
disposal_date = models.DateField(blank=True, null=True)
disposal_method = models.CharField(blank=True, max_length=7, choices=[
('sold', 'sold'),
('donated', 'donated'),
('retired', 'retired'),# i.e. 'broken'
])
disposal_proceeds = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2)
expected_lifespan = models.DurationField(blank=True, null=True)
maintenance_interval = models.DurationField(blank=True, null=True)
def history(self):
return EquipmentMaintenance.objects.filter(equipment=self.id)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class EquipmentMaintenance(models.Model):
date = models.DateField()
description = models.CharField(max_length=250)
equipment = models.ForeignKey(Equipment)
cost = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2)
vendor = models.CharField(max_length=50, default='DIY')
class EventType(models.Model):
"""
examples: '5k', 'Olympic', 'Criterium'
"""
event_type = models.CharField(max_length=20)
sport = models.ForeignKey(Sport)
class Meta:
ordering = ['sport', 'event_type']
def __unicode__(self):
return str(self.sport) + ': ' + self.event_type
def __str__(self):
return str(self.sport) + ': ' + self.event_type
class Event(models.Model):
name = models.CharField(max_length=35)
location = models.CharField(max_length=50)
event_type = models.ForeignKey(EventType, blank=True, null=True)
bib_number = models.IntegerField(blank=True, null=True)
dnf = models.BooleanField()
finish_overall = models.IntegerField(blank=True, null=True)
finishers_overall = models.IntegerField(blank=True, null=True)
#maybe just use "handicapped" as the age group description??
finish_handicapped = models.IntegerField(blank=True, null=True)
finish_gender = models.IntegerField(blank=True, null=True)
finishers_gender = models.IntegerField(blank=True, null=True)
finish_age_group = models.IntegerField(blank=True, null=True)
finishers_age_group = models.IntegerField(blank=True, null=True)
# category/age_group seem to be mutually-exclusive?
category = models.CharField(max_length=10, blank=True, null=True)
age_group = models.CharField(max_length=10, blank=True)
results_url = models.URLField(blank=True, null=True)
official_time = models.TimeField(blank=True, null=True) #used for total event time (brevets & triathlons)
## TODO: maybe this should be handled by multiple `Entry`s?
# swim_distance = models.FloatField(blank=True)
# bike_distance = models.FloatField(blank=True)
# run_distance = models.FloatField(blank=True)
# swim_time = models.TimeField(blank=True)
# bike_time = models.TimeField(blank=True)
# run_time = models.TimeField(blank=True)
# t1_time = models.TimeField(blank=True)
# t2_time = models.TimeField(blank=True)
def get_absolute_url(self):
return "/events?event=%d" % self.id
def __unicode__(self):
return self.name + ' ['+self.date.strftime('%b %d, %Y')+']'
def __str__(self):
return self.name + ' ['+self.date.strftime('%b %d, %Y')+']'
class Entry(models.Model):
#entry_id:
date = models.DateField()
sport = models.ForeignKey(Sport)
event = models.ForeignKey(Event, blank=True, null=True)
route = models.CharField(max_length=50, blank=True) # routes Model?
notes = models.CharField(max_length=256, blank=True)
equipment = models.ForeignKey(Equipment, blank=True, null=True)
distance = models.FloatField(blank=True, null=True)
time = models.TimeField(blank=True, null=True)
avg_speed = models.FloatField(blank=True, null=True)
max_speed = models.FloatField(blank=True, null=True)
elevation_gain = models.IntegerField(blank=True, null=True)
calories = models.IntegerField(blank=True, null=True)
#pace: models.TimeField(blank=True, default=calc_pace(self.time/self.distance)) #could be calculated...
class Meta:
ordering = ['date', 'id']
def __unicode__(self):
return self.date.strftime('%b %d, %Y') + ' ['+str(self.sport)+']'
def __str__(self):
return self.date.strftime('%b %d, %Y') + ' ['+str(self.sport)+']'
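# Example (illustrative ORM usage, assuming migrations have been applied):
#     run = Sport.objects.get(sport='run')
#     recent = Entry.objects.filter(sport=run).order_by('-date')[:10]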
| gpl-2.0 | 1,526,296,373,929,901,000 | 35.878378 | 109 | 0.662147 | false |
SuLab/scheduled-bots | scheduled_bots/drugs/unii.py | 1 | 6335 | """
Bot code for creating chemical items in wikidata from UNII
Adapted from: https://github.com/sebotic/cdk_pywrapper/blob/master/cdk_pywrapper/chemlib.py
"""
import os
import re
import subprocess
import time
import zipfile
import pandas as pd
import wikidataintegrator.wdi_core as wdi_core
data_folder = "unii_data"
def load_unii():
url = 'http://fdasis.nlm.nih.gov/srs/download/srs/UNII_Data.zip'
if not os.path.exists(data_folder):
os.makedirs(data_folder)
subprocess.check_call(["wget", "-N", "-P", data_folder, url])
with zipfile.ZipFile(os.path.join(data_folder, 'UNII_Data.zip'), 'r') as zf:
zf.extractall(data_folder)
for file in os.listdir(data_folder):
if 'Records' in file:
full_file_name = os.path.join(data_folder, file)
os.rename(full_file_name, os.path.join(data_folder, 'unii_data.txt'))
class UNIIMolecule(object):
unii_path = os.path.join(data_folder, 'unii_data.txt')
if not os.path.exists(unii_path):
load_unii()
unii_df = pd.read_csv(unii_path, dtype=str, sep='\t', low_memory=False)
def __init__(self, unii=None, inchi_key=None, verbose=False):
if unii:
ind = UNIIMolecule.unii_df['UNII'].values == unii
else:
ind = UNIIMolecule.unii_df['INCHIKEY'].values == inchi_key
self.data = UNIIMolecule.unii_df.loc[ind, :]
if len(self.data.index) != 1:
raise ValueError('Provided ID did not return a unique UNII')
self.data_index = self.data.index[0]
        if verbose:
            # debug output: print a quick summary of the selected record
            print(self.preferred_name)
            print(self.stdinchikey)
            print(self.smiles)
@property
def stdinchikey(self):
ikey = self.data.loc[self.data_index, 'INCHIKEY']
if pd.isnull(ikey) and pd.isnull(self.smiles):
return None
return ikey
@property
    def stdinchi(self):
        # Full InChI generation would require a chemistry toolkit (e.g. CDK);
        # this property currently always returns None.
        return None
@property
def preferred_name(self):
name = self.data.loc[self.data_index, 'PT']
return UNIIMolecule.label_converter(name) if pd.notnull(name) else None
@property
def smiles(self):
smiles = self.data.loc[self.data_index, 'SMILES']
return smiles if pd.notnull(smiles) else None
@property
def molecule_type(self):
molecule_type = self.data.loc[self.data_index, 'UNII_TYPE']
return molecule_type if pd.notnull(molecule_type) else None
@property
def unii(self):
return self.data.loc[self.data_index, 'UNII']
@property
def cas(self):
cas = self.data.loc[self.data_index, 'RN']
return cas if pd.notnull(cas) else None
@property
def einecs(self):
einecs = self.data.loc[self.data_index, 'EC']
return einecs if pd.notnull(einecs) else None
@property
def rxnorm(self):
rxnorm = self.data.loc[self.data_index, 'RXCUI']
return rxnorm if pd.notnull(rxnorm) else None
@property
def nci(self):
nci = self.data.loc[self.data_index, 'NCIT']
return nci if pd.notnull(nci) else None
@property
def umls(self):
umls_cui = self.data.loc[self.data_index, 'UMLS_CUI']
return umls_cui if pd.notnull(umls_cui) else None
@property
def pubchem(self):
pubchem = self.data.loc[self.data_index, 'PUBCHEM']
return pubchem if pd.notnull(pubchem) else None
@property
def label(self):
item_label = self.preferred_name if self.preferred_name else self.unii
return item_label
def to_wikidata(self):
refs = [[
wdi_core.WDItemID(value='Q6593799', prop_nr='P248', is_reference=True), # stated in
wdi_core.WDExternalID(value=self.unii, prop_nr='P652', is_reference=True), # source element
wdi_core.WDTime(time=time.strftime('+%Y-%m-%dT00:00:00Z'), prop_nr='P813', is_reference=True) # retrieved
]]
print('UNII Main label is', self.label)
elements = {
'P652': self.unii,
'P2017': self.smiles,
'P235': self.stdinchikey,
'P231': self.cas,
'P232': self.einecs,
'P1748': self.nci,
'P3345': self.rxnorm
}
if self.smiles and len(self.smiles) > 400:
del elements['P2017']
data = []
for k, v in elements.items():
if not v:
continue
print('{}:'.format(k), v)
if isinstance(v, list) or isinstance(v, set):
for x in v:
data.append(wdi_core.WDString(prop_nr=k, value=x, references=refs))
else:
data.append(wdi_core.WDString(prop_nr=k, value=v, references=refs))
return data
@staticmethod
def label_converter(label):
label = label.lower()
greek_codes = {
'.alpha.': '\u03B1',
'.beta.': '\u03B2',
'.gamma.': '\u03B3',
'.delta.': '\u03B4',
'.epsilon.': '\u03B5',
            '.zeta.': '\u03B6',
'.eta.': '\u03B7',
'.theta.': '\u03B8',
'.iota.': '\u03B9',
'.kappa.': '\u03BA',
'.lambda.': '\u03BB',
'.mu.': '\u03BC',
'.nu.': '\u03BD',
'.xi.': '\u03BE',
'.omicron.': '\u03BF',
'.pi.': '\u03C0',
'.rho.': '\u03C1',
'.sigma.': '\u03C3',
'.tau.': '\u03C4',
'.upsilon.': '\u03C5',
'.phi.': '\u03C6',
'.chi.': '\u03C7',
'.psi.': '\u03C8',
'.omega.': '\u03C9',
}
for greek_letter, unicode in greek_codes.items():
if greek_letter in label:
label = label.replace(greek_letter, unicode)
match = re.compile('(^|[^a-z])([ezdlnhros]{1}|dl{1})[^a-z]{1}')
while True:
if re.search(match, label):
replacement = label[re.search(match, label).start(): re.search(match, label).end()].upper()
label = re.sub(match, repl=replacement, string=label, count=1)
else:
break
splits = label.split(', ')
splits.reverse()
return ''.join(splits) | mit | -2,078,402,913,448,461,300 | 29.028436 | 118 | 0.548698 | false |
tony/django-docutils | django_docutils/lib/directives/__init__.py | 1 | 1172 | from django.utils.module_loading import import_string
from docutils.parsers.rst import directives
from ..settings import BASED_LIB_RST
def register_based_directives():
"""Register all directives, exists to avoid race conditions.
Sometimes stuff like publish_parts can be ran from command line functions
tests. There's also ways we could avoid this by placing it in __init__
of django_docutils.lib, but that's a bit implicit. Investigate that later.
In order to make this work across django projects, let's use django
settings to register to them.
Why? Not all django projects want code highlighting (which requires
pygments). Let's use a TEMPLATES-style django config::
BASED_LIB_RST = {
'directives': { #: directive-name: Directive class (import string)
'code-block': 'django_docutils.lib.directives.pygments.CodeBlock'
}
}
"""
if not BASED_LIB_RST:
return
if 'directives' in BASED_LIB_RST:
for dir_name, dir_cls_str in BASED_LIB_RST['directives'].items():
class_ = import_string(dir_cls_str)
directives.register_directive(dir_name, class_)
| mit | 1,619,159,871,801,524,000 | 34.515152 | 78 | 0.68942 | false |
nuchi/httpserver | httpserver.py | 1 | 1065 | #!/usr/bin/env python
import socket
from http_handler import Handler_thread
MAX_CONNECTIONS = 5
class HTTPserver(object):
def __init__(self, localOnly=False, port=80, max_connections=MAX_CONNECTIONS):
self.port = port
self.max_connections = max_connections
if localOnly:
self.hostname = '127.0.0.1'
else:
self.hostname = socket.gethostname()
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def serve(self):
self.server.bind((self.hostname, self.port))
self.server.listen(self.max_connections)
while True:
client_socket, address = self.server.accept()
ht = Handler_thread()
ht.daemon = True
ht.run(client_socket)
def close(self):
self.server.close()
def create_and_run_server(localOnly=True, port=8000):
new_server = HTTPserver(localOnly=localOnly, port=port)
try:
new_server.serve()
except KeyboardInterrupt:
print('\nClosing server.')
pass
finally:
new_server.close()
if __name__ == '__main__':
create_and_run_server() | mit | -7,448,993,895,145,337,000 | 24.380952 | 79 | 0.712676 | false |
rbuffat/pyidf | tests/test_generatormicrochp.py | 1 | 3012 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.electric_load_center import GeneratorMicroChp
log = logging.getLogger(__name__)
class TestGeneratorMicroChp(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_generatormicrochp(self):
pyidf.validation_level = ValidationLevel.error
obj = GeneratorMicroChp()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_performance_parameters_name = "object-list|Performance Parameters Name"
obj.performance_parameters_name = var_performance_parameters_name
# object-list
var_zone_name = "object-list|Zone Name"
obj.zone_name = var_zone_name
# node
var_cooling_water_inlet_node_name = "node|Cooling Water Inlet Node Name"
obj.cooling_water_inlet_node_name = var_cooling_water_inlet_node_name
# node
var_cooling_water_outlet_node_name = "node|Cooling Water Outlet Node Name"
obj.cooling_water_outlet_node_name = var_cooling_water_outlet_node_name
# node
var_air_inlet_node_name = "node|Air Inlet Node Name"
obj.air_inlet_node_name = var_air_inlet_node_name
# node
var_air_outlet_node_name = "node|Air Outlet Node Name"
obj.air_outlet_node_name = var_air_outlet_node_name
# object-list
var_generator_fuel_supply_name = "object-list|Generator Fuel Supply Name"
obj.generator_fuel_supply_name = var_generator_fuel_supply_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.generatormicrochps[0].name, var_name)
self.assertEqual(idf2.generatormicrochps[0].performance_parameters_name, var_performance_parameters_name)
self.assertEqual(idf2.generatormicrochps[0].zone_name, var_zone_name)
self.assertEqual(idf2.generatormicrochps[0].cooling_water_inlet_node_name, var_cooling_water_inlet_node_name)
self.assertEqual(idf2.generatormicrochps[0].cooling_water_outlet_node_name, var_cooling_water_outlet_node_name)
self.assertEqual(idf2.generatormicrochps[0].air_inlet_node_name, var_air_inlet_node_name)
self.assertEqual(idf2.generatormicrochps[0].air_outlet_node_name, var_air_outlet_node_name)
self.assertEqual(idf2.generatormicrochps[0].generator_fuel_supply_name, var_generator_fuel_supply_name)
self.assertEqual(idf2.generatormicrochps[0].availability_schedule_name, var_availability_schedule_name) | apache-2.0 | 8,094,510,328,616,789,000 | 42.042857 | 119 | 0.688247 | false |
jsalva/ndim | ndim/ndim/wsgi.py | 1 | 1166 | """
WSGI config for ndim project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ndim.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | -2,149,861,468,380,887,800 | 37.866667 | 79 | 0.798456 | false |
googleads/google-ads-python | google/ads/googleads/v7/enums/types/hotel_rate_type.py | 1 | 1186 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v7.enums",
marshal="google.ads.googleads.v7",
manifest={"HotelRateTypeEnum",},
)
class HotelRateTypeEnum(proto.Message):
r"""Container for enum describing possible hotel rate types. """
class HotelRateType(proto.Enum):
r"""Enum describing possible hotel rate types."""
UNSPECIFIED = 0
UNKNOWN = 1
UNAVAILABLE = 2
PUBLIC_RATE = 3
QUALIFIED_RATE = 4
PRIVATE_RATE = 5
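# Example (illustrative): a value is referenced through the nested enum,
# e.g. HotelRateTypeEnum.HotelRateType.PUBLIC_RATE (== 3).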
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 7,993,871,855,612,706,000 | 29.410256 | 74 | 0.689713 | false |
rboman/progs | apps/pdf2ppt/pdf2ppt.py | 1 | 1429 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 Romain Boman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Split a PPT PDF with 4 pages per sheet (requires ImageMagick in the PATH)
#
# . name the pdf "cours.pdf"
# . export the pdf to PNG at 300 DPI
# . run the script
# . in Acrobat: Create PDF => From Multiple Files
#
# ref: http://www-etud.iro.umontreal.ca/~buisteri/info/pdfen.html
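# (note: "convert -crop 50x50%" with no offset tiles each page image into
# four quadrant images, one per slide)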
import os
import glob
fname = "cours_Page_*.pdf"
for f in glob.glob("Cours_Page_*.png"):
f2 = f.replace('.png', '-crop.png')
cmd = "convert -crop 95x95%%+0+0 %s %s" % (f, f2) # vire le numero
print(cmd)
os.system(cmd)
cmd = "convert -crop 50x50%% %s %s" % (f2, f)
print(cmd)
os.system(cmd)
os.remove(f2)
for g in glob.glob("%s-*.png" % f.replace('.png', '')):
cmd = "mogrify -trim %s" % g
print(cmd)
os.system(cmd)
os.remove(f)
| apache-2.0 | 2,452,702,609,578,649,000 | 30.733333 | 76 | 0.654062 | false |
doganulus/montre | montre/algebra.py | 1 | 4581 | import os
import sys
from ctypes import *
from distutils.sysconfig import get_python_lib
# if os.name == 'nt':
# libmontre = windll.LoadLibrary(os.path.join(get_python_lib(), "libmontre"))
# else:
# libmontre = cdll.LoadLibrary(os.path.join(get_python_lib(), "libmontre.so"))
if os.name == 'nt':
libmontre = windll.LoadLibrary(os.path.join("..", "libmontre"))
else:
libmontre = cdll.LoadLibrary(os.path.join("..", "libmontre.so"))
zone_type = POINTER(c_int64)
libmontre.zs_create.restype = c_void_p
libmontre.zs_create.argtypes = []
libmontre.zs_destroy.restype = None
libmontre.zs_destroy.argtypes = []
libmontre.zs_size.restype = c_int64
libmontre.zs_includes.restype = c_int64
libmontre.zs_append.argtypes = [c_void_p, c_int64, c_int64, c_int64, c_int64,c_int64,c_int64]
libmontre.zs_append.restype = None
libmontre.zs_append_not_anchored.argtypes = [c_void_p, c_int64, c_int64]
libmontre.zs_append_not_anchored.restype = None
libmontre.zs_get_zone.restype = zone_type
libmontre.zs_get_zone.argtypes = [c_void_p, c_int64]
class TimedRelation:
def __init__(self, obj=None):
        if obj is None:
self.obj = libmontre.zs_create()
else:
self.obj = obj
def __del__(self):
libmontre.zs_destroy(self.obj)
def __len__(self):
return libmontre.zs_size(self.obj)
def __iter__(self):
def get_zone(self, i):
z = libmontre.zs_get_zone(self.obj, i)
return (z[1], z[3], z[2], z[6], z[5], z[7])
return (get_zone(self, i) for i in range(len(self)))
@property
def zones(self):
return [((-(z[0]//2), z[1]//2, -(z[2]//2), z[3]//2, -(z[4]//2), z[5]//2), (z[0]%2, z[1]%2, z[2]%2, z[3]%2, z[4]%2, z[5]%2)) for z in self]
def __str__(self):
return "\n".join([
"({bminval}{bminbnd}x{bmaxbnd}{bmaxval}, {eminval}{eminbnd}y{emaxbnd}{emaxval}, {dminval}{dminbnd}y-x{dmaxbnd}{dmaxval})".format(
bminval=v[0], bminbnd="<" if b[0] == 0 else "<=",
bmaxval=v[1], bmaxbnd="<" if b[1] == 0 else "<=",
eminval=v[2], eminbnd="<" if b[2] == 0 else "<=",
emaxval=v[3], emaxbnd="<" if b[3] == 0 else "<=",
dminval=v[4], dminbnd="<" if b[4] == 0 else "<=",
dmaxval=v[5], dmaxbnd="<" if b[5] == 0 else "<=",
) for v, b in self.zones]
)
# return str([values for values, bounds in self.zones])
# def __getitem__(self, i): # access elements in vector at index
# if 0 <= i < len(self):
# return libmontre.zs_get_zone(self.obj, i)
# raise IndexError('Vector index out of range')
def append(self, bmin=0, bmax=sys.maxsize, emin=0, emax=sys.maxsize, dmin=0, dmax=sys.maxsize):
libmontre.zs_append(self.obj, bmin, bmax, emin, emax, dmin, dmax)
return self
def append_not_anchored(self, begin, end):
libmontre.zs_append_not_anchored(self.obj, begin, end)
return self
def absorb(self):
retobj = libmontre.zs_create()
libmontre.zs_filter(retobj, self.obj)
self.obj = retobj
@staticmethod
    def absorbed(relation):
        retobj = libmontre.zs_create()
        libmontre.zs_filter(retobj, relation.obj)
        return TimedRelation(retobj)
def includes(self, other):
return libmontre.zs_includes(self.obj, other.obj) != 0
def restrict(self, other, a, b):
retobj = libmontre.zs_create()
libmontre.zs_restrict(retobj, self.obj, c_int64(a), c_int64(b))
return TimedRelation(retobj)
def intersect(self, other):
retobj = libmontre.zs_create()
libmontre.zs_intersect(retobj, self.obj, other.obj)
return TimedRelation(retobj)
def concatenate(self, other):
retobj = libmontre.zs_create()
libmontre.zs_concatenate(retobj, self.obj, other.obj)
return TimedRelation(retobj)
def union(self, other):
retobj = libmontre.zs_create()
libmontre.zs_union(retobj, self.obj, other.obj)
return TimedRelation(retobj)
def plus(self, other):
retobj = libmontre.zs_create()
libmontre.zs_plus(retobj, self.obj)
return TimedRelation(retobj)
class Bound:
@staticmethod
def lt(c):
return 2*c
@staticmethod
def leq(c):
return 2*c + 1
@staticmethod
def gt(c):
return -2*c
@staticmethod
def geq(c):
return -2*c + 1
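# Illustrative check of the encoding above:
#     Bound.lt(3) == 6, Bound.leq(3) == 7, Bound.gt(3) == -6, Bound.geq(3) == -5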
| gpl-3.0 | -8,366,900,255,173,163,000 | 30.721429 | 146 | 0.577385 | false |
maximz/cooperate-without-looking | src/cwl.py | 1 | 24981 | # -*- coding: utf-8 -*-
"""Module cwl.
Produces simulation calculation and figures for the Cooperate With/Without Looking project.
Usage:
python cwl.py {recalculate?}
Examples:
python cwl.py run using pre-calculated saved data
python cwl.py recalculate run with freshly calculated data
@author: Maxim Zaslavsky <maxim@maximzaslavsky.com>
@author: Erez Yoeli <eyoeli@gmail.com>
"""
### GENERAL
# system imports
import sys, os
import numpy as np
import matplotlib
matplotlib.use("pdf") # save as PDFs
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from collections import defaultdict
from random import sample as random_sample
from math import floor
import cPickle as pickle
# Choose whether to recalculate or to used saved data
Calculate = False
if __name__ == "__main__":
try:
if sys.argv[1] == 'recalculate':
Calculate = True
    except IndexError: # Interactive mode (no command-line argument)
pass
output_dir = '../bin/'
print 'Welcome to the CW(O)L Simulations and Figures toolkit.'
print
#######################################################
#######################################################
# Game theory methods
def are_assumptions_valid(a, b, c1, c2, d, p, w):
#P1 and P2 prefer a cooperative interaction to no interaction
statement_1 = a > 0 and b > 0
#P1 gets short-term gains from defection
statement_2 = c1 > a and c2 > a
    #P2 doesn't want to interact with 1 if he expects 1 to defect in either game.
statement_3 = b * p + d * (1 - p) < 0 and d * p + b * (1 - p) < 0
#wlog it is more tempting to defect in state 2.
statement_4 = c2 > c1
#all of this must hold
return statement_1 and statement_2 and statement_3 and statement_4
def get_game_population_1(a, b, c1, c2, d, p, w):
"""
Game for population 1 of CWL
"""
if not are_assumptions_valid(a, b, c1, c2, d, p, w):
raise ValueError("This parameters do not comply with assumptions")
A = np.empty(shape=(4, 3))
A[0, 0] = (a * p + a * (1.0 - p)) / (1.0 - w)
A[0, 1] = (a * p + a * (1.0 - p)) / (1.0 - w)
A[0, 2] = (a * p + a * (1.0 - p))
A[1, 0] = (a * p + a * (1.0 - p))
A[1, 1] = (a * p + a * (1.0 - p)) / (1 - w)
A[1, 2] = (a * p + a * (1.0 - p))
A[2, 0] = (a * p + c2 * (1.0 - p))
A[2, 1] = (a * p + c2 * (1.0 - p)) / (1 - p * w)
A[2, 2] = (a * p + c2 * (1.0 - p))
A[3, 0] = (c1 * p + c2 * (1.0 - p))
A[3, 1] = (c1 * p + c2 * (1.0 - p))
A[3, 2] = (c1 * p + c2 * (1.0 - p))
return A
def get_game_population_2(a, b, c1, c2, d, p, w):
"""
Game for population 2 of CWL
"""
if not are_assumptions_valid(a, b, c1, c2, d, p, w):
raise ValueError("This parameters do not comply with assumptions")
B = np.empty(shape=(4, 3))
B[0, 0] = (b * p + b * (1.0 - p)) / (1.0 - w)
B[0, 1] = (b * p + b * (1.0 - p)) / (1.0 - w)
B[0, 2] = (b * p + b * (1.0 - p))
B[1, 0] = (b * p + b * (1.0 - p))
B[1, 1] = (b * p + b * (1.0 - p)) / (1.0 - w)
B[1, 2] = (b * p + b * (1.0 - p))
B[2, 0] = (b * p + d * (1.0 - p))
B[2, 1] = (b * p + d * (1.0 - p)) / (1.0 - p * w)
B[2, 2] = (b * p + d * (1.0 - p))
B[3, 0] = (d * p + d * (1.0 - p))
B[3, 1] = (d * p + d * (1.0 - p))
B[3, 2] = (d * p + d * (1.0 - p))
return B.T
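# Example (illustrative parameter values, chosen only to satisfy the
# assumptions above rather than taken from the paper):
#     A = get_game_population_1(a=3, b=3, c1=4, c2=5, d=-6, p=0.5, w=0.9)
#     B = get_game_population_2(a=3, b=3, c1=4, c2=5, d=-6, p=0.5, w=0.9)
#     A.shape, B.shape   # (4, 3) and (3, 4)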
# replicator
def __replicator_equation_two_populations(x, t, game1, game2, number__of_strategies_population_1, number__of_strategies_population_2):
"""
This auxiliary function codes the replicator dynamics step. Typically it is only called from replicator_trajectory_two_populations()
Parameters
----------
x: ndarray initial state (concatenated from the two populations)
t: time
game1: ndarray, game for population 1
game2: ndarray, game for population 2
number__of_strategies_population_1: int
number__of_strategies_population_2: int
Returns:
out: ndarray next state (concatenated from the two populations)
"""
x_population_1 = x[0:number__of_strategies_population_1]
#the first piece of y corresponds to population 1
x_population_2 = x[number__of_strategies_population_1:number__of_strategies_population_1 + number__of_strategies_population_2] # the second piece of y corresponds to population 2
#First Ay
fitness_vector_1 = np.dot(game1, x_population_2)
# and Bx (see equation above)
fitness_vector_2 = np.dot(game2, x_population_1)
#Now xAy
average_fitness_1 = np.dot(x_population_1, fitness_vector_1)
#And yBx
average_fitness_2 = np.dot(x_population_2, fitness_vector_2)
#the next lines correspond to equations 10.5 and 10.6 of Hofbauer and Sigmund (page 116)
new_population_1 = x_population_1 * (fitness_vector_1 - average_fitness_1)
new_population_2 = x_population_2 * (fitness_vector_2 - average_fitness_2)
return np.array(new_population_1.tolist() + new_population_2.tolist())
def replicator_trajectory_two_populations(game_matrix_1, game_matrix_2, x_0, y_0, t_vector, **kwargs):
"""
Computes a replicator trajectory for two populations, given two games, starting points and time vector.
It uses scipy's odeint.
Parameters
----------
game_matrix_1: numpy matrix (for population 1)
game_matrix_2: numpy matrix (for population 2)
x_0: ndarray
y_0: ndarray
t_vector: time array
Returns
-------
out: list
Examples
--------
    (see the usage sketch in the comments after this function)
"""
#join initial populations to fit signature of replicator_equation
start = np.array(x_0.tolist() + y_0.tolist())
number__of_strategies_population_1 = len(x_0)
number__of_strategies_population_2 = len(y_0)
#solve
soln = odeint(__replicator_equation_two_populations, start, t_vector, args=(game_matrix_1, game_matrix_2, number__of_strategies_population_1, number__of_strategies_population_2), **kwargs)
return [soln[:, i] for i in xrange(number__of_strategies_population_1 + number__of_strategies_population_2)]
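# Usage sketch for replicator_trajectory_two_populations (the parameter
# values mirror those set further below and are illustrative only):
#   g1 = get_game_population_1(1.0, 1.0, 4.0, 12.0, -10.0, 0.51, 0.895)
#   g2 = get_game_population_2(1.0, 1.0, 4.0, 12.0, -10.0, 0.51, 0.895)
#   x0 = get_random_point_inside_simplex(4)  # population 1 has 4 strategies
#   y0 = get_random_point_inside_simplex(3)  # population 2 has 3 strategies
#   t = np.linspace(0.0, 30.0, 1000)
#   traj = replicator_trajectory_two_populations(g1, g2, x0, y0, t)
#   # traj is a list of 7 arrays: one frequency trajectory per strategy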
def get_random_point_inside_simplex(dimension):
"""
Returns a vector that sums up to one, where components have been uniformly chosen.
Parameters:
----------
dimension:int
"""
exponencial = np.random.exponential(size=dimension)
exponencial /= np.sum(exponencial, dtype=float)
return exponencial
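# Note: normalising i.i.d. Exponential(1) draws is the standard way to sample
# uniformly from the simplex; np.sum(get_random_point_inside_simplex(n)) is
# always 1.0 up to floating point error.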
def adjusted_solution(a, b, c1, c2, d, p, w, x_0, y_0, max_t, **kwargs):
"""
    Returns a steady state, by dynamically adjusting the step size and total error.
"""
tolerance = 1e-4
added_factor_vector = [10.0, 20.0, 50.0, 100.0]
game_1 = get_game_population_1(a, b, c1, c2, d, p, w)
game_2 = get_game_population_2(a, b, c1, c2, d, p, w)
t = np.linspace(0.0, max_t, 2000)
if x_0 is None or y_0 is None:
(x_0, y_0) = (get_random_point_inside_simplex(4), get_random_point_inside_simplex(3))
for added_factor in added_factor_vector:
sol = replicator_trajectory_two_populations(added_factor + game_1, added_factor + game_2, x_0, y_0, t, atol=tolerance, **kwargs)
end_point = [sol[i][-1] for i in xrange(0, 7)]
if np.allclose(sum(end_point), 2.0, atol=tolerance):
return end_point
raise ValueError("Numerics: x = {}, y = {}, a = {}, b = {}, c1 = {}, c2 = {}, d = {}, p = {}, w = {}".format(x_0.tolist(), y_0.tolist(), a, b, c1, c2, d, p, w))
def determine_outcome(solution):
tolerance = 1e-3
if not np.allclose(np.sum(solution), 2.0, atol=tolerance):
raise ValueError("Probabilities don't add up: {} ".format(solution))
elif player1_CWOL(solution, atol=tolerance) and player2_sometimes_exits_if_looks_or_defects(solution, atol=tolerance):
return (1, solution)
elif player1_alwaysD(solution, atol=tolerance) and (player2_pure_strategy(solution, atol=tolerance) or player2_mixes(solution, atol=tolerance)):
return (2, solution)
elif player2_exitifdefect(solution, atol=tolerance) and (player1_CWOL(solution, atol=tolerance) or player1_CWL(solution, atol=tolerance) or player1_CWOL_or_CWL(solution, atol=tolerance)):
return (3, solution)
else:
return (4, solution)
def determine_random_outcome(a, b, c1, c2, d, p, w, max_t, **kwargs):
"""
Starting in a random point tries to determine the outcome, given parameters.
This is the main function to be called from montecarlo procedures
"""
x_0 = get_random_point_inside_simplex(4)
y_0 = get_random_point_inside_simplex(3)
solution = adjusted_solution(a, b, c1, c2, d, p, w, x_0, y_0, max_t)
return determine_outcome(solution)
def montecarlo(a, b, c1, c2, d, p, w, max_t=300, repetitions=5000):
"""
Takes samples for a given point in the space. Counting the occurrences
of different outcomes, and returns them in a dictionary with the
following indexes:
1 - Outcome 1
2 - Outcome 2
3 - Outcome 3
    4 - Not categorized
"""
ans = defaultdict(int)
sum_of_solution = np.zeros(7)
for i in xrange(0, repetitions):
try:
outcome, solution = determine_random_outcome(a, b, c1, c2, d, p, w, max_t)
ans[outcome] = ans[outcome]+1
sum_of_solution += solution
except ValueError, e:
print e
ans[5] = ans[5] + 1
    avg_of_solution = sum_of_solution/repetitions
    # the raw sums are returned; callers normalise by the number of valid runs themselves
    return (ans, sum_of_solution)
#--------- THEORY CHECKING FUNCTIONS ----------
def is_coop_wihtout_looking_an_equilibrium(a, b, c1, c2, d, p, w):
return c1*p+c2*(1.0 - p) < a / (1.0 - w)
def is_coop_looking_an_equilibrium(a, b, c1, c2, d, p, w):
return c2 < a / (1.0 - w)
def number_of_equlibria(a, b, c1, c2, d, p, w):
CWOL = is_coop_wihtout_looking_an_equilibrium(a, b, c1, c2, d, p, w)
CWL = is_coop_looking_an_equilibrium(a, b, c1, c2, d, p, w)
if CWOL and CWL:
return 3
    elif CWOL or CWL:
return 2
else:
return 1
#--- classifier functions
def player1_CWOL(solution, atol=1e-3):
    # np.allclose's third positional argument is rtol, so atol is passed by keyword throughout
    player1_plays_desired_pure_strategy = np.allclose(solution[0], 1.0, atol=atol)
    return player1_plays_desired_pure_strategy
def player1_CWL(solution, atol=1e-3):
    player1_plays_desired_pure_strategy = np.allclose(solution[1], 1.0, atol=atol)
    return player1_plays_desired_pure_strategy
def player1_Cin1(solution, atol=1e-3):
    player1_plays_desired_pure_strategy = np.allclose(solution[2], 1.0, atol=atol)
    return player1_plays_desired_pure_strategy
def player1_alwaysD(solution, atol=1e-3):
    player1_plays_desired_pure_strategy = np.allclose(solution[3], 1.0, atol=atol)
    return player1_plays_desired_pure_strategy
def player1_pure_strategy(solution, atol=1e-3):
    return (player1_CWOL(solution, atol) or player1_CWL(solution, atol) or player1_Cin1(solution, atol) or player1_alwaysD(solution, atol))
def player1_CWOL_or_CWL(solution, atol=1e-3):
    #solution[0:1] is now solution[0:2]
    player1_mixes_CWL_CWOL = np.allclose(np.sum(solution[0:2]), 1.0, atol=atol)
    return player1_mixes_CWL_CWOL and not player1_pure_strategy(solution, atol)
def player1_mixes(solution, atol=1e-3):
    #solution[0:3] is now solution[0:4]
    player1_mixes = np.allclose(np.sum(solution[0:4]), 1.0, atol=atol)
    return player1_mixes and not player1_pure_strategy(solution, atol)
def player2_exitiflook(solution, atol=1e-3):
    player2_plays_desired_pure_strategy = np.allclose(solution[4], 1.0, atol=atol)
    return player2_plays_desired_pure_strategy
def player2_exitifdefect(solution, atol=1e-3):
    player2_plays_desired_pure_strategy = np.allclose(solution[5], 1.0, atol=atol)
    return player2_plays_desired_pure_strategy
def player2_alwaysexit(solution, atol=1e-3):
    player2_plays_desired_pure_strategy = np.allclose(solution[6], 1.0, atol=atol)
    return player2_plays_desired_pure_strategy
def player2_pure_strategy(solution, atol=1e-3):
    return (player2_exitifdefect(solution, atol=atol) or player2_exitiflook(solution, atol=atol) or player2_alwaysexit(solution, atol=atol))
def player2_mixes(solution, atol=1e-3):
    #solution[4:6] is now changed to solution[4:7], please verify.
    player2_mixes = np.allclose(np.sum(solution[4:7]), 1.0, atol=atol)
    return player2_mixes and not player2_pure_strategy(solution, atol=atol)
def player2_sometimes_exits_if_looks_or_defects(solution, atol=1e-3):
    player2_sometimes_exits_if_looks = not np.allclose(solution[4], 0.0, atol=atol)
    player2_sometimes_exits_if_defects = not np.allclose(solution[5], 0.0, atol=atol)
    return player2_sometimes_exits_if_looks or player2_sometimes_exits_if_defects
# Additional plot beautifier functions:
def summarize_binary_list(lista):
"""
    Determines the edges of sequences of 1's in a binary list.
"""
ans = []
x_0 = None
tamano = len(lista)
for i in xrange(tamano):
if lista[i] == 1 and x_0 is None:
x_0 = i
end_of_sequence = lista[i] == 0
end_of_array = i == (tamano-1) and lista[i] == 1
if (end_of_sequence or end_of_array) and x_0 is not None:
if end_of_sequence:
ans.append((x_0, i-1))
if end_of_array:
ans.append((x_0, i))
x_0 = None
return ans
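# Example: summarize_binary_list([0, 1, 1, 0, 1]) returns [(1, 2), (4, 4)],
# i.e. the (start, end) indices of each run of consecutive 1's.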
#######################################################
#######################################################
### FIGURE 2 PREPARATION
def clear_past_figs():
plt.close()
plt.clf()
plt.cla()
plt.close()
#del f, fig_all
#gc.collect()
def export_graph(f_i, f_name):
#f_i.savefig(output_dir+f_name+'.png',dpi=300)
#f_i.savefig(output_dir+f_name+'.png',dpi=600)
f_i.savefig(output_dir+f_name+'.pdf', dpi=600) # This one looks the best
print f_name, 'exported as pdf at 600 dpi.' # 300dpi_png, 600dpi_png,
# Figure 2B and 2C calculations:
print 'Calculating or loading values for Figure 2B and Figure 2C'
p = 0.5 + 0.01
b = 1.0
c1 = 4.0
c2 = 12.0
d = -10.0
w = 7.0/8.0 + 0.02
repetitions = 10000
number_of_points = 50
if Calculate:
a_interval = np.linspace(0.0+0.1, 2.0, number_of_points, endpoint=False)
a_interval_tight = np.linspace(0.0+0.1, 2.0, number_of_points) # TODO: change to 300?
#lets plot the theory predictions first as a shade
calculated_equilibria=[number_of_equlibria(a, b, c1, c2, d, p, w) for a in a_interval_tight]
one_equilibrium_region = summarize_binary_list([ce == 1 for ce in calculated_equilibria])
two_equilibria_region = summarize_binary_list([ce == 2 for ce in calculated_equilibria])
three_equilibria_region = summarize_binary_list([ce == 3 for ce in calculated_equilibria])
#first the sampling
outcome_1 = []
outcome_2 = []
outcome_3 = []
outcome_4 = []
no_outcome = []
strategy_1 = []
strategy_2 = []
strategy_3 = []
strategy_4 = []
strategy_5 = []
strategy_6 = []
strategy_7 = []
for a in a_interval_tight: # TODO: should this be a_interval?
diccionario, avg_strategy_frequency = montecarlo(a, b, c1, c2, d, p, w, repetitions=repetitions)
outcome_1.append(diccionario[1])
outcome_2.append(diccionario[2])
outcome_3.append(diccionario[3])
outcome_4.append(diccionario[4])
no_outcome.append(diccionario[5])
strategy_1.append(avg_strategy_frequency[0])
strategy_2.append(avg_strategy_frequency[1])
strategy_3.append(avg_strategy_frequency[2])
strategy_4.append(avg_strategy_frequency[3])
strategy_5.append(avg_strategy_frequency[4])
strategy_6.append(avg_strategy_frequency[5])
strategy_7.append(avg_strategy_frequency[6])
stuff = [a_interval, a_interval_tight, one_equilibrium_region, two_equilibria_region, three_equilibria_region, outcome_1, outcome_2, outcome_3, outcome_4, no_outcome, strategy_1, strategy_2, strategy_3, strategy_4, strategy_5, strategy_6, strategy_7]
pickle.dump( stuff, open( output_dir+"Figure 2_B and C_strategy frequency.saved_data", "wb" ) )
else:
(a_interval, a_interval_tight, one_equilibrium_region, two_equilibria_region, three_equilibria_region, outcome_1, outcome_2, outcome_3, outcome_4, no_outcome, strategy_1, strategy_2, strategy_3, strategy_4, strategy_5, strategy_6, strategy_7) = pickle.load(open(output_dir+"Figure 2_B and C_strategy frequency.saved_data", "r"))
# Plotting:
clear_past_figs()
def process_ax(ax):
'''
Shades figure to correspond to equilibria regions.
'''
# hack to fill white space in the middle:
midpoint = (a_interval_tight[one_equilibrium_region[0][1]] + a_interval_tight[two_equilibria_region[0][0]])/2
midpoint1 = (a_interval_tight[two_equilibria_region[0][1]] + a_interval_tight[three_equilibria_region[0][0]])/2
for dupla in one_equilibrium_region:
#ax.axvspan(p_interval_tight[dupla[0]], p_interval_tight[dupla[1]], facecolor='red', alpha=0.2)
ax.axvspan(a_interval_tight[dupla[0]], midpoint, facecolor='white', alpha=1) # red, alpha=0.2
print 'one', dupla, a_interval_tight[dupla[0]], a_interval_tight[dupla[1]]
for dupla in two_equilibria_region:
#ax.axvspan(p_interval_tight[dupla[0]], p_interval_tight[dupla[1]], facecolor='blue', alpha=0.2)
ax.axvspan(midpoint, midpoint1, facecolor='0.50', alpha=0.2) # blue or .80
print 'two', dupla, a_interval_tight[dupla[0]], a_interval_tight[dupla[1]]
for dupla in three_equilibria_region:
ax.axvspan(midpoint1, a_interval_tight[dupla[1]], facecolor='0.10', alpha=0.2) # yellow or .20
print 'three', dupla, a_interval_tight[dupla[0]], a_interval_tight[dupla[1]]
avoid_end = -1 # drop the last data point
#######################################################
#######################################################
### PLOT FIGURE 2(B): Frequency vs. a-value
print
print 'Plotting Figure 2B'
clear_past_figs()
f = plt.figure(figsize=(10,10))
process_ax(f.gca())
plt.plot(a_interval[:avoid_end], (np.array(outcome_1)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'bo-', label='Cooperate without looking')
plt.plot(a_interval[:avoid_end], (np.array(outcome_2)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'ro-', label='Always defect')
plt.plot(a_interval[:avoid_end], (np.array(outcome_3)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'yo-', label='Cooperate with looking')
plt.plot(a_interval[:avoid_end], (np.array(outcome_4)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'ko-', label='Other')
plt.grid()
plt.legend(loc='best')
plt.ylim((-0.01, 1.01))
plt.xlim((a_interval[0]-0.01, a_interval[-1]+0.01))
plt.xlabel('a')
plt.ylabel('Frequency')
plt.title('Frequency vs a')
export_graph(f, 'Figure_2B')
#######################################################
#######################################################
### PLOT FIGURE 2(C): Average frequency of strategies for players 1 and 2
print
print 'Plotting Figure 2C'
clear_past_figs()
fig_all, (ax1, ax2) = plt.subplots(2,1, sharex=False, sharey=False) # make 2x1 grid of subplots
fig_all.set_size_inches(10, 15)
#plt.subplots_adjust(wspace=0.30, hspace=0.15)
#prepare plots
for ax in (ax1, ax2):
ax.grid()
ax.legend(loc='best')
ax.set_ylim((-0.01, 1.01))
ax.set_xlim((a_interval[0]-0.01, a_interval[-1]+0.01))
ax.set_xlabel('a')
ax.set_ylabel('Frequency')
process_ax(ax)
plt.tight_layout()
#player1
ax1.plot(a_interval[:avoid_end], (np.array(strategy_1)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'bo-', label='P1 CWOL')
ax1.plot(a_interval[:avoid_end], (np.array(strategy_2)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'ro-', label='P1 CWL')
ax1.plot(a_interval[:avoid_end], (np.array(strategy_3)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'yo-', label='P1 C in 1')
ax1.plot(a_interval[:avoid_end], (np.array(strategy_4)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'ko-', label='P1 All D')
ax1.set_title('Average Frequency of Strategies - Player 1')
ax1.legend(loc='best')
#player2
ax2.plot(a_interval[:avoid_end], (np.array(strategy_5)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'co-', label='P2 Exit if Look')
ax2.plot(a_interval[:avoid_end], (np.array(strategy_6)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'mo-', label='P2 Exit if Defect')
ax2.plot(a_interval[:avoid_end], (np.array(strategy_7)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'go-', label='P2 Always Exit')
ax2.set_title('Average Frequency of Strategies - Player 2')
ax2.legend(loc='best')
fig_all.tight_layout()
export_graph(fig_all, 'Figure_2C')
#######################################################
#######################################################
### PLOT FIGURE 2(A): Player 1 and 2 strategy replicator trajectories from single simulation run
print
print 'Calculating or loading values for Figure 2A'
# Decide which a-values to use and plot.
def get_a_value_from_interval(bounds):
for (bound_x, bound_y) in bounds:
i_chosen = int(floor((bound_x+bound_y)/2.0))
yield a_interval_tight[i_chosen]
a_selected = list(get_a_value_from_interval([one_equilibrium_region[0], two_equilibria_region[0], three_equilibria_region[0]]))
# This setup supports having multiple columns, i.e. one column for each a-value.
# The below is currently configured to hide all but the second column - however, we could easily disable this to return to all-column view, simply by commenting out the following line:
a_selected = a_selected[1:2]
print 'Using these a-values:', a_selected
# Randomly seed strategy frequencies:
if Calculate:
tolerance_current=1e-2 # previously, 1e-3. arbitrary designation.
x_0 = get_random_point_inside_simplex(4) # random frequency
y_0 = get_random_point_inside_simplex(3) # random frequency
t_vector = np.linspace(0.0, 30.0, 1000) # time values
parameters_saved = [x_0, y_0, t_vector, tolerance_current, b, c1, c2, d, p, w] # a_selected is not necessary
pickle.dump( parameters_saved, open( output_dir+"Figure 2_A_single simulation run of strategy replicator trajectories.saved_data", "wb" ) )
else: # load previous working version
(x_0, y_0, t_vector, tolerance_current, b, c1, c2, d, p, w) = pickle.load(open(output_dir+"Figure 2_A_single simulation run of strategy replicator trajectories.saved_data", "r"))
# Begin plot:
print
print 'Plotting Figure 2A'
clear_past_figs()
fig_all, ax_arr = plt.subplots(2,len(a_selected), sharex=False, sharey=False, figsize=(10,20)) # make 2 rows x 3 columns grid of subplots; (30, 20) size when 3x2
for i in range(len(a_selected)):
    if len(a_selected) == 1: # Treat the situation differently based on whether we are comparing a-values or not.
(ax_p1, ax_p2) = (ax_arr[0], ax_arr[1])
else:
(ax_p1, ax_p2) = (ax_arr[0,i], ax_arr[1,i])
a_cur = a_selected[i]
solution = replicator_trajectory_two_populations(get_game_population_1(a_cur, b, c1, c2, d, p, w), get_game_population_2(a_cur, b, c1, c2, d, p, w), x_0, y_0, t_vector, atol=tolerance_current)
for ax in (ax_p1, ax_p2):
ax.set_ylim((-0.1, 1.1))
ax.set_xlim(0,10)
ax.set_ylabel('Frequency')
ax.set_xlabel('Time')
ax.grid(True)
ax_p1.plot(t_vector, solution[0], 'b-', label='P1 C wout looking', linewidth=2.0)
ax_p1.plot(t_vector, solution[1], 'g-', label='P1 Observe and C', linewidth=2.0)
ax_p1.plot(t_vector, solution[2], 'y-', label='P1 Observe and C only if 1 is chosen', linewidth=2.0)
ax_p1.plot(t_vector, solution[3], 'r-', label='P1 ALLD', linewidth=2.0)
ax_p2.plot(t_vector, solution[4], 'm--', label='P2 Continue iff P1 C wout looking', linewidth=2.0)
ax_p2.plot(t_vector, solution[5], 'y--', label='P2 Continue iff P1 C', linewidth=2.0)
ax_p2.plot(t_vector, solution[6], 'r--', label='P2 Exit', linewidth=2.0)
ax_p1.set_title('Player 1 Strategies') # 'Player 1. a = '+str(a_cur)+'.'
ax_p2.set_title('Player 2 Strategies') # 'Player 2. a = '+str(a_cur)+'.'
ax_p1.legend(loc='best')
ax_p2.legend(loc='best')
#fig_all.suptitle('Single simulation run, replicator trajectory; tolerance = '+str(tolerance_current)+'.', fontsize=24)
fig_all.tight_layout()
fig_all.subplots_adjust(top=0.85)
# fig_all.show()
export_graph(fig_all, 'Figure_2A')
#######################################################
#######################################################
print
print 'CW(O)L Simulation Calculations and Figures Complete.'
| mit | -9,027,010,238,727,414,000 | 38.818627 | 332 | 0.616148 | false |
monkeymia/js | mmdb/Test_DB_Interface.py | 1 | 13315 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# https://github.com/monkeymia/
#
# Copyright (c) 2014, monkeymia, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library
#
# To make it work in Ubuntu:
# sudo apt-get install python-pip
# sudo apt-get install python-dev
# sudo pip install coverage
# sudo pip install nose
# sudo pip install flake8
#
# run tests with:
# nosetests --with-coverage Test*.py
# flake8 *.py --show-source --statistics --select=E
#
# run tests with html output:
# nosetests --with-coverage --cover-html Test*.py
#
# Delete trailing whitespaces: sed -i.bak 's/[[:blank:]]*$//' "$1"
#
#
import DB_Interface
import unittest
import StringIO
class Test_DB_Interface(unittest.TestCase):
db_name = "unit_test_db_interface"
table_name = "table_test_db_interface"
def setUp(self):
self.db = DB_Interface.DB_Interface()
self.req = StringIO.StringIO()
res = self.db.clear_dbs()
self.assertTrue(res)
res = self.db.mk_db(self.db_name)
self.assertTrue(res)
res = self.db.use_db(self.db_name)
self.assertTrue(res)
col = []
col.append({"type": "INT", "name": "col1"})
col.append({"type": "INT", "name": "col2"})
col.append({"type": "INT", "name": "col3"})
res = self.db.mk_table(self.table_name, col)
self.assertTrue(res)
# end def
def test_close(self):
self.db.close()
self.assertEqual(self.db.__class__._connection, None)
# end def
def test_clear(self):
self.db.clear_dbs()
result = self.db.ls_dbs()
self.assertTrue(result.rowcount > 0)
self.assertTrue(result)
# end def
def test_del_row(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.new_row(self.table_name, "foo", test_vals)
self.assertTrue(res)
res = self.db.new_row(self.table_name, "foo1", test_vals)
self.assertTrue(res)
res = self.db.del_row(self.table_name, "foo")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertEqual(str(res), "lc=4,lr=1,rc=1,le=0")
self.assertTrue(res)
res = self.db.del_row(self.table_name, "foo1")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertEqual(str(res), "lc=4,lr=0,rc=0,le=0")
self.assertTrue(res)
# end def
def test_del_row_all(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.new_row(self.table_name, "foo", test_vals)
self.assertTrue(res)
res = self.db.new_row(self.table_name, "foo1", test_vals)
self.assertTrue(res)
res = self.db.del_row(self.table_name, None, del_all=True)
self.assertEqual(str(res), "lc=0,lr=0,rc=2,le=0")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertEqual(str(res), "lc=4,lr=0,rc=0,le=0")
self.assertTrue(res)
# end def
def test_del_row_e1(self):
# check what happens if no row
self.assertRaises(
NotImplementedError, self.db.del_row, self.table_name, "foo")
# end def
def test_get_row(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.new_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=4,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertTrue("foo" in res.rows[0])
self.assertTrue(res)
res = self.db.get_row(self.table_name, "foo")
self.assertEqual(str(res), "lc=0,lr=1,rc=1,le=0")
self.assertTrue(res)
res = self.db.get_row(self.table_name, "foo", cols=["col1"])
self.assertEqual(str(res), "lc=1,lr=1,rc=1,le=0")
self.assertTrue(res)
# end def
def test_get_row_e1(self):
# check what happens if no row
res = self.db.get_row(self.table_name, "foo")
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertFalse(res)
# end def
def test_has_row_e1(self):
# check what happens if no row
res = self.db.has_row(self.table_name, "foo")
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertFalse(res)
# end def
def test_ls_layouts_e1(self):
self.db.extension_json = "invalid_unrealistic_extension"
res = self.db.ls_layouts()
self.assertEqual(str(res), "lc=1,lr=0,rc=0,le=0")
self.assertTrue(res)
# end def
def test_ls_dbs(self):
res = self.db.ls_dbs()
self.assertTrue(len(res.rows) > 0)
self.assertTrue(self.db_name in res.rows)
self.assertTrue(res)
# end def
def test_ls_cols(self):
res = self.db.ls_cols(self.table_name)
self.assertEqual(str(res), "lc=1,lr=4,rc=4,le=0")
self.assertTrue("col1" in res.rows)
self.assertTrue("col2" in res.rows)
self.assertTrue("col3" in res.rows)
self.assertTrue(res)
# end def
def test_ls_cols_e1(self):
res = self.db.ls_cols("invalid")
self.assertEqual(str(res), "lc=1,lr=0,rc=0,le=0")
self.assertFalse("col1" in res.rows)
self.assertFalse("col2" in res.rows)
self.assertFalse("col3" in res.rows)
self.assertTrue(res)
# end def
def test_ls_rows(self):
res = self.db.ls_rows(self.table_name)
self.assertEqual(len(res.rows), 0)
self.assertEqual(str(res), "lc=4,lr=0,rc=0,le=0")
self.assertTrue(res)
# end def
def test_ls_tables(self):
res = self.db.ls_tables()
self.assertEqual(len(res.rows), 1)
self.assertEqual(str(res.singleton()), self.table_name)
self.assertTrue(res)
# end def
def test_mk_db(self):
res = self.db.mk_db("test2")
self.assertEqual(str(res), "lc=0,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
res = self.db.use_db("test2")
self.assertTrue(res)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), "test2")
self.assertTrue(res)
res = self.db.rm_db("test2")
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertTrue(res)
res = self.db.pwd_db()
self.assertEqual(res.singleton(), None)
self.assertTrue(res)
# end def
def test_mk_tables(self):
n = "test_asdf"
res = self.db.mk_table(n, [])
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertTrue(res)
res = self.db.ls_cols(n)
self.assertEqual(len(res.rows), 1) # primary key
self.assertTrue(res)
res = self.db.rm_table(n)
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertTrue(res)
# end def
def test_new_db_e1(self):
res = self.db.new_db("foo", "invalid_unrealistic_layout")
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
# end def
def test_new_row(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.new_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=4,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.has_row(self.table_name, "foo")
        self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertEqual(str(res), "lc=4,lr=1,rc=1,le=0")
self.assertTrue(res)
res = self.db.del_row(self.table_name, "foo")
self.assertEqual(str(res), "lc=0,lr=0,rc=1,le=0")
self.assertTrue(res)
# end def
def test_new_row_e1(self):
# if key is too long the new command fails silent.
test_vals = {"col1": 1, "col2": 2, "col3": 3}
key = "f" * 100
res = self.db.new_row(self.table_name, key, test_vals)
self.assertEqual(str(res), "lc=4,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.has_row(self.table_name, key)
        self.assertFalse(res)
# end def
def test_set_row(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.set_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=4,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.has_row(self.table_name, "foo")
        self.assertTrue(res)
res = self.db.del_row(self.table_name, "foo")
self.assertEqual(str(res), "lc=0,lr=0,rc=1,le=0")
self.assertTrue(res)
# end def
def test_set_row_1(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.set_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=4,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertTrue("foo" in res.rows[0])
self.assertTrue(3 in res.rows[0])
self.assertTrue(res)
test_vals = {"col1": 1, "col2": 2, "col3": 4}
res = self.db.set_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=0,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertTrue(4 in res.rows[0])
self.assertTrue(res)
test_vals = {"col1": 5, "col2": 6}
res = self.db.set_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=0,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertTrue(4 in res.rows[0])
self.assertTrue(5 in res.rows[0])
self.assertTrue(6 in res.rows[0])
self.assertTrue(res)
# end def
def test_pwd_db(self):
res = self.db.pwd_db()
self.assertEqual(str(res), "lc=1,lr=1,rc=1,le=0")
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
# end def
def test_use_db(self):
res = self.db.use_db(self.db_name)
self.assertEqual(res.errors, [])
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertTrue(res)
res = self.db.pwd_db()
self.assertEqual(str(res), "lc=1,lr=1,rc=1,le=0")
self.assertTrue(res)
# end def
def test_use_db_e1(self):
# Invalid Table name will not change current used table.
res = self.db.use_db(self.db_name)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
res = self.db.use_db(self.table_name)
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
# end def
def test_use_db_e2(self):
# Invalid Table name will not change current used table.
res = self.db.use_db(self.db_name)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
res = self.db.use_db(None)
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
# end def
def test_use_db_e3(self):
# check which function works if no selected table.
db_name = "unit_test_e3"
table_name = "unit_test_e3_table"
res = self.db.mk_db(db_name)
self.assertTrue(res)
res = self.db.use_db(db_name)
self.assertTrue(res)
res = self.db.rm_db(db_name)
self.assertTrue(res)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), str(None))
self.assertTrue(res)
res = self.db.mk_table(table_name, {})
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
res = self.db.ls_rows(table_name)
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
res = self.db.ls_cols(table_name)
self.assertEqual(str(res), "lc=1,lr=0,rc=0,le=0")
self.assertTrue(res)
res = self.db.ls_tables()
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
res = self.db.rm_table(table_name)
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
# end def
def tearDown(self):
# res = self.db.rm_db(self.db_name)
pass
# end def
# end class
if __name__ == '__main__':
unittest.main()
# __END__
| lgpl-3.0 | 5,546,769,965,514,912,000 | 33.765013 | 73 | 0.584454 | false |
jingriver/stocktracker | pytoolkit/python_features/testGenerator.py | 1 | 4803 | from __future__ import nested_scopes
#Coroutines are more generic than subroutines. The lifespan of subroutines is dictated by last in, first out (the last subroutine called is the first to return); in contrast, the lifespan of coroutines is dictated entirely by their use and need.
#
#The start of a subroutine is the only point of entry. Subroutines can return only once; in contrast, coroutines can return (yield) several times. The start of a coroutine is the first point of entry and subsequent points of entry are following yield commands. Practically, yielding returns the result to the calling coroutine and gives it back control, like an usual subroutine. However, the next time the coroutine is called, the execution does not start at the beginning of the coroutine but just after the yield call.
#
#Here's a simple example of how coroutines can be useful. Suppose you have a consumer-producer relationship where one routine creates items and adds them to a queue and another removes items from the queue and uses them. For reasons of efficiency, you want to add and remove several items at once. The code might look like this:
#
#var q := new queue
#
#coroutine produce
# loop
# while q is not full
# create some new items
# add the items to q
# yield to consume
#
#coroutine consume
# loop
# while q is not empty
# remove some items from q
# use the items
# yield to produce
#Coroutines and generators
#Generators are also a generalisation of subroutines, but with at first sight less expressive power than coroutines; since generators are primarily used to simplify the writing of iterators, the yield statement in a generator does not specify a coroutine to jump to, but rather passes a value back to a parent routine. However, it is still possible to implement coroutines on top of a generator facility, with the aid of a top-level dispatcher routine that passes control explicitly to child generators identified by tokens passed back from the generators:
#var q := new queue
#
#generator produce
# loop
# while q is not full
# create some new items
# add the items to q
# yield consume
#
#generator consume
# loop
# while q is not empty
# remove some items from q
# use the items
# yield produce
#
#subroutine dispatcher
# var d := new dictionary<generator -> iterator>
# d[produce] := start produce
# d[consume] := start consume
# var current := produce
# loop
# current := next d[current]
#In computer science, a generator is a special routine that can be used to control the iteration behaviour of a loop. A generator is very similar to a function that returns an array, in that a generator has parameters, can be called, and generates a sequence of values. However, instead of building an array containing all the values and returning them all at once, a generator yields the values one at a time, which requires less memory and allows the caller to get started processing the first few values immediately.
#In Python, a generator can be thought of as an iterator that contains a frozen stack frame. Whenever the iterator's next() method is called, Python resumes the frozen frame, which executes normally until the next yield statement is reached. The generator's frame is then frozen again, and the yielded value is returned to the caller.
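# A minimal runnable sketch of the producer/consumer pseudocode above: a
# top-level dispatcher passes control between two generators by following the
# tokens they yield. The queue bound of 3 items is an arbitrary demo choice.
def coroutine_demo():
    q = []
    def produce():
        n = 0
        while True:
            while len(q) < 3:      # "while q is not full"
                q.append(n)
                n += 1
            yield consume          # hand control to the consumer
    def consume():
        while True:
            while q:               # "while q is not empty"
                print "consumed", q.pop(0)
            yield produce          # hand control back to the producer
    d = {produce: produce(), consume: consume()}  # dispatcher table: function -> started generator
    current = produce
    for _ in range(4):             # a few hand-offs instead of an endless loop
        current = d[current].next()
coroutine_demo()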
def countfrom(n):
while True:
yield n
print "insdie countfrom [%d]" % n
n += 1
# Example use: printing out the integers from 10 to 20.
# Note that this iteration terminates normally, despite countfrom() being
# written as an infinite loop.
for i in countfrom(10):
if i <= 20:
print i
else:
break
# Another generator, which produces prime numbers indefinitely as needed.
def primes():
n = 2
p = []
while True:
if not any( [n % f == 0 for f in p] ):
yield n
p.append( n )
n += 1
f = primes()
print f
for i in range(10):
print f.next()
def echo(value=None):
print "Execution starts when 'next()' is called for the first time."
try:
while True:
try:
value = (yield value)
except GeneratorExit:
# never catch GeneratorExit
raise
except Exception, e:
value = e
print value
finally:
print "Don't forget to clean up when 'close()' is called."
generator = echo(1)
print generator.next()
print generator.send(2)
generator.throw(TypeError, "spam")
print generator.next()
generator.close()
| mit | -2,301,791,376,851,616,500 | 43.761905 | 556 | 0.678118 | false |
jameslovejoy/apportionment | scripts/apportion.py | 1 | 1096 | import math
class Apportion:
populations = {}
seats = {}
def __init__(self):
		with open('../data/2010.csv', 'r') as f:
			for line in f:
				state, pop = [s.strip() for s in line.split(',')]
				self.seats[state] = 1
				self.populations[state] = int(pop.strip())
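	# Each additional seat is assigned by the Huntington-Hill ("equal
	# proportions") method: a state already holding n seats has priority
	# population / sqrt(n * (n + 1)), and the next seat goes to the state
	# with the highest priority (see find_highest_priority below).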
@classmethod
def find_highest_priority(cls):
highest = 0
highest_state = None
for state in cls.populations:
n = cls.seats[state]
priority = cls.populations[state] / math.sqrt(n*(n+1))
if priority > highest:
highest = priority
highest_state = state
return highest_state
@classmethod
def run(cls):
# 435 seats: Every state gets 1 to start, leaving 385 left to apportion.
for n in range(385):
state = cls.find_highest_priority()
cls.seats[state] += 1
seat_number = 51 + n
print "Assigning Seat {} to {}".format(seat_number, state)
print "Just missed the cut..."
state = cls.find_highest_priority()
print "Seat 436 would be assigned to {}".format(state)
for state in sorted(cls.seats):
print("{}\t{}").format(state.rjust(20), str(cls.seats[state]).rjust(3))
Apportion().run() | mit | -1,152,712,967,480,970,600 | 22.340426 | 74 | 0.64781 | false |
bhenne/MoSP | mosp_examples/random_wiggler.py | 1 | 1499 | #!/bin/env python
""" Beginners' example: random movement
- random movement
- output to visual player, which is executed as child process
- you may try the other commented monitor examples - you can choose a single or multiple monitors
"""
import sys
sys.path.append("..")
import time
import random
from mosp.core import Simulation, Person
from mosp.geo import osm
from mosp.impl import movement
from mosp.monitors import *
__author__ = "P. Tute"
__maintainer__ = "B. Henne"
__contact__ = "henne@dcsec.uni-hannover.de"
__copyright__ = "(c) 2010-2011, DCSec, Leibniz Universitaet Hannover, Germany"
__license__ = "GPLv3"
class RandomWiggler(Person):
"""Implements a simple person doing only random movement on the map.
@author: P. Tute"""
next_target = movement.person_next_target_random
def main():
"""Defines the simulation, map, monitors, persons."""
t = time.time()
s = Simulation(geo=osm.OSMModel('../data/hannover2.osm'), rel_speed=40)
print time.time() - t
#m = s.add_monitor(EmptyMonitor, 2)
#m = s.add_monitor(PipePlayerMonitor, 2)
#m = s.add_monitor(RecordFilePlayerMonitor, 2)
#m = s.add_monitor(RecordFilePlayerMonitor, 2, filename='exampleoutput_RecordFilePlayerMonitor')
#m = s.add_monitor(ChildprocessPlayerChamplainMonitor, 2)
m = s.add_monitor(SocketPlayerMonitor, 2)
s.add_persons(RandomWiggler, 1000, monitor=m)
s.run(until=1000, real_time=True, monitor=True)
if __name__ == '__main__':
main()
| gpl-3.0 | 4,466,338,805,413,161,000 | 29.591837 | 101 | 0.693129 | false |
vaibhawvipul/Python-Politics-Game | trailblazers.py | 1 | 20603 | import sys
import math
import time
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vEnter Your name"
name = raw_input("> ")
"""This will display only first name"""
f_name = name.split()
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vWelcome %r! Be a \n\n\n\n\t\t\t...TRAILBLAZER..." %f_name[0]
print"\n Demo version 2"
print "\v\v\v\v1.Play"
print "\n2.About"
print "\n3.Exit"
print "\nCOPYRIGHTS RESERVED"
a = int(raw_input("\n\nEnter your choice - "))
if a == 3:
sys.exit(0)
elif a == 2:
print "\nThis game was concieved by Vipul Vaibhaw. It was build by very creative team of Fergusson College students"
print "\nWe are very collaborative team. We have an out of the box idea ready almost everytime."
print "\nThis game was build using Python."
print "\nWant to contact us, drop an e-mail to vaibhaw.vipul@gmail.com"
elif a == 1:
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vWelcome President %r to your office." %f_name[0]
print "\n\nHere is a message from intelligence. Press ENTER to see the message"
raw_input("")
print "A Terror outfit has grown very strong"
time.sleep(3)
print "They are constantly attacking Kamuri. Kamuri is a small nation which shares boundary with us. It also has religious importance for a minority group in your country."
time.sleep(5)
print"Kamuri and your Country has ancestral tie-ups"
time.sleep(2)
print "Our espionage have reported that it may soon try to overthrow government of Kamuri"
time.sleep(3)
print "\nPress ENTER to continue..."
raw_input("")
print "\n\v\v\v\v\v\v\v\v\v\v\v\v\vPresident of a Superpower nations has invited you over dinner."
print "\nIt could be benificial to your country. You could sort out issue like economic relations, weapon treaties or nuclear deal etc."
print "\nElse you can stay in our own country and solve internal affairs first."
print "\n\n1.You accept the invitation."
print "\n2.You decline the invitation."
b = int(raw_input("\n> "))
if b == 1:
print "\n\v\v\vGreat thought! It would not have been a good step to decline the invitation from a Superpower."
time.sleep(3)
print "\n\n\n'President Mark will meet you anytime from now. Sorry for inconvinience President %r' says Secretary " %f_name[0]
time.sleep(5)
print "\n\n\n\v\v\vPresident Mark is here!"
time.sleep(3)
print "\n\n\nPresident %r, Nice to meet you" %f_name[0]
time.sleep(3)
print "\nIt is good to know that your country is quite concerned about small countries neighbouring you."
time.sleep(4)
print "\nBut sometimes it is better to detach yourself from weak ones..."
time.sleep(2)
print "...and attach youself to more powerful nations."
time.sleep(3)
print "\n\nPress ENTER to continue..."
raw_input("")
print "\v\v\v\v\v'So here is a deal...'"
print "\n\n1. If you and your ally are ready to let us make army bases in you country, we may support you at war."
print "\n2. If you allow, while your ally deny We 'will' support you at war. Our soldiers will lead from front."
print "\n3. If you both deny, Your enemy will be showered with our benevolence."
print "\n\n\v\v1. You allow them."
print "2. You deny them"
c = int(raw_input("\n> "))
if c == 1:
print "\v\v\v'Great! Now let's see what Your ally has to say'"
time.sleep(3)
print "\nYour ally supported you in this decision. President Mark has built armybase in your country."
time.sleep(3)
print "\nPresident of 'Kamuri' has sent you a message. Press ENTER to read it."
raw_input("")
print "\n\n\v\v\vPresident we need help. Terrorists have attacked us. Help us!!"
print "\n\n1. You send army"
print "2. You ask Mark to help"
print "3. You ignore the problem and do not send Army."
d = int(raw_input("\n> "))
if d == 2:
print "Mark denies help. He had said that he 'may' help you at war."
time.sleep(3)
print "\n\nWhat will you do now?"
print "\n1. You send army"
print "2. You ignore the problem and do not send Army."
e = int(raw_input("> "))
if e == 1:
print "That's like an good ally!"
time.sleep(2)
print "Your army is ready to leave for Kamuri"
time.sleep(3)
print "ALERT!"
time.sleep(1)
print "ALERT!!"
time.sleep(1)
print "ALERT!!!"
time.sleep(2)
print "\n\nThere is massive flood in your country! Lots of lives are in danger!"
print "\nMessage from Cheif Minister of that flood-struck state. Press ENTER to see Message"
raw_input("")
print "\n\n\vPresident! We need Army support. Only trained personnels like Army men can help us"
print "\nHundreds of people are trapped here. Army needed immediately!"
print "\v\v\v\v1. You send your army to Kamuri."
print "2. You re-direct your army to rescue people from flood-struck state."
f = int(raw_input(""))
if f == 1:
print "\n\nInternational relations matters President %r! But your citizens are feeling unsafe in your country." %f_name[0]
time.sleep(2)
print "\nMisiters withdraw support and your government falls..."
time.sleep(2)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else:
print "\n\nGood decision to send army to rescue your people first."
print "\nArmy did fantastic job and saved hundreds of lives."
time.sleep(3)
print "\nYou become peoples favorite President!"
time.sleep(3)
print "\n\nBut Kamuri problem is unsolved yet!"
time.sleep(3)
print "Government is about to collapse. It would be a big threat to your country's security as well."
time.sleep(4)
print "\n1. Should we plan to offer an Armed force help?"
print "2. Or Negotitate with Terrorists."
time.sleep(3)
print "\nTerrorists want to contact you."
time.sleep(2)
print "\nThey have send you a message"
print "\nPress ENTER to see the message..."
raw_input("")
print "\v\v\nPresident %r if you ignore to help Kamuri, We will support you in next elections." %f_name[0]
print "People of our religion will support you. Secondly, we may ignore your country from our HIT LIST as well!!"
time.sleep(1)
print "\nYour options are:\n1. Should we plan to offer an Armed force help?"
print "2. Or Negotitate with Terrorists."
g = int(raw_input("\nTake your decision \n>"))
if g == 2:
print "\nPresident %r day by day conditions in Kamuri got worse." %f_name[0]
time.sleep(2)
print "\nKamuri Government was overthrown by Terrorists"
time.sleep(2)
print "\nYou even lost some reputation in World! News spread that you took this decision as you disbelieved your army!"
time.sleep(3)
print "You lost trust amongsts citizen and they voted against you!"
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
elif g == 1:
print "\nYou saved Kamuri. But back to back floods and warfare has made your economy weak"
time.sleep(5)
print "\nPresident Mark has come up with another deal"
time.sleep(3)
h = int(raw_input("\n\n1. You agree to meet him. \n2. You deny \n>"))
if h == 2:
print "\n\nSuperpower nation is upset now. He breaks offs economic ties and your economy crashes"
time.sleep(4)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else :
print "\v\v\v\v\vSo Here is the deal!"
print "\n\n1. If you allow us to make more armybases in your country. We WILL help you at any cost!"
print "2. If you deny, we break economic ties with you and your economy may crash!"
raw_input("\nPress ENTER to continue... ")
print "\n\nHere is a message from Minister of Scientific development"
time.sleep(4)
print "\n\n\nWe have developed special kind of rice, which is new to the world market."
print "\nWe may sell it to world market to stabalize our economy."
time.sleep(7)
print "\nBut..."
time.sleep(3)
print "\nWe are not sure about its success."
time.sleep(4)
i = int(raw_input("Take your decision - "))
if i == 2:
print "\n\nSuperPower got upset but our rice was successful invention!"
print "\nYou managed to survive..."
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU WIN!!"
else:
print "\nThis time you caught MARK! He had to help your country now because of 'will' which he had said in deal."
time.sleep(5)
print "\nAlso your rice got successful and Mark needed that rice to help his country"
time.sleep(4)
print "\nYou sold that rice to Mark with a deal that from now any of his Army movement won't be allowed without your consesus."
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU WIN!!!!!"
else:
print "Being Diplomatic!"
time.sleep(3)
print "Riots!!!!"
time.sleep(2)
print "Religious Minority in your country got upset and their protest have turned into riots. You LOSE!!"
elif d == 1:
print "That's like an good ally!"
time.sleep(2)
print "Your army is ready to leave for Kamuri"
time.sleep(3)
print "ALERT!"
time.sleep(1)
print "ALERT!!"
time.sleep(1)
print "ALERT!!!"
time.sleep(2)
print "\n\nThere is massive flood in your country! Lots of lives are in danger!"
print "\nMessage from Cheif Minister of that flood-struck state. Press ENTER to see Message"
raw_input("")
print "\n\n\vPresident! We need Army support. Only trained personnels like Army men can help us"
print "\nHundreds of people are trapped here. Army needed immediately!"
print "\v\v\v\v1. You send your army to Kamuri."
print "2. You re-direct your army to rescue people from flood-struck state."
f = int(raw_input("\n>"))
if f == 1:
print "\n\nInternational relations matters President %r! But your citizens are feeling unsafe in your country." %f_name[0]
time.sleep(2)
print "\nMisiters withdraw support and your government falls..."
time.sleep(2)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
elif f == 2:
print "\n\nGood decision to send army to rescue your people first."
print "\nArmy did fantastic job and saved hundreds of lives."
time.sleep(3)
print "\nYou become peoples favorite President!"
time.sleep(3)
print "\n\nBut Kamuri problem is unsolved yet!"
time.sleep(3)
print "Government is about to collapse. It would be a big threat to your country's security as well."
time.sleep(4)
print "\n1. Should we plan to offer an Armed force help?"
print "2. Or Negotitate with Terrorists."
time.sleep(3)
print "\nTerrorists want to contact you."
time.sleep(2)
print "\nThey have send you a message"
print "\nPress ENTER to see the message..."
raw_input("")
print "\v\v\nPresident %r if you ignore to help Kamuri, We will support you in next elections." %f_name[0]
print "People of our religion will support you. Secondly, we may ignore your country from our HIT LIST as well!!"
g = int(raw_input("\nTake your decision \n>"))
if g == 2:
print "\nPresident %r day by day conditions in Kamuri got worse." %f_name[0]
time.sleep(2)
print "\nKamuri Government was overthrown by Terrorists"
time.sleep(2)
print "\nYou even lost some reputation in World! News spread that you took this decision as you disbelieved your army!"
time.sleep(3)
print "You lost trust amongsts citizen and they voted against you!"
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
elif g == 1:
print "\nYou saved Kamuri. But back to back floods and warfare has made your economy weak"
time.sleep(5)
print "\nPresident Mark has come up with another deal"
time.sleep(3)
h = int(raw_input("\n\n1. You agree to meet him. \n2. You deny>"))
if h == 2:
print "\n\nSuperpower nation is upset now. He breaks offs economic ties and your economy crashes"
time.sleep(4)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else :
print "\v\v\v\v\vSo Here is the deal!"
print "\n\n1. If you allow us to make more armybases in your country. We WILL help you at any cost!"
print "2. If you deny, we break economic ties with you and your economy may crash!"
raw_input("\nPress ENTER to continue... ")
print "\n\nHere is a message from Minister of Scientific development"
time.sleep(4)
print "\n\n\nWe have developed special kind of rice, which is new to the world market."
print "\nWe may sell it to world market to stabalize our economy."
time.sleep(7)
print "\nBut..."
time.sleep(3)
print "\nWe are not sure about its success."
time.sleep(4)
i = int(raw_input("Take your decision - "))
if i == 2:
print "\n\nSuperPower got upset but our rice was successful invention!"
print "\nYou managed to survive..."
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU WIN!!"
else:
print "\nThis time you caught MARK! He had to help your country now because of 'will' which he had said in deal."
time.sleep(5)
print "\nAlso your rice got successful and Mark needed that rice to help his country"
time.sleep(4)
print "\nYou sold that rice to Mark with a deal that from now any of his Army movement won't be allowed without your consesus."
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU WIN!!!!!"
else :
print "Bye!"
else:
print "Being Diplomatic!"
time.sleep(3)
print "Riots!!!!"
time.sleep(2)
print "Religious Minority in your country got upset and their protest have turned into riots"
else :
print "'Ok President %r, Hope this decision won't cost you much!'" %f_name[0]
else :
print "Not a good decision to decline invitation from a superpower!"
print "\nPresident of 'Kamuri' has sent you a message. Press ENTER to read it."
raw_input("")
print "\n\n\v\v\vPresident we need help. Terrorists have attacked us. Help us!!"
print "\n\n1. You send army"
print "2. You ignore the problem and do not send Army."
d = int(raw_input("\n> "))
if d == 2:
print "Mark denies help. He had said that he 'may' help you at war."
time.sleep(3)
print "\n\nWhat will you do now?"
print "\n1. You send army"
print "2. You ignore the problem and do not send Army."
e = int(raw_input("> "))
if e == 1:
print "That's like an good ally!"
time.sleep(2)
print "Your army is ready to leave for Kamuri"
time.sleep(3)
print "ALERT!"
time.sleep(1)
print "ALERT!!"
time.sleep(1)
print "ALERT!!!"
time.sleep(2)
print "\n\nThere is massive flood in your country! Lots of lives are in danger!"
print "\nMessage from Cheif Minister of that flood-struck state. Press ENTER to see Message"
raw_input("")
print "\n\n\vPresident! We need Army support. Only trained personnels like Army men can help us"
print "\nHundreds of people are trapped here. Army needed immediately!"
print "\v\v\v\v1. You send your army to Kamuri."
print "2. You re-direct your army to rescue people from flood-struck state."
f = int(raw_input(""))
if f == 1:
print "\n\nInternational relations matters President %r! But your citizens are feeling unsafe in your country." %f_name[0]
time.sleep(2)
print "\nMisiters withdraw support and your government falls..."
time.sleep(2)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else:
print "\n\nGood decision to send army to rescue your people first."
print "\nArmy did fantastic job and saved hundreds of lives."
time.sleep(3)
print "\nYou become peoples favorite President!"
time.sleep(3)
print "\n\nBut Kamuri problem is unsolved yet!"
time.sleep(3)
print "Government is about to collapse. It would be a big threat to your country's security as well."
time.sleep(4)
print "\n1. Should we plan to offer an Armed force help?"
print "2. Or Negotitate with Terrorists."
time.sleep(3)
print "\nTerrorists want to contact you."
time.sleep(2)
print "\nThey have send you a message"
print "\nPress ENTER to see the message..."
raw_input("")
print "\v\v\nPresident %r if you ignore to help Kamuri, We will support you in next elections." %f_name[0]
print "People of our religion will support you. Secondly, we may ignore your country from our HIT LIST as well!!"
g = int(raw_input("\nTake your decision \n>"))
if g == 2:
print "\nNegotitation with terrorists wasn't a good idea President %r" %f_name[0]
time.sleep(2)
print "\nKamuri Government was overthrown by Terrorists"
time.sleep(2)
print "\nCitizen felt that their security was at threat and voted against you!"
time.sleep(3)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else:
print "\nYou saved Kamuri. Your country emerged as a Superpower"
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU WON!!!!!!!!"
else:
print "Being Diplomatic!"
time.sleep(3)
print "Riots!!!!"
time.sleep(2)
print "Religious Minority in your country got upset and their protest have turned into riots"
elif d == 1:
print "That's like an good ally!"
time.sleep(2)
print "Your army is ready to leave for Kamuri"
time.sleep(3)
print "ALERT!"
time.sleep(1)
print "ALERT!!"
time.sleep(1)
print "ALERT!!!"
time.sleep(2)
print "\n\nThere is massive flood in your country! Lots of lives are in danger!"
print "\nMessage from Cheif Minister of that flood-struck state. Press ENTER to see Message"
raw_input("")
print "\n\n\vPresident! We need Army support. Only trained personnels like Army men can help us"
print "\nHundreds of people are trapped here. Army needed immediately!"
print "\v\v\v\v1. You send your army to Kamuri."
print "2. You re-direct your army to rescue people from flood-struck state."
f = int(raw_input("\n>"))
if f == 1:
print "\n\nInternational relations matters President %r! But your citizens are feeling unsafe in your country." %f_name[0]
time.sleep(2)
print "\nMisiters withdraw support and your government falls..."
time.sleep(2)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else:
print "\n\nGood decision to send army to rescue your people first."
print "\nArmy did fantastic job and saved hundreds of lives."
time.sleep(3)
print "\nYou become peoples favorite President!"
time.sleep(3)
print "\n\nBut Kamuri problem is unsolved yet!"
time.sleep(3)
print "Government is about to collapse. It would be a big threat to your country's security as well."
time.sleep(4)
print "\n1. Should we plan to offer an Armed force help?"
print "2. Or Negotitate with Terrorists."
time.sleep(3)
print "\nTerrorists want to contact you."
time.sleep(2)
print "\nThey have send you a message"
print "\nPress ENTER to see the message..."
raw_input("")
print "\v\v\nPresident %r if you ignore to help Kamuri, We will support you in next elections." %f_name[0]
print "People of our religion will support you. Secondly, we may ignore your country from our HIT LIST as well!!"
g = int(raw_input("\nTake your decision \n>"))
if g == 2:
print "\nPresident %r day by day conditions in Kamuri got worse." %f_name[0]
time.sleep(2)
print "\nKamuri Government was overthrown by Terrorists"
time.sleep(2)
print "\nYou even lost some reputation in World! But terrorists ignored to attack your country!"
time.sleep(3)
print "This decision of yours gave some time to recover your country from Financial crisis"
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU SURVIVED!!"
elif g == 1:
print "\nYou saved Kamuri. But back to back floods and warfare has made your economy weak"
time.sleep(5)
print "\nPresident Mark has also cut off economic ties with your country"
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else :
print "Bye!"
else:
print "Being Diplomatic!"
time.sleep(3)
print "Riots!!!!"
time.sleep(2)
print "Religious Minority in your country got upset and their protest have turned into riots"
| apache-2.0 | 1,313,378,677,649,439,200 | 45.718821 | 173 | 0.653885 | false |
JoshuaKirby/Projects | Simple_Insert/Window.py | 1 | 1184 | '''
Created on Feb 24, 2016
@author: jokirby
'''
import csv
import tkinter as tk
import time
class Window(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.pack()
self.createWidgets()
def createWidgets(self):
self.inString = tk.StringVar()
self.input = tk.Entry(self)
self.input["textvariable"] = self.inString
self.input.pack(side="top")
self.submit = tk.Button(self)
self.submit["text"] = "Submit"
self.submit["command"] = self.readIn
self.submit.pack(side="top")
    def readIn(self):
        # append a human-readable timestamp and the entered text to data.csv
        with open('data.csv', 'a') as csvfile:
            csvWriter = csv.writer(csvfile, delimiter=' ',
                                   quotechar='|', quoting=csv.QUOTE_MINIMAL)
            csvWriter.writerow([time.asctime(), self.inString.get()])
        self.inString.set("")
        # reader prepared for reading the rows back; the rows are not yet consumed
        with open('data.csv', 'r') as csvfile:
            csvReader = csv.reader(csvfile, delimiter=' ', quotechar='|')
root = tk.Tk()
app = Window(master=root)
app.mainloop() | gpl-3.0 | 6,379,389,522,037,318,000 | 24.954545 | 73 | 0.530405 | false |
asoliveira/NumShip | source/Navio-back.py | 1 | 56391 | # -*- coding: utf-8 -*-
#
#This file is part of a program called NumShip
#Copyright (C) 2011,2012 Alex Sandro Oliveira
#NumShip is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from Casco import *
from Leme import *
import scipy as sp
from scipy import linalg
from scipy import stats
from Prop import *
class inte(object):
"""
Classe que realisa a integração no tempo
:version:191010
:author: Alex
"""
def __init__(self):
"""
"""
pass
def rk4(self, function, x, t0, dt, par = None):
"""
Integrador runge-kutta
"""
k1 = function(x, t0, par)
k2 = function(x + 1./2*dt*k1, t0 + 1./2*dt, par)
k3 = function(x + 1./2*dt*k2, t0 + 1./2*dt, par)
k4 = function(x + dt*k3, t0 + dt, par)
xt = x + 1./6*(k1+ 2.*k2+ 2.*k3+ k4)*dt
return xt
def euler(self, f, x, t0, dt, par= None ):
"""
"""
return x + f(x, t0, par)*dt
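
# The demo below is not part of the original NumShip source; it is an
# illustrative sketch of how the `inte` integrators can be used, assuming
# only scipy (imported above as sp). It integrates dx/dt = -x, whose exact
# solution is x(t) = x0*exp(-t), so both results should approach 0.3679
# for x0 = 1 and t_final = 1; rk4 should be markedly closer than euler.
def _exemplo_integrador(dt=0.1, t_final=1.0):
    """Compares euler and rk4 on dx/dt = -x (illustrative sketch)."""
    integ = inte()
    f = lambda x, t, par: -x  # right-hand side of the test ODE
    x_euler = sp.array(1.0)
    x_rk4 = sp.array(1.0)
    t = 0.0
    while t < t_final:
        x_euler = integ.euler(f, x_euler, t, dt)
        x_rk4 = integ.rk4(f, x_rk4, t, dt)
        t += dt
    return x_euler, x_rk4
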
class navio:
"""
Classe de navios
"""
tipo = 'Escolhido de acordo com self.Tipo'
data = '10-11-2010'
autor = 'Alex'
def __init__(self, DicionarioDerivadas, Nome = 'Teste', Tipo = 'TP'):
""""
Construtor do navio
__________________________
Variáveis de entrada:
Nome (string)-- Nome do Navio. Não possui relevância;
Tipo ('TP')-- Tipo de modelo numérico adotado para a construção do Leme
"""
self.nome = Nome
self.vel = sp.zeros((6, 1))
self.acel = sp.zeros((6, 1))
self.pos = sp.zeros((6, 1))
self.dic = DicionarioDerivadas
self.tipo = Tipo
self.integrador = inte()
self.uc = sp.array(self.dic['unom'])
if Tipo == 'TP':
self.leme = lemeTris(DicionarioDerivadas)
self.casco = cascoTris(DicionarioDerivadas)
self.prop = prop()
elif Tipo == 'MARAD':
self.leme = lemeMarAd(DicionarioDerivadas)
self.casco = cascoMarAd(DicionarioDerivadas)
self.prop = propMarAd(DicionarioDerivadas)
def MostraVel(self):
"""
Retorna a Velocidade da embarcação
"""
return self.vel
def MostraAcel(self):
"""
Retorna a aceleração da embarcação
"""
return self.acel
def MostraLeme(self):
"""
Retorna o leme em rad da embarcação
"""
return self.leme.MostraLeme()
def MostraLemeCom(self):
"""
Retorna o leme em rad da embarcação
"""
return self.leme.MostraLemeCom()
def MostraPos(self):
"""
Retorna a posição da embarcação
"""
return self.pos
def MostraRotCom(self):
"""
Retorna a rotação comandada
"""
return self.prop.MostraRotCom()
def MostraRot(self):
"""
Retorna a rotação
"""
return self.prop.MostraRot()
def MostraVelCom(self):
"""
Retorna a velocidade comandada
"""
return self.uc
def MudaVelCom(self, uc):
"""
Muda a velocidade comandada
"""
self.uc = uc.copy()
self.prop.MudaVelCom(uc)
def MudaLemeCom(self, AngLeme):
"""
Muda o leme comandado da embarcação
__________________________
Variáveis de entrada:
"""
temp = AngLeme.copy()
self.leme.MudaLemeCom(temp)
def MudaVel(self, Velocidade):
"""
Muda a velocidade da embarcação
__________________________
Variáveis de entrada:
Velocidade -- velocidade (m/s)
"""
temp = Velocidade.copy()
self.vel = temp
self.casco.MudaVel(temp)
self.leme.MudaVel(temp)
self.prop.MudaVel(temp)
def MudaPos(self, Posicao):
"""
Muda a posição da embarcação
__________________________
Variáveis de entrada:
Posição -- posição (m)
"""
temp = Posicao.copy()
self.pos = temp
self.casco.MudaPos(temp)
self.leme.MudaPos(temp)
self.prop.MudaPos(temp)
def MudaRotCom(self, Rot):
"""
Muda a rotação Comandada da embarcação
"""
self.prop.MudaRotCom(Rot)
def CalcFx(self):
"""
Calcula a força em Surge
"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = m*(v*r + xg*(r**2) - zg*p*r)
if self.tipo == 'MARAD':
saida = (self.casco.Fx() + self.prop.Fx() +
self.leme.Fx(self.MostraRot(),
self.MostraVelCom()/self.MostraVel()[0]) + cori)
elif self.tipo == 'TP':
saida = self.casco.Fx() + self.leme.Fx() + self.prop.Fx() + cori
return saida
def CalcFy(self):
"""
Calcula a força em Sway
"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = -m*u*r
if self.tipo == 'MARAD':
saida = (self.casco.Fy() + self.leme.Fy(self.MostraRot()) +
self.prop.Fy() + cori)
elif self.tipo == 'TP':
saida = self.casco.Fy() + self.leme.Fy() + self.prop.Fy() + cori
return saida
def CalcK(self):
"""
Calcula o momento de Roll
"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = m*zg*u*r
if self.tipo == 'MARAD':
saida = (self.casco.K() + self.leme.K(self.MostraRot()) +
self.prop.K() + cori)
elif self.tipo == 'TP':
saida = self.casco.K() + self.leme.K() + self.prop.K() + cori
return saida
def CalcN(self):
"""
Calcula o momento de Yaw
"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = -m*xg*u*r
if self.tipo == 'MARAD':
saida = (self.casco.N() + self.leme.N(self.MostraRot()) +
self.prop.N() + cori)
elif self.tipo == 'TP':
saida = self.casco.N() + self.leme.N() + self.prop.N() + cori
return saida
def VetF(self, p=None):
"""
Vetor de forças
_________________________
Variáveis de entrada:
p -- tupla
p[0] (integer)-- Graus de liberdade
p[1] (tupla)-- Com pesos
"""
if p == None:
GrausDeLib =4
peso = None
elif len(p)==1:
GrausDeLib =p[0]
peso = None
elif len(p)==2:
GrausDeLib =p[0]
peso = p[1]
if peso == None:
if GrausDeLib == 4:
saida = sp.array([self.CalcFx(), self.CalcFy(),
self.CalcK(), self.CalcN()])
elif GrausDeLib == 3:
saida = sp.array([self.CalcFx(), self.CalcFy(), self.CalcN()])
else:
lemearq = self.MostraLeme()
velarq = self.MostraVel()
uc = self.MostraVelCom()
####################
self.leme.MudaLemeDir(sp.array(0.))
self.MudaVelCom(velarq[0]) #condição eta=1
## ####################
## Aquilo que depende somente de u
##
## ####################
veltemp = sp.zeros((6, 1))
veltemp[0] = velarq[0]
self.MudaVel(veltemp)
fu = self.VetF((GrausDeLib, ))
####################
veltemp = sp.zeros((6, 1))
veltemp[0] = velarq[0]
veltemp[1] = velarq[1]
self.MudaVel(veltemp)
# leme = 0 e eta = 1
fbeta = self.VetF((GrausDeLib, )) - fu
it = 0
fbeta1 = fbeta.copy()
for arg in peso[0]:
fbeta[it] = arg* fbeta[it]
it +=1
####################
veltemp = sp.zeros((6, 1))
veltemp[5] = velarq[5]
veltemp[0] = velarq[0]
self.MudaVel(veltemp)
fr = self.VetF((GrausDeLib, )) - fu
fr1 = fr.copy()
it = 0
for arg in peso[1]:
fr[it] = arg* fr[it]
it +=1
####################
self.leme.MudaLemeDir(lemearq)
veltemp = sp.zeros((6, 1))
veltemp[0] = velarq[0]
self.MudaVel(veltemp)
fleme = self.VetF((GrausDeLib, )) - fu
fleme1 = fleme.copy()
it = 0
for arg in peso[2]:
fleme[it] = arg* fleme[it]
it +=1
####################
self.MudaVel(velarq)
self.MudaVelCom(uc)
fbetarl = self.VetF((GrausDeLib, )) - (fbeta1 + fr1 + fleme1)
it = 0
for arg in peso[3]:
fbetarl[it] = arg* fbetarl[it]
it +=1
del it
saida = fbeta + fr + fleme + fbetarl
return saida
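
    # Illustrative note (not in the original source): with weights, VetF can
    # be called as, for example,
    #     navio.VetF((4, ([1, 1, 1, 1], [1, 1, 1, 1],
    #                     [1, 1, 1, 1], [1, 1, 1, 1])))
    # where the four sequences scale the (X, Y, K, N) components of the
    # drift (beta), yaw-rate (r), rudder and cross-coupling contributions.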
def H (self, GrausDeLib=4):
"""
Matriz de massa menos matriz de massa adicional
_________________________
Variáveis de entrada:
GrausDeLib (integer)-- Graus de liberdade
"""
H = None
H = self.casco.M(GrausDeLib) - self.casco.Ma(GrausDeLib)
return sp.mat(H)
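
    # Note (illustrative, not in the original source): H() is the rigid-body
    # mass matrix minus the added-mass matrix, so the equations of motion
    # solved in f2() have the form H * acel = F.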
def MatRot(self, p=None):
"""
Retorna a matrix de rotação de do referêncial solidárial para o
inercial
"""
if p== None:
roll= self.MostraPos()[3]
pitch = self.MostraPos()[4]
yaw = self.MostraPos()[5]
else:
roll= p[0]
pitch = p[1]
yaw = p[2]
Rot = sp.array([[sp.cos(yaw)*sp.cos(pitch),
-sp.sin(yaw)*sp.cos(roll) + sp.cos(yaw)*sp.sin(pitch)*sp.sin(roll),
sp.sin(yaw)*sp.sin(roll) + sp.cos(yaw)*sp.cos(roll)*sp.sin(pitch) ],
[sp.sin(yaw)*sp.cos(pitch),
sp.cos(yaw)*sp.cos(roll) + sp.sin(roll)*sp.sin(pitch)*sp.sin(yaw),
-sp.cos(yaw)*sp.sin(roll) + sp.sin(yaw)*sp.cos(roll)*sp.sin(pitch) ],
[-sp.sin(pitch), sp.cos(pitch)*sp.sin(roll),
sp.cos(pitch)*sp.cos(roll)] ])
Rot.shape = (3, 3)
Rot= sp.matrix(Rot)
return Rot
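
    # Illustrative check (not in the original source): MatRot() is the usual
    # ZYX Euler-angle rotation, so it should satisfy orthogonality, e.g.
    #     R = navio.MatRot()
    #     assert sp.allclose(R.T * R, sp.eye(3))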
def f2 (self, VetF, H):
"""
Calcula o valor de f(x) na equação
x' = f(x)
onde x são é o vetor de velocidades no sistema solidário
_________________________
Variáveis de entrada:
GrausDeLib (integer)-- Graus de liberdade
"""
GrausDeLib = len(VetF)
if GrausDeLib == 4:
a= sp.zeros((6, 6))
a[5, 5] = 1.
a[4, 4] = 1.
a[:4, :4]= H
b= sp.zeros((6, 1))
b [4, 0] = self.vel[3]
b [5, 0] = self.vel[5]*sp.cos(self.MostraPos()[3])
b[:4, :]= VetF
elif GrausDeLib == 3:
a= sp.zeros((4, 4))
a[3, 3] = 1.
a[:3, :3]= H
b= sp.zeros((4, 1))
b[:3, :]= VetF
b[3, 0] = self.MostraVel()[5]
saida = linalg.solve(a, b )
return saida
def f(self, velocidade=None, t=None, p=(4, )):
"""
O p é uma tupla com o valor dos graus de liberdade
"""
GrausDeLib = p[0]
if velocidade !=None:
velarq = self.MostraVel()
posarq = self.MostraPos()
veltemp = sp.zeros((6, 1))
postemp = sp.zeros((6, 1))
if GrausDeLib==3:
veltemp[:2] = velocidade[:2]
veltemp[5] = velocidade[2]
postemp[5] = velocidade[3]
elif GrausDeLib==4:
veltemp[:2] = velocidade[:2]
veltemp[3] = velocidade[2]
veltemp[5] = velocidade[3]
postemp[3] = velocidade[4]
postemp[5] = velocidade[5]
self.MudaVel(veltemp)
self.MudaPos(postemp)
if GrausDeLib == 4:
a= sp.zeros((6, 6))
a[5, 5] = 1.
a[4, 4] = 1.
a[:4, :4]= self.H(GrausDeLib)
b= sp.zeros((6, 1))
b [4, 0] = self.vel[3]
b [5, 0] = self.vel[5]*sp.cos(self.MostraPos()[3])
b[:4, :]= self.VetF(p)
elif GrausDeLib == 3:
a= sp.zeros((4, 4))
a[3, 3] = 1.
a[:3, :3]= self.H(GrausDeLib)
b= sp.zeros((4, 1))
b[:3, :]= self.VetF(p)
b[3, 0] = self.MostraVel()[5]
saida = linalg.solve(a, b)
if velocidade !=None:
self.MudaVel(velarq)
self.MudaPos(posarq)
return saida
def fvein(self, x, t, p):
"""
x = sp.array(u, v , w)
p = ( roll, pitch, yaw)
"""
return sp.array(self.MatRot(p[0])*p[1])
def simula (self, met='euler', t0=0., dt=0.5, t=100., GrausDeLib=4,
velocidade=None, tipo='ZigZag', leme=sp.array(20.),
proa=sp.array(20.), RotCom =sp.array(1), osa=sp.array(0.05),
ospath=sp.array(150), erro=sp.array(0.05),
errotf=sp.array(0.05), errotd=sp.array(0.05)):
"""
Simulador de manobras padrão
_________________________
Variáveis de entrada:
GrausDeLib (integer)-- Graus de liberdade;
met -- Método de integração. Default- Euler;
t0 -- Tempo inicial;
dt -- Passo no tempo;
t -- Tempo final
tipo - tipo de manobra simulada. Zig-Zag10/10 e Curva_de_Giro_port ou
Curva_de_Giro_starboard . Default -Zig-Zag
__________________________
Saída:
Tupla de sp.array
(veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis)
Em cada elemento da tupla:
A primeira coluna é o passo de tempo e as demais são as variáveis
veloHis -- histórico de velocidades;
posHis -- histórico de posições
acelHis --- histórico de acelerações
fHis -- histórico de forças
veloInerHis -- histórico de velocidades no sistema inercial
lemeHis -- histórico do comando de leme
"""
#
# Tipo de Simulação a ser realizada:
#
self.MudaPos( sp.array([ [0.], [0.], [0.], [0.], [0.], [0.] ]))
self.MudaVel(sp.array([ [self.dic['unom']], [0.], [0.], [0.], [0.],
[0.] ]))
self.MudaRotCom(RotCom)
self.MudaVelCom(self.dic['unom'])
#Log é o parâmetro que indica quando a simulação armazenou os dados do
#relatório
if tipo == 'Curva_de_Giro_port':
self.MudaLemeCom(sp.array(leme*sp.pi/180))
log = False
elif tipo == 'Curva_de_Giro_starboard':
self.MudaLemeCom(sp.array(-leme*sp.pi/180))
log = False
elif tipo == 'ZigZag':
self.MudaLemeCom(sp.array(leme*sp.pi/180))
exe = 0
###############################
##
## Dados relacionados a curva de zizag
##
###############################
if (tipo == 'ZigZag' and (((exe%2 == 0) and self.MostraPos()[5] <=
-(proa*sp.pi/180) ) or (exe%2 != 0 and self.MostraPos()[5] >=
(proa*sp.pi/180) ))):
self.MudaLemeCom(self.MostraLeme()*(-1))
if exe!=0:
dic['reach'] = erro
dic['ospath'] = ospath
dic['osangle'] = abs(osa - dic['proa'])
dados.append(dic.copy())
exe += 1
dic['exeNummber'] = exe
dic['time'] = tp - sp.array(dt)
dic['path'] = self.MostraPos()[1]
dic['proa'] = self.MostraPos()[5]
if tipo=='ZigZag' and exe!=0:
if abs(self.MostraPos()[1]- dic['path'])>ospath:
ospath = abs(self.MostraPos()[1]- dic['path'])
if abs(self.MostraPos()[5])>abs(osa):
osa = self.MostraPos()[5]
if abs(self.MostraPos()[5] - PosIni[5]) < erro:
erro = abs(self.MostraPos()[5] - PosIni[5])
###############################
##
## Dados relacionados a curva de Giro
##
###############################
if ((tipo == 'Curva_de_Giro_port' or
tipo == 'Curva_de_Giro_starboard') and not log):
if (abs(abs(self.MostraPos()[5] - PosIni[5]) -
(sp.array(90)*sp.pi/180)) <= errotf):
errotf = (abs(abs(self.MostraPos()[5] - PosIni[5]) -
(sp.array(90)*sp.pi/180)))
dic['transfer'] = abs(self.MostraPos()[1] - PosIni[1])
dic['advance'] = abs(self.MostraPos()[0] - PosIni[0])
if abs(abs(self.MostraPos()[5] - PosIni[5]) - sp.pi) <= errotd:
errotd = abs(abs(self.MostraPos()[5] - PosIni[5]) - sp.pi)
dic['taticalDiameter'] = abs(self.MostraPos()[1] -
PosIni[1])
if abs(self.MostraPos()[5] - PosIni[5]) > sp.pi :
log = True
dados.append(dic)
Rot = self.MatRot()
#
# inc = Velocidades Lineares no Sistema Inecial
#
VelIn = Rot*sp.matrix(self.vel[0:3])
PosIne = self.MostraPos()[0:3]
##################################
#
# Guardando os parâmetros
#
##################################
# Velocidade Inercial
d= sp.hstack(VelIn)
veloInerHis[cont, 1:] = d #
veloInerHis[cont, 0] = tp #
# Histórico Leme
lemeHis[cont, 0] = tp
lemeHis[cont, 1] = self.MostraLeme()
# Histórico da posição
temp = sp.hstack(self.MostraPos())
posHis[cont, :] = sp.hstack((tp, temp))
# Histórico da Velocidade
temp = sp.hstack(self.MostraVel())
veloHis[cont, :] = sp.hstack((tp, temp))
# Histórico das Forças
            temp = sp.hstack(sp.array(self.VetF((GrausDeLib, ))))
if GrausDeLib == 4:
fHis[cont, :] = sp.hstack((tp, temp))
elif GrausDeLib == 3:
fHis[cont, :3] = sp.hstack((tp, temp[:2]))
fHis[cont, 4] = temp[2]
# Histórico Propulsor
propHis[cont, :] = sp.hstack((tp, self.MostraRot()))
# Histórico das Acelerações
            Acel = self.f(p=(GrausDeLib, ))
if GrausDeLib == 4:
vetor = sp.zeros((6, 1))
vetor[:2] = Acel[:2]
vetor[3] = Acel[2]
vetor [5] = Acel[3]
elif GrausDeLib == 3:
vetor = sp.zeros((6, 1))
vetor[:2] = Acel[:2]
vetor [5] = Acel[2]
acelHis[cont, :] = sp.hstack((tp, sp.hstack(vetor)))
del temp
##############################
#
# Criação de vetor de graus de liberdade
#
##############################
if GrausDeLib == 4:
xIn = sp.zeros([6, 1])
xIn [0] = self.MostraVel()[0]
xIn [1] = self.MostraVel()[1]
xIn [2] = self.MostraVel()[3]
xIn [3] = self.MostraVel()[5]
xIn [4] = self.MostraPos()[3]
xIn [5] = self.MostraPos()[5]
elif GrausDeLib == 3:
xIn = sp.zeros([4, 1])
xIn [0] = self.MostraVel()[0]
xIn [1] = self.MostraVel()[1]
xIn [2] = self.MostraVel()[5]
xIn [3] = self.MostraPos()[5]
##################################
#
# Integração da Aceleração solidária
#
##################################
            if met == 'euler':
                xIn = self.integrador.euler(self.f, xIn, tp, dt,
                                            (GrausDeLib, ))
            elif met == 'rk4':
                xIn = self.integrador.rk4(self.f, xIn, tp, dt, (GrausDeLib, ))
##################################
if GrausDeLib == 4:
x = sp.zeros((6, 1))
x[0] = xIn[0]
x[1] = xIn[1]
x[3] = xIn[2]
x[5] = xIn[3]
elif GrausDeLib==3:
x = sp.zeros((6, 1))
x[0] = xIn[0]
x[1] = xIn[1]
x[5] = xIn[2]
self.MudaVel(x)
del x
##################################
##
## Integração da velocidade inercial
##
###################################
posfutura = sp.zeros((6, 1))
            posfutura[:3] = self.integrador.euler(self.fvein, PosIne, tp, dt,
                                                  (self.MostraPos()[3:],
                                                   self.MostraVel()[:3]))
##################################
if GrausDeLib== 4:
posfutura[3] = xIn[4]
posfutura[5] = xIn[5]
elif GrausDeLib== 3:
posfutura[5] = xIn[3]
self.MudaPos(posfutura)
cont += 1
del posfutura
self.prop.MudaRot(tp)
self.leme.MudaLeme(tp)
return (veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis, propHis,
dados)
def getCurvaGiro(self, peso=None, met='euler', t0=0., dt=0.5, t=100.,
GrausDeLib=3, tipo='port', leme=sp.array(20.),
RotCom=None, VelCom= None, Vel=None, Eta='vel',
PosIne=sp.array([[0.], [0.], [0.], [0.], [0.], [0.] ]),
errotf=sp.array(0.05), errotd=sp.array(0.05),
errosr=sp.array(0.001), saida='txt'):
"""
"""
if RotCom == None:
RotCom = self.dic['rotnom']
if VelCom == None:
VelCom = self.dic['unom']
if Vel == None:
Vel = sp.array([ [self.dic['unom']], [0.], [0.], [0.], [0.], [0.]
])
self.MudaPos( PosIne)
self.MudaVel(Vel)
self.MudaRotCom(RotCom)
self.MudaVelCom(VelCom)
#Log é o parâmetro que indica quando a simulação armazenou os dados do
#relatório
if tipo == 'port':
self.MudaLemeCom(sp.array(leme*sp.pi/180))
log = False
log1 = False
elif tipo == 'starboard':
self.MudaLemeCom(sp.array(-leme*sp.pi/180))
log = False
log1 = False
#
# Criando espaço na memória para armazenar os parâmetros da curva
#
nlin = len(sp.arange(t0, t, dt)) #Número de linhas das colunas a serem
#criadas
if saida == 'mem':
lemeHis = sp.zeros((nlin, 2)) #historico do leme
veloHis = sp.zeros((nlin, 7)) #histórico da velocidade
veloInerHis = sp.zeros((nlin, 4))#histórico da velocidade no
#sistema inercial Verificar depois a necessidade
posHis = sp.zeros([nlin, 7]) #histórico da posição no sistema
#inercial
fHis = sp.zeros((nlin, 5)) #histórico de forças
acelHis = sp.zeros((nlin, 7)) #histórico de acelerações
propHis = sp.zeros((nlin, 2)) #histórico Máquina
EtaHis = sp.zeros((nlin, 2)) #histórico Eta
betaHis = sp.zeros((nlin, 2)) #histórico beta
elif saida == 'txt':
os.makedirs('./saida/CurvaGiro')
os.chdir('./saida/CurvaGiro')
            lemeHis = open('leme.dat', 'w')  # historico do leme
            lemeHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Giro\n#\n')
            lemeHis.write('#Valor do leme em rad\n')
            lemeHis.write('#temp'.center(5) + ' ' + 'leme'.rjust(8) + ' ' +
                          '\n')
            veloHis = open('velo.dat', 'w')  # histórico da velocidade
            veloHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Giro\n#\n')
            veloHis.write('#Velocidade Sistema Solidário \n#\n')
            veloHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
                          'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
                          'dot roll'.rjust(11) + ' ' +
                          'dot pitch'.rjust(11) + ' ' +
                          'dot yaw'.rjust(11) + ' ' + '\n')
            veloInerHis = open('veloiner.dat', 'w')  # histórico da velocidade
            # no sistema inercial. Verificar depois a necessidade
            veloInerHis.write('#Navio ' + self.nome + '\n' +
                              '#Manobra de Curva Giro\n#\n')
            veloInerHis.write('#Velocidade Inercial\n#\n')
            veloInerHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
                              'v'.rjust(11) + ' ' + 'r'.rjust(11) + '\n')
            posHis = open('pos.dat', 'w')  # histórico da posição no sistema
            # inercial
            posHis.write('#Navio ' + self.nome + '\n' +
                         '#Manobra de Curva Giro\n#\n')
            posHis.write('#Posição e Orientação\n#\n')
            posHis.write('#temp'.center(5) + ' ' + 'x'.rjust(11) + ' ' +
                         'y'.rjust(11) + ' ' + 'z'.rjust(11) + ' ' +
                         'roll'.rjust(11) + ' ' + 'pitch'.rjust(11) + ' ' +
                         'yaw'.rjust(11) + ' ' + '\n')
            fHis = open('forcas.dat', 'w')  # histórico de forças
            fHis.write('#Navio ' + self.nome + '\n' +
                       '#Manobra de Curva Giro\n#\n')
            fHis.write('#Forças e Momentos\n#\n')
            fHis.write('#temp'.center(5) + ' ' + 'X'.rjust(11) + ' ' +
                       'Y'.rjust(11) + ' ' + 'K'.rjust(11) + ' ' +
                       'N'.rjust(11) + ' ' + '\n')
            acelHis = open('acel.dat', 'w')  # histórico de acelerações
            acelHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Giro\n#\n')
            acelHis.write('#Aceleração\n#\n')
            acelHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
                          'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
                          'ddotroll'.rjust(11) + ' ' +
                          'ddotpitch'.rjust(11) + ' ' +
                          'ddotyaw'.rjust(11) + ' ' + '\n')
            propHis = open('propulsor.dat', 'w')  # histórico Máquina
            propHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Giro\n#\n')
            propHis.write('#Rotações do propulsor\n#\n')
            propHis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + '\n')
            EtaHis = open('Eta.dat', 'w')  # histórico Eta
            EtaHis.write('#Navio ' + self.nome + '\n' +
                         '#Manobra de Curva Giro\n#\n')
            EtaHis.write('#Eta \n#\n')
            EtaHis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + ' ' + '\n')
            betaHis = open('beta.dat', 'w')  # histórico beta
            betaHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Giro\n#\n')
            betaHis.write('#Beta \n#\n')
            betaHis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + ' ' +
                          '\n')
os.chdir('..')
os.chdir('..')
dados = []
dic = {}
PosIni = self.MostraPos().copy()
del nlin #não preciso mais
cont =0 #Contador
if peso == None:
par = (GrausDeLib, )
else:
par = (GrausDeLib, peso)
#
# Iteração
#
for tp in sp.arange(t0, t, dt):
if not log1:
if cont == 0:
V1 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
elif cont == 1:
V2 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
elif cont == 2:
V3 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
elif cont == 3:
V4 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
else:
V1 = V2
V2 = V3
V3 = V4
V4 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
if log:
if stats.tstd((V1, V2, V3, V4))<errosr:
dic['steadytr'] = (sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2) /
self.MostraVel()[5])
dados.append(dic.copy())
log1= True
if not log:
if (abs(abs(self.MostraPos()[5] - PosIni[5]) - (sp.pi/2))
<= errotf):
errotf = (abs(abs(self.MostraPos()[5] - PosIni[5]) -
(sp.pi/2)))
dic['transfer'] = abs(self.MostraPos()[1] - PosIni[1])
dic['advance'] = abs(self.MostraPos()[0] - PosIni[0])
if (abs(abs(self.MostraPos()[5] - PosIni[5]) - sp.pi) <=
errotd):
errotd = abs(abs(self.MostraPos()[5] - PosIni[5]) -
sp.pi)
dic['taticalDiameter'] = abs(self.MostraPos()[1] -
PosIni[1])
if abs(self.MostraPos()[5] - PosIni[5]) > sp.pi:
log = True
###################################
ft = self.VetF(par)
###################################
##
## inc = Velocidades Lineares no Sistema Inecial
##
###################################
MatRot = self.MatRot()
VelIn = sp.array(MatRot*self.MostraVel()[0:3])
PosIne = self.MostraPos()[0:3]
##################################
##
## Guardando os parâmetros
##
##################################
# Velocidade Inercial
if saida == 'txt':
veloInerHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in VelIn:
veloInerHis.write('%.5e'.rjust(11)%(arg) + ' ')
veloInerHis.write('\n')
elif saida == 'mem':
d = sp.hstack(VelIn)
veloInerHis[cont, 1:] = d #
veloInerHis[cont, 0] = tp #
# Histórico Leme
if saida == 'txt':
lemeHis.write('%.2f'.rjust(5)%(tp) + ' ')
lemeHis.write('%.2f'.rjust(5)%(self.MostraLeme()) + '\n')
elif saida == 'mem':
lemeHis[cont, 0] = tp
lemeHis[cont, 1] = self.MostraLeme()
# Histórico da posição
if saida == 'txt':
posHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraPos():
posHis.write('%.5e'.rjust(11)%(arg) + ' ')
posHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraPos())
posHis[cont, :] = sp.hstack((tp, temp))
del temp
# Histórico da Velocidade
if saida == 'txt':
veloHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraVel():
veloHis.write('%.5e'.rjust(11)%(arg) + ' ')
veloHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraVel())
veloHis[cont, :] = sp.hstack((tp, temp))
del temp
# Histórico das Forças
if saida == 'txt':
temp = sp.zeros((4, 1))
if GrausDeLib == 4:
temp= ft
elif GrausDeLib == 3:
temp[:2] = ft[:2]
temp[3] = ft[2]
fHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in temp:
fHis.write('%.5e'.rjust(11)%(arg) + ' ')
fHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(sp.array(ft))
if GrausDeLib == 4:
fHis[cont, :] = sp.hstack((tp, temp))
elif GrausDeLib == 3:
fHis[cont, :3] = sp.hstack((tp, temp[:2]))
fHis[cont, 4] = temp[2]
# Histórico Propulsor
if saida == 'txt':
propHis.write('%.2f'.rjust(5)%(tp) + ' ')
propHis.write('%.2f'.rjust(5)%self.MostraRot() + '\n')
elif saida == 'mem':
propHis[cont, :] = sp.hstack((tp, self.MostraRot()))
# Histórico Eta
if saida == 'txt':
EtaHis.write('%.2f'.rjust(5)%(tp) + ' ')
if Eta == 'rot':
EtaHis.write('%.2f'.rjust(5) % (self.MostraRotCom() /
self.MostraRot()) + '\n')
elif Eta == 'vel':
EtaHis.write('%.2f'.rjust(5) %
(self.MostraVelCom() / self.MostraVel()[0]) +
'\n')
elif saida == 'mem':
if Eta== 'rot':
EtaHis[cont, :] = sp.hstack((tp, self.MostraRotCom() /
self.MostraRot()))
elif Eta == 'vel':
EtaHis[cont, :] = sp.hstack((tp,
self.MostraVelCom() /
self.MostraVel()[0]))
# Histórico Beta
if saida == 'txt':
betaHis.write('%.2f'.rjust(5)%(tp) + ' ')
betaHis.write('%.2f'.rjust(5)%(sp.arctan(-self.MostraVel()[1]
/ self.MostraVel()[0])) + '\n')
elif saida == 'mem':
betaHis[cont, :] = sp.hstack((tp,
sp.arctan(-self.MostraVel()[1] /
self.MostraVel()[0])))
# Histórico das Acelerações
Acel = self.f2(ft, self.H(GrausDeLib))
vetor = sp.zeros((6, 1))
if GrausDeLib == 4:
vetor[:2] = Acel[:2]
vetor[3] = Acel[2]
vetor [5] = Acel[3]
elif GrausDeLib == 3:
vetor[:2] = Acel[:2]
vetor [5] = Acel[2]
if saida == 'txt':
acelHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in vetor:
acelHis.write('%.5e'.rjust(11)%(arg[0]) + ' ')
acelHis.write('\n')
elif saida == 'mem':
acelHis[cont, :] = sp.hstack((tp, sp.hstack(vetor)))
del temp
##############################
#
# Criação de vetor de graus de liberdade
#
##############################
if GrausDeLib == 4:
vt = sp.zeros([6, 1])
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[3]
vt [3] = self.MostraVel()[5]
vt [4] = self.MostraPos()[3]
vt [5] = self.MostraPos()[5]
elif GrausDeLib == 3:
vt = sp.zeros([4, 1])
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[5]
vt [3] = self.MostraPos()[5]
##################################
##
## Integração da Aceleração solidária
##
##################################
if met == 'euler':
vt = self.integrador.euler(self.f, vt, tp, dt ,par )
elif met =='rk4':
vt = self.integrador.rk4(self.f, vt, tp, dt, par)
##################################
if GrausDeLib == 4:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[3] = vt[2]
v[5] = vt[3]
elif GrausDeLib == 3:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[5] = vt[2]
self.MudaVel(v)
del v
##################################
##
## Integração da velocidade inercial
##
###################################
x = sp.zeros((6, 1))
if met == 'euler':
x[:3] = self.integrador.euler(self.fvein ,
self.MostraPos()[:3], tp, dt ,
(self.MostraPos()[3:] ,
self.MostraVel()[:3]))
elif met == 'rk4':
x[:3] = self.integrador.rk4(self.fvein, self.MostraPos()[:3],
tp, dt, (self.MostraPos()[3:],
self.MostraVel()[:3]))
##################################
if GrausDeLib == 4:
x[3] = vt[4]
x[5] = vt[5]
elif GrausDeLib == 3:
x[5] = vt[3]
self.MudaPos(x)
del x
cont += 1
self.prop.MudaRot(tp)
self.leme.MudaLeme(tp)
if saida == 'txt':
arq = [veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis,
propHis, EtaHis]
for arg in arq:
arg.close()
return dados
elif saida == 'mem':
return (veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis,
propHis, EtaHis, dados, betaHis)
    def getCurvaZigZag(self, peso=None, met='euler', t0=0., dt=0.5, t=100.,
                       GrausDeLib=3, tipo='port', leme=sp.array(20.),
                       RotCom=None, VelCom=None, Vel=None,
                       proa=sp.array([20.]), Eta='vel',
                       PosIne=sp.array([[0.], [0.], [0.], [0.], [0.], [0.]]),
                       osa=sp.array(0.0), ospath=sp.array(0.0),
                       erro=sp.array(0.005), saida='txt'):
"""
Simulador de manobras padrão
_________________________
Variáveis de entrada:
GrausDeLib (integer)-- Graus de liberdade;
met -- Método de integração. Default- Euler;
t0 -- Tempo inicial;
dt -- Passo no tempo;
t -- Tempo final
tipo - tipo de manobra simulada. Zig-Zag10/10 e Curva_de_Giro_port ou
Curva_de_Giro_starboard . Default -Zig-Zag
__________________________
Saída:
Tupla de sp.array
(veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis)
Em cada elemento da tupla:
A primeira coluna é o passo de tempo e as demais são as variáveis
veloHis -- histórico de velocidades;
posHis -- histórico de posições
acelHis --- histórico de acelerações
fHis -- histórico de forças
veloInerHis -- histórico de velocidades no sistema inercial
lemeHis -- histórico do comando de leme
"""
if RotCom == None:
RotCom = self.dic['rotnom']
if VelCom == None:
VelCom = self.dic['unom']
if Vel == None:
Vel = sp.array([[self.dic['unom']], [0.], [0.], [0.], [0.], [0.]
])
self.MudaPos( PosIne)
self.MudaVel(Vel)
self.MudaRotCom(RotCom)
self.MudaVelCom(VelCom)
if tipo == 'port':
self.MudaLemeCom(sp.array(leme*sp.pi/180))
exe=0
elif tipo == 'starboard':
self.MudaLemeCom(sp.array(-leme*sp.pi/180))
exe=1
#
# Criando espaço na memória para armazenar os parâmetros da curva
#
        # Número de linhas das colunas a serem criadas
nlin = len(sp.arange(t0, t, dt))
if saida == 'mem':
lemeHis = sp.zeros((nlin, 2)) #historico do leme
veloHis = sp.zeros((nlin, 7)) #histórico da velocidade
veloInerHis = sp.zeros((nlin, 4))#histórico da velocidade no
#sistema inercial Verificar depois a necessidade
posHis = sp.zeros([nlin, 7]) #histórico da posição no sistema
#inercial
fHis = sp.zeros((nlin, 5)) #histórico de forças
acelHis = sp.zeros((nlin, 7)) #histórico de acelerações
propHis = sp.zeros((nlin, 2)) #histórico Máquina
EtaHis = sp.zeros((nlin, 2)) #histórico Eta
elif saida == 'txt':
os.makedirs('./saida/ZigZag')
os.chdir('./saida/ZigZag')
lemeHis = open('leme.dat', 'w')#historico do leme
            lemeHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Zig-Zag\n#\n')
            lemeHis.write('#Valor do leme em rad\n')
            lemeHis.write('#temp'.center(5) + ' ' + 'leme'.rjust(8) + ' ' +
                          '\n')
            veloHis = open('velo.dat', 'w')  # histórico da velocidade
            veloHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Zig-Zag\n#\n')
            veloHis.write('#Velocidade Sistema Solidário \n#\n')
            veloHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
                          'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
                          'dot roll'.rjust(11) + ' ' +
                          'dot pitch'.rjust(11) + ' ' +
                          'dot yaw'.rjust(11) + ' ' + '\n')
            veloInerHis = open('veloiner.dat', 'w')  # histórico da velocidade
            # no sistema inercial. Verificar depois a necessidade
            veloInerHis.write('#Navio ' + self.nome + '\n' +
                              '#Manobra de Curva Zig-Zag\n#\n')
            veloInerHis.write('#Velocidade Inercial\n#\n')
            veloInerHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
                              'v'.rjust(11) + ' ' + 'r'.rjust(11) + '\n')
            posHis = open('pos.dat', 'w')  # histórico da posição no sistema
            # inercial
            posHis.write('#Navio ' + self.nome + '\n' +
                         '#Manobra de Curva Zig-Zag\n#\n')
            posHis.write('#Posição e Orientação\n#\n')
            posHis.write('#temp'.center(5) + ' ' + 'x'.rjust(11) + ' ' +
                         'y'.rjust(11) + ' ' + 'z'.rjust(11) + ' ' +
                         'roll'.rjust(11) + ' ' + 'pitch'.rjust(11) + ' ' +
                         'yaw'.rjust(11) + ' ' + '\n')
            fHis = open('forcas.dat', 'w')  # histórico de forças
            fHis.write('#Navio ' + self.nome + '\n' +
                       '#Manobra de Curva Zig-Zag\n#\n')
            fHis.write('#Forças e Momentos\n#\n')
            fHis.write('#temp'.center(5) + ' ' + 'X'.rjust(11) + ' ' +
                       'Y'.rjust(11) + ' ' + 'K'.rjust(11) + ' ' +
                       'N'.rjust(11) + ' ' + '\n')
            acelHis = open('acel.dat', 'w')  # histórico de acelerações
            acelHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Zig-Zag\n#\n')
            acelHis.write('#Aceleração\n#\n')
            acelHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
                          'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
                          'ddotroll'.rjust(11) + ' ' +
                          'ddotpitch'.rjust(11) + ' ' +
                          'ddotyaw'.rjust(11) + ' ' + '\n')
            propHis = open('propulsor.dat', 'w')  # histórico Máquina
            propHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Zig-Zag\n#\n')
            propHis.write('#Rotações do propulsor\n#\n')
            propHis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + '\n')
            EtaHis = open('Eta.dat', 'w')  # histórico Eta
            EtaHis.write('#Navio ' + self.nome + '\n' +
                         '#Manobra de Curva Zig-Zag\n#\n')
EtaHis.write('#Eta \n#\n')
EtaHis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + ' ' + '\n')
os.chdir('..')
os.chdir('..')
dados = []
dic = {}
PosIni = self.MostraPos().copy()
del nlin #não preciso mais
cont =0 #Contador
if peso == None:
par = (GrausDeLib, )
else:
par = (GrausDeLib, peso)
#
# Iteração
#
for tp in sp.arange(t0, t, dt):
###############################
##
## Verificando o momento em que será realizada a mudança do leme
##
###############################
if (((exe%2 == 0) and self.MostraPos()[5] <=
-(proa * sp.pi / 180)) or (exe%2 != 0 and
self.MostraPos()[5] >= (proa * sp.pi / 180))):
self.MudaLemeCom(self.MostraLeme() * (-1))
if ((exe != 0 and tipo == 'port') or (exe != 1
and tipo == 'starboard')):
dic['reach'] = erro
dic['ospath'] = ospath
dic['osangle'] = osa
dados.append(dic.copy())
osa = sp.array(0.0)
ospath = sp.array(0)
erro = sp.array(0.05)
logospath = False
logosa = False
exe += 1
if tipo =='port':
dic['exeNummber'] = exe
elif tipo=='starboard':
dic['exeNummber'] = exe - 1
dic['time'] = tp - sp.array(dt)
dic['path'] = self.MostraPos()[1]
dic['proa'] = self.MostraPos()[5]
###############################
##
## Atualizando os parâmetros
##
###############################
if ((exe!=0 and tipo == 'port') or (exe!=1 and tipo ==
'starboard')):
if ((logospath == False) and
(abs(self.MostraPos()[1] - dic['path']) >= ospath)):
#(sp.sign(self.MostraPos()[1])== sp.sign(dic['path'])) and
ospath = abs(self.MostraPos()[1] - dic['path'])
else:
logospath = True
if ((logosa == False) and (abs(self.MostraPos()[5] -
dic['proa']) >= osa)): #(sp.sign(self.MostraPos()[5])==
#sp.sign(dic['proa'])) and
osa = abs(self.MostraPos()[5] - dic['proa'])
else:
logosa = True
if abs(abs(self.MostraPos()[5]) - abs(PosIni[5])) < erro:
erro = abs(self.MostraPos()[5] - PosIni[5])
#
# inc = Velocidades Lineares no Sistema Inecial
#
MatRot = self.MatRot()
VelIn = MatRot * sp.matrix(self.vel[0:3])
PosIne = self.MostraPos()[0:3]
###################################
#################################
##
## Cálculo das forças de Maneira Modular
##
###################################
ft = self.VetF(par)
##################################
##################################
##
## Guardando os parâmetros
##
##################################
# Velocidade Inercial
if saida == 'txt':
veloInerHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in VelIn:
veloInerHis.write('%.5e'.rjust(11)%(arg) + ' ')
veloInerHis.write('\n')
elif saida == 'mem':
d = sp.hstack(VelIn)
veloInerHis[cont, 1:] = d #
veloInerHis[cont, 0] = tp #
# Histórico Leme
if saida == 'txt':
lemeHis.write('%.2f'.rjust(5)%(tp) + ' ')
lemeHis.write('%.2f'.rjust(5)%(self.MostraLeme()) + '\n')
elif saida == 'mem':
lemeHis[cont, 0] = tp
lemeHis[cont, 1] = self.MostraLeme()
# Histórico da posição
if saida == 'txt':
posHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraPos():
posHis.write('%.5e'.rjust(11)%(arg) + ' ')
posHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraPos())
posHis[cont, :] = sp.hstack((tp, temp))
del temp
# Histórico da Velocidade
if saida == 'txt':
veloHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraVel():
veloHis.write('%.5e'.rjust(11)%(arg) + ' ')
veloHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraVel())
veloHis[cont, :] = sp.hstack((tp, temp))
del temp
# Histórico das Forças
if saida == 'txt':
temp = sp.zeros((4, 1))
if GrausDeLib == 4:
temp = ft
elif GrausDeLib == 3:
temp[:2] = ft[:2]
temp[3] = ft[2]
fHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in temp:
fHis.write('%.5e'.rjust(11)%(arg) + ' ')
fHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(sp.array(ft))
if GrausDeLib == 4:
fHis[cont, :] = sp.hstack((tp, temp))
elif GrausDeLib == 3:
fHis[cont, :3] = sp.hstack((tp, temp[:2]))
fHis[cont, 4] = temp[2]
# Histórico Propulsor
if saida == 'txt':
propHis.write('%.2f'.rjust(5)%(tp) + ' ')
propHis.write('%.2f'.rjust(5)%self.MostraRot() + '\n')
elif saida == 'mem':
propHis[cont, :] = sp.hstack((tp, self.MostraRot()))
# Histórico Eta
if saida == 'txt':
EtaHis.write('%.2f'.rjust(5)%(tp) + ' ')
if Eta == 'rot':
EtaHis.write('%.2f'.rjust(5) % (self.MostraRotCom() /
self.MostraRot()) + '\n')
elif Eta == 'vel':
EtaHis.write('%.2f'.rjust(5) % (self.MostraVelCom() /
self.MostraVel()[0]) + '\n')
elif saida == 'mem':
if Eta== 'rot':
EtaHis[cont, :] = sp.hstack((tp, self.MostraRotCom() /
self.MostraRot()))
elif Eta == 'vel':
EtaHis[cont, :] = sp.hstack((tp, self.MostraVelCom() /
self.MostraVel()[0]))
# Histórico das Acelerações
Acel = self.f2(ft, self.H(GrausDeLib))
vetor = sp.zeros((6, 1))
if GrausDeLib == 4:
vetor[:2] = Acel[:2]
vetor[3] = Acel[2]
vetor [5] = Acel[3]
elif GrausDeLib == 3:
vetor[:2] = Acel[:2]
vetor [5] = Acel[2]
if saida == 'txt':
acelHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in vetor:
acelHis.write('%.5e'.rjust(11)%(arg[0]) + ' ')
acelHis.write('\n')
elif saida == 'mem':
acelHis[cont, :] = sp.hstack((tp, sp.hstack(vetor)))
del vetor
##############################
##
## Criação de vetor de graus de liberdade
##
##############################
if GrausDeLib == 4:
vt = sp.zeros([6, 1])
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[3]
vt [3] = self.MostraVel()[5]
vt [4] = self.MostraPos()[3]
vt [5] = self.MostraPos()[5]
elif GrausDeLib == 3:
vt = sp.zeros([4, 1])
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[5]
vt [3] = self.MostraPos()[5]
##################################
##
## Integração da Aceleração solidária
##
##################################
if met == 'euler':
vt = self.integrador.euler(self.f, vt, tp, dt ,par )
elif met =='rk4':
vt = self.integrador.rk4(self.f, vt, tp, dt, par)
##################################
if GrausDeLib == 4:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[3] = vt[2]
v[5] = vt[3]
elif GrausDeLib ==3:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[5] = vt[2]
self.MudaVel(v)
del v
##################################
##
## Integração da velocidade inercial
##
###################################
x = sp.zeros((6, 1))
if met == 'euler':
x[:3] = self.integrador.euler(self.fvein, self.MostraPos()[:3],
tp, dt, (self.MostraPos()[3:],
self.MostraVel()[:3]))
elif met == 'rk4':
x[:3] = self.integrador.rk4(self.fvein, self.MostraPos()[:3],
tp, dt, (self.MostraPos()[3:],
self.MostraVel()[:3]))
##################################
if GrausDeLib == 4:
x[3] = vt[4]
x[5] = vt[5]
elif GrausDeLib == 3:
x[5] = vt[3]
self.MudaPos(x)
cont += 1
del x
self.prop.MudaRot(tp)
self.leme.MudaLeme(tp)
if saida == 'txt':
arq = [veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis,
propHis, EtaHis]
for arg in arq:
arg.close()
return dados
elif saida == 'mem':
return (veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis,
propHis, EtaHis, dados)
def simulaTestb(self, p, intervalo=sp.array(5.), V= None ):
"""
        Returns a matrix with the nondimensional force values as the flow
        direction varies: u = V*cos(beta) and v = -V*sin(beta), with beta
        sweeping from 0 to 180 degrees in steps of `intervalo` degrees.
"""
if V == None:
V = self.dic['unom']
Velocidade = sp.zeros((6, 1))
saida = sp.zeros([len( sp.arange(0., sp.pi, intervalo * sp.pi / 180)),
5])
contlinha = 0
for beta in sp.arange(0., sp.pi, intervalo * sp.pi / 180):
Velocidade[0] = sp.array(V) * sp.cos(beta)
Velocidade[1] = -sp.array(V) * sp.sin(beta)
self.MudaVelCom(Velocidade[0]) #condição que força \eta=1
self.MudaVel(Velocidade)
v = sp.sqrt(Velocidade[0] ** 2 + Velocidade[1] ** 2)
rho = self.dic['rho']
lpp = self.dic['lpp']
vetF = self.VetF((4, p))
# vetF = sp.hstack(vetF)
saida[contlinha, :] = sp.hstack([beta, vetF[0] * (2 / (rho * (lpp *
(v ** 2)))), vetF[1] * (2 / (rho *
(lpp* (v ** 2)))), vetF[2] *
(2 / (rho * ((lpp * v) ** 2))),
vetF[3] * (2 / (rho * ((lpp * v) **
2)))])
contlinha += 1
return saida
| gpl-3.0 | -2,103,145,341,740,217,300 | 33.131995 | 79 | 0.426835 | false |
macmanes-lab/GeosmithiaComparativeGenomics | Scripts4PAML/6_removehashtag_TA.py | 1 | 1423 | #!/usr/bin/python3
# A program for removing hashtags from the "foreground" species header.
# USAGE: ./6_removehashtag_TA.py --input path_to_input_directory
# Author: Taruna Aggarwal
# Contact: ta2007@wildcats.unh.edu
# Affiliation: University of New Hampshire, Durham, NH, USA
# Date: 1/27/2016
# Purpose is to remove '#1' from the species header that is considered the foreground branch
# for the branch-site model in codeml of PAML
# The script will generate new files in the same directory as itself.
# The new files will be appended with '.fixed.clean'
import argparse
import os
parser = argparse.ArgumentParser(description="This script renames files and their headers in a directory.")
parser.add_argument('--input', help="PATH to the directory with input files.", required=True)
args = parser.parse_args()
for file in os.listdir(args.input):
if file.endswith(".clean"):
working_file = open(args.input + '/' + file, "r")
new_file = open(file[:-6] + ".fixed.clean", "w")
for currentLine in working_file:
currentLine = currentLine.rstrip()
if currentLine.startswith(">geos_morb"):
new_file.write("{0}{1}\n".format(currentLine[:-2]))
#elif currentLine[0]==">":
# new_file.write("{0}\n".format(currentLine[0:10]))
else:
new_file.write("{0}\n".format(currentLine))
working_file.close()
new_file.close()
| cc0-1.0 | 5,111,911,443,499,973,000 | 38.527778 | 107 | 0.665495 | false |
ssato/python-anyconfig | tests/base/utils.py | 1 | 3492 | #
# Copyright (C) 2021 Satoru SATOH <satoru.satoh@gmail.com>
# License: MIT
#
"""File based test data collector.
"""
import ast
import importlib.util
import json
import pathlib
import typing
import warnings
from .datatypes import (
DictT, MaybePathT, TDataPaths
)
def target_by_parent(self: str = __file__):
"""
>>> target_by_parent()
'base'
"""
return pathlib.Path(self).parent.name
def load_from_py(py_path: typing.Union[str, pathlib.Path],
data_name: str = 'DATA') -> DictT:
""".. note:: It's not safe always.
"""
    spec = importlib.util.spec_from_file_location('testmod', py_path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return getattr(mod, data_name, None)
def load_literal_data_from_py(py_path: typing.Union[str, pathlib.Path]
) -> DictT:
""".. note:: It might be safer than the above function.
"""
return ast.literal_eval(pathlib.Path(py_path).read_text().strip())
def maybe_data_path(datadir: pathlib.Path, name: str,
should_exist: typing.Iterable[str] = (),
file_ext: str = '*'
) -> typing.Optional[pathlib.Path]:
"""
Get and return the file path of extra data file. Its filename will be
computed from the filename of the base data file given.
"""
pattern = f'{name}.{file_ext}'
if datadir.exists() and datadir.is_dir():
paths = sorted(datadir.glob(pattern))
if paths:
return paths[0] # There should be just one file found.
if datadir.name in should_exist:
        raise OSError(f'{datadir!s}/{pattern} should exist but does not')
return None
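
# For example (illustrative sketch): maybe_data_path(base / 'e', '10',
# file_ext='json') returns base/e/10.json when that file exists, None when
# it does not, and raises OSError only when the file is missing but the
# sub-directory name is listed in should_exist.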
def load_data(path: MaybePathT,
default: typing.Optional[typing.Any] = None,
should_exist: bool = False,
exec_py: bool = False
) -> typing.Union[DictT, str]:
"""
Return data loaded from given path or the default value.
"""
if path is None and not should_exist:
return default
if path.exists():
if path.suffix == '.json':
return json.load(path.open())
if path.suffix == '.py':
return (
load_from_py if exec_py else load_literal_data_from_py
)(path)
if path.suffix == '.txt':
return path.read_text()
return path
raise ValueError(f'Not exist or an invalid data: {path!s}')
def each_data_from_dir(datadir: pathlib.Path, pattern: str = '*.json',
should_exist: typing.Iterable[str] = ()
) -> typing.Iterator[TDataPaths]:
"""
Yield a collection of paths of data files under given dir.
"""
if not datadir.is_dir():
        raise ValueError(f'Does not look like a data dir: {datadir!s}')
for inp in sorted(datadir.glob(pattern)):
if not inp.exists():
warnings.warn(f'Not exists: {inp!s}')
continue
if not inp.is_file():
warnings.warn(f'Not looks a file: {inp!s}')
continue
name = inp.stem
yield TDataPaths(
datadir,
inp,
maybe_data_path(datadir / 'e', name, should_exist),
maybe_data_path(datadir / 'o', name, should_exist),
maybe_data_path(datadir / 's', name, should_exist),
maybe_data_path(datadir / 'q', name, should_exist),
maybe_data_path(datadir / 'c', name, should_exist)
)
# vim:sw=4:ts=4:et:
| mit | -5,399,448,639,285,853,000 | 27.859504 | 73 | 0.570447 | false |
wroersma/volatility | volatility/plugins/taskmods.py | 1 | 16486 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# Additional Authors:
# Michael Cohen <scudette@users.sourceforge.net>
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
#pylint: disable-msg=C0111
import os, re
import volatility.plugins.common as common
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address, Hex
import volatility.win32 as win32
import volatility.obj as obj
import volatility.debug as debug
import volatility.utils as utils
import volatility.cache as cache
class DllList(common.AbstractWindowsCommand, cache.Testable):
"""Print list of loaded dlls for each process"""
def __init__(self, config, *args, **kwargs):
common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
cache.Testable.__init__(self)
config.add_option('OFFSET', short_option = 'o', default = None,
help = 'EPROCESS offset (in hex) in the physical address space',
action = 'store', type = 'int')
config.add_option('PID', short_option = 'p', default = None,
help = 'Operate on these Process IDs (comma-separated)',
action = 'store', type = 'str')
config.add_option('NAME', short_option = 'n', default = None,
help = 'Operate on these process names (regex)',
action = 'store', type = 'str')
def unified_output(self, data):
return TreeGrid([("Pid", int),
("Base", Address),
("Size", Hex),
("LoadCount", Hex),
("LoadTime", str),
("Path", str)],
self.generator(data))
def generator(self, data):
for task in data:
pid = task.UniqueProcessId
if task.Peb:
for m in task.get_load_modules():
yield (0, [int(pid), Address(m.DllBase), Hex(m.SizeOfImage), Hex(m.LoadCount), str(m.load_time()), str(m.FullDllName or '')])
else:
yield (0, [int(pid), Address(0), Hex(0), Hex(0), "", "Error reading PEB for pid"])
def render_text(self, outfd, data):
for task in data:
pid = task.UniqueProcessId
outfd.write("*" * 72 + "\n")
outfd.write("{0} pid: {1:6}\n".format(task.ImageFileName, pid))
if task.Peb:
## REMOVE this after 2.4, since we have the cmdline plugin now
outfd.write("Command line : {0}\n".format(str(task.Peb.ProcessParameters.CommandLine or '')))
if task.IsWow64:
outfd.write("Note: use ldrmodules for listing DLLs in Wow64 processes\n")
outfd.write("{0}\n".format(str(task.Peb.CSDVersion or '')))
outfd.write("\n")
self.table_header(outfd,
[("Base", "[addrpad]"),
("Size", "[addr]"),
("LoadCount", "[addr]"),
("LoadTime", "<30"),
("Path", ""),
])
for m in task.get_load_modules():
self.table_row(outfd, m.DllBase, m.SizeOfImage, m.LoadCount, str(m.load_time()), str(m.FullDllName or ''))
else:
outfd.write("Unable to read PEB for task.\n")
def filter_tasks(self, tasks):
""" Reduce the tasks based on the user selectable PIDS parameter.
Returns a reduced list or the full list if config.PIDS not specified.
"""
if self._config.PID is not None:
try:
pidlist = [int(p) for p in self._config.PID.split(',')]
except ValueError:
debug.error("Invalid PID {0}".format(self._config.PID))
pids = [t for t in tasks if t.UniqueProcessId in pidlist]
if len(pids) == 0:
debug.error("Cannot find PID {0}. If its terminated or unlinked, use psscan and then supply --offset=OFFSET".format(self._config.PID))
return pids
if self._config.NAME is not None:
try:
name_re = re.compile(self._config.NAME, re.I)
except re.error:
debug.error("Invalid name {0}".format(self._config.NAME))
names = [t for t in tasks if name_re.search(str(t.ImageFileName))]
if len(names) == 0:
debug.error("Cannot find name {0}. If its terminated or unlinked, use psscan and then supply --offset=OFFSET".format(self._config.NAME))
return names
return tasks
@staticmethod
def virtual_process_from_physical_offset(addr_space, offset):
""" Returns a virtual process from a physical offset in memory """
# Since this is a physical offset, we find the process
flat_addr_space = utils.load_as(addr_space.get_config(), astype = 'physical')
flateproc = obj.Object("_EPROCESS", offset, flat_addr_space)
# then use the virtual address of its first thread to get into virtual land
# (Note: the addr_space and flat_addr_space use the same config, so should have the same profile)
tleoffset = addr_space.profile.get_obj_offset("_ETHREAD", "ThreadListEntry")
# start out with the member offset given to us from the profile
offsets = [tleoffset]
# if (and only if) we're dealing with 64-bit Windows 7 SP1
# then add the other commonly seen member offset to the list
meta = addr_space.profile.metadata
major = meta.get("major", 0)
minor = meta.get("minor", 0)
build = meta.get("build", 0)
version = (major, minor, build)
if meta.get("memory_model") == "64bit" and version == (6, 1, 7601):
offsets.append(tleoffset + 8)
## use the member offset from the profile
for ofs in offsets:
ethread = obj.Object("_ETHREAD", offset = flateproc.ThreadListHead.Flink.v() - ofs, vm = addr_space)
# and ask for the thread's process to get an _EPROCESS with a virtual address space
virtual_process = ethread.owning_process()
# Sanity check the bounce. See Issue 154.
if virtual_process and offset == addr_space.vtop(virtual_process.obj_offset):
return virtual_process
return obj.NoneObject("Unable to bounce back from virtual _ETHREAD to virtual _EPROCESS")
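
    # Illustrative usage sketch (not in the original source): given a
    # configured address space, a physical _EPROCESS offset (for instance
    # one reported by psscan) can be bounced back to its virtual object:
    #     proc = DllList.virtual_process_from_physical_offset(addr_space,
    #                                                         0x01a2b030)
    # where 0x01a2b030 is a hypothetical offset used only for this example.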
@cache.CacheDecorator(lambda self: "tests/pslist/pid={0}/offset={1}".format(self._config.PID, self._config.OFFSET))
def calculate(self):
"""Produces a list of processes, or just a single process based on an OFFSET"""
addr_space = utils.load_as(self._config)
if self._config.OFFSET != None:
tasks = [self.virtual_process_from_physical_offset(addr_space, self._config.OFFSET)]
else:
tasks = self.filter_tasks(win32.tasks.pslist(addr_space))
return tasks
class PSList(DllList):
""" Print all running processes by following the EPROCESS lists """
def __init__(self, config, *args, **kwargs):
DllList.__init__(self, config, *args, **kwargs)
config.add_option("PHYSICAL-OFFSET", short_option = 'P',
default = False, cache_invalidator = False,
help = "Display physical offsets instead of virtual",
action = "store_true")
def render_text(self, outfd, data):
offsettype = "(V)" if not self._config.PHYSICAL_OFFSET else "(P)"
self.table_header(outfd,
[("Offset{0}".format(offsettype), "[addrpad]"),
("Name", "20s"),
("PID", ">6"),
("PPID", ">6"),
("Thds", ">6"),
("Hnds", ">8"),
("Sess", ">6"),
("Wow64", ">6"),
("Start", "30"),
("Exit", "30")]
)
for task in data:
# PHYSICAL_OFFSET must STRICTLY only be used in the results. If it's used for anything else,
# it needs to have cache_invalidator set to True in the options
if not self._config.PHYSICAL_OFFSET:
offset = task.obj_offset
else:
offset = task.obj_vm.vtop(task.obj_offset)
self.table_row(outfd,
offset,
task.ImageFileName,
task.UniqueProcessId,
task.InheritedFromUniqueProcessId,
task.ActiveThreads,
task.ObjectTable.HandleCount,
task.SessionId,
task.IsWow64,
str(task.CreateTime or ''),
str(task.ExitTime or ''),
)
def render_dot(self, outfd, data):
objects = set()
links = set()
for eprocess in data:
label = "{0} | {1} |".format(eprocess.UniqueProcessId,
eprocess.ImageFileName)
if eprocess.ExitTime:
label += "exited\\n{0}".format(eprocess.ExitTime)
options = ' style = "filled" fillcolor = "lightgray" '
else:
label += "running"
options = ''
objects.add('pid{0} [label="{1}" shape="record" {2}];\n'.format(eprocess.UniqueProcessId,
label, options))
links.add("pid{0} -> pid{1} [];\n".format(eprocess.InheritedFromUniqueProcessId,
eprocess.UniqueProcessId))
## Now write the dot file
outfd.write("digraph processtree { \ngraph [rankdir = \"TB\"];\n")
for link in links:
outfd.write(link)
for item in objects:
outfd.write(item)
outfd.write("}")
def unified_output(self, data):
offsettype = "(V)" if not self._config.PHYSICAL_OFFSET else "(P)"
return TreeGrid([("Offset{0}".format(offsettype), Address),
("Name", str),
("PID", int),
("PPID", int),
("Thds", int),
("Hnds", int),
("Sess", int),
("Wow64", int),
("Start", str),
("Exit", str)],
self.generator(data))
def generator(self, data):
for task in data:
# PHYSICAL_OFFSET must STRICTLY only be used in the results. If it's used for anything else,
# it needs to have cache_invalidator set to True in the options
if not self._config.PHYSICAL_OFFSET:
offset = task.obj_offset
else:
offset = task.obj_vm.vtop(task.obj_offset)
yield (0, [Address(offset),
str(task.ImageFileName),
int(task.UniqueProcessId),
int(task.InheritedFromUniqueProcessId),
int(task.ActiveThreads),
int(task.ObjectTable.HandleCount),
int(task.SessionId),
int(task.IsWow64),
str(task.CreateTime or ''),
str(task.ExitTime or '')])
# Inherit from files just for the config options (__init__)
class MemMap(DllList):
"""Print the memory map"""
def unified_output(self, data):
return TreeGrid([("Process", str),
("PID", int),
("Virtual", Address),
("Physical", Address),
("Size", Address),
("DumpFileOffset", Address)],
self.generator(data))
def generator(self, data):
for pid, task, pagedata in data:
task_space = task.get_process_address_space()
proc = "{0}".format(task.ImageFileName)
offset = 0
if pagedata:
for p in pagedata:
pa = task_space.vtop(p[0])
# pa can be 0, according to the old memmap, but can't == None(NoneObject)
if pa != None:
data = task_space.read(p[0], p[1])
if data != None:
yield (0, [proc, int(pid), Address(p[0]), Address(pa), Address(p[1]), Address(offset)])
offset += p[1]
def render_text(self, outfd, data):
first = True
for pid, task, pagedata in data:
if not first:
outfd.write("*" * 72 + "\n")
task_space = task.get_process_address_space()
outfd.write("{0} pid: {1:6}\n".format(task.ImageFileName, pid))
first = False
offset = 0
if pagedata:
self.table_header(outfd,
[("Virtual", "[addrpad]"),
("Physical", "[addrpad]"),
("Size", "[addr]"),
("DumpFileOffset", "[addr]")])
for p in pagedata:
pa = task_space.vtop(p[0])
# pa can be 0, according to the old memmap, but can't == None(NoneObject)
if pa != None:
data = task_space.read(p[0], p[1])
if data != None:
self.table_row(outfd, p[0], pa, p[1], offset)
offset += p[1]
else:
outfd.write("Unable to read pages for task.\n")
@cache.CacheDecorator(lambda self: "tests/memmap/pid={0}/offset={1}".format(self._config.PID, self._config.OFFSET))
def calculate(self):
tasks = DllList.calculate(self)
for task in tasks:
if task.UniqueProcessId:
pid = task.UniqueProcessId
task_space = task.get_process_address_space()
pages = task_space.get_available_pages()
yield pid, task, pages
class MemDump(MemMap):
"""Dump the addressable memory for a process"""
def __init__(self, config, *args, **kwargs):
MemMap.__init__(self, config, *args, **kwargs)
config.add_option('DUMP-DIR', short_option = 'D', default = None,
cache_invalidator = False,
help = 'Directory in which to dump memory')
def render_text(self, outfd, data):
if self._config.DUMP_DIR == None:
debug.error("Please specify a dump directory (--dump-dir)")
if not os.path.isdir(self._config.DUMP_DIR):
debug.error(self._config.DUMP_DIR + " is not a directory")
for pid, task, pagedata in data:
outfd.write("*" * 72 + "\n")
task_space = task.get_process_address_space()
outfd.write("Writing {0} [{1:6}] to {2}.dmp\n".format(task.ImageFileName, pid, str(pid)))
f = open(os.path.join(self._config.DUMP_DIR, str(pid) + ".dmp"), 'wb')
if pagedata:
for p in pagedata:
data = task_space.read(p[0], p[1])
if data == None:
if self._config.verbose:
outfd.write("Memory Not Accessible: Virtual Address: 0x{0:x} Size: 0x{1:x}\n".format(p[0], p[1]))
else:
f.write(data)
else:
outfd.write("Unable to read pages for task.\n")
f.close()
| gpl-2.0 | -7,597,039,038,837,842,000 | 42.044386 | 152 | 0.517955 | false |
showell/zulip | tools/lib/test_server.py | 1 | 3491 | import os
import subprocess
import sys
import time
from contextlib import contextmanager
from typing import Iterator, Optional
# Verify the Zulip venv is available.
from tools.lib import sanity_check
sanity_check.check_venv(__file__)
import django
import requests
MAX_SERVER_WAIT = 180
TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if TOOLS_DIR not in sys.path:
sys.path.insert(0, os.path.dirname(TOOLS_DIR))
from scripts.lib.zulip_tools import get_or_create_dev_uuid_var_path
from zerver.lib.test_fixtures import update_test_databases_if_required
def set_up_django(external_host: str) -> None:
os.environ['FULL_STACK_ZULIP_TEST'] = '1'
os.environ['EXTERNAL_HOST'] = external_host
os.environ["LOCAL_UPLOADS_DIR"] = get_or_create_dev_uuid_var_path(
'test-backend/test_uploads')
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings'
django.setup()
os.environ['PYTHONUNBUFFERED'] = 'y'
def assert_server_running(server: "subprocess.Popen[bytes]", log_file: Optional[str]) -> None:
"""Get the exit code of the server, or None if it is still running."""
if server.poll() is not None:
message = 'Server died unexpectedly!'
if log_file:
message += f'\nSee {log_file}\n'
raise RuntimeError(message)
def server_is_up(server: "subprocess.Popen[bytes]", log_file: Optional[str]) -> bool:
assert_server_running(server, log_file)
try:
# We could get a 501 error if the reverse proxy is up but the Django app isn't.
# Note that zulipdev.com is mapped via DNS to 127.0.0.1.
return requests.get('http://zulipdev.com:9981/accounts/home').status_code == 200
except requests.RequestException:
return False
@contextmanager
def test_server_running(force: bool=False, external_host: str='testserver',
log_file: Optional[str]=None, dots: bool=False,
) -> Iterator[None]:
log = sys.stdout
if log_file:
if os.path.exists(log_file) and os.path.getsize(log_file) < 100000:
log = open(log_file, 'a')
log.write('\n\n')
else:
log = open(log_file, 'w')
set_up_django(external_host)
update_test_databases_if_required(rebuild_test_database=True)
# Run this not through the shell, so that we have the actual PID.
run_dev_server_command = ['tools/run-dev.py', '--test', '--streamlined']
if force:
run_dev_server_command.append('--force')
server = subprocess.Popen(run_dev_server_command,
stdout=log, stderr=log)
try:
# Wait for the server to start up.
sys.stdout.write('\nWaiting for test server (may take a while)')
if not dots:
sys.stdout.write('\n\n')
t = time.time()
while not server_is_up(server, log_file):
if dots:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.4)
if time.time() - t > MAX_SERVER_WAIT:
raise Exception('Timeout waiting for server')
sys.stdout.write('\n\n--- SERVER IS UP! ---\n\n')
# DO OUR ACTUAL TESTING HERE!!!
yield
finally:
assert_server_running(server, log_file)
server.terminate()
server.wait()
if __name__ == '__main__':
# The code below is for testing this module works
with test_server_running():
print('\n\n SERVER IS UP!\n\n')
| apache-2.0 | -5,317,063,131,087,900,000 | 33.564356 | 94 | 0.625322 | false |
elezar/fortran-beautifier | fparser/tests/test_api.py | 1 | 3401 | """
Test parsing of whole fortran files; 'blackbox' tests here.
"""
from fparser import api
import sys
from os.path import abspath, join, dirname
def test_use_module():
d = dirname(__file__)
sources = [join(d,'modfile.f95'), join(d,'funcfile.f95')]
file_to_parse = sources[1]
tree = api.parse(file_to_parse, isfree=True, isstrict=False, source_only = sources)
def test_dimension_attr():
source_str = '''
subroutine foo
integer, dimension( -10 : 10, - 2147483648 : 2147483648) :: a( -2 : 2, 1000000 : 1000001 )
real, dimension(-20:20, 100:113, - 512 : 713) :: b
end
'''
tree = api.parse(source_str, isfree=True, isstrict=False)
subr = tree.a.external_subprogram['foo']
avar = subr.a.variables['a']
assert avar.dimension == [('-10', '10'), ('- 2147483648', '2147483648')]
assert avar.bounds == [('-2', '2'), ('1000000', '1000001')]
assert avar.shape == ['4', '1']
bvar = subr.a.variables['b']
print(bvar.dimension)
print(bvar.shape)
print(bvar)
assert bvar.dimension == [('-20', '20'), ('100', '113'), ('- 512', '713')]
assert bvar.shape == ['40', '13', '1225']
def test_provides():
source_str = '''
module mod1
implicit none
integer, parameter :: GP = 6
integer :: a,b,c,d,e
! module_provides = {GP,a,b,c,d,e}
! use_provides = {}
end module mod1
module mod2
implicit none
integer, parameter :: SP = 5
real :: a,b,c
! module_provides = {SP,a,b,c}
! use_provides = {}
end module mod2
module mod3
use mod1
implicit none
integer, parameter :: DP = 0
! module_provides = {DP}
! use_provides = {GP,a,b,c,d,e}
end module mod3
module mod4
use mod2
implicit none
! module_provides = {}
! use_provides = {SP,a,b,c}
end module mod4
module mod5
use mod3, only: lGP => GP, a,b,e
use mod4, only: a2 => a, b2 => b
implicit none
integer, parameter :: FP = 1000
integer(kind=kind(0)) :: dummy
parameter (dummy = 20)
integer, private :: x,y,z
! module_provides = {FP, dummy}
! use_provides = {lGP, a, b, e, a2, b2}
end module mod5
module mod6
use mod5, qgp => lgp
implicit none
! module_provides = {}
! use_provides = {FP, dummy, a2, b2, qgp, a, b, e}
end module mod6
'''
    # PY2/3 note: dictionary keys are compared here, and their ordering is not
    # guaranteed to be consistent, so both sides are sorted before comparison.
tree = api.parse(source_str, isfree=True, isstrict=False)
mod5 = tree.a.module['mod5']
mod6 = tree.a.module['mod6']
assert sorted(list(mod5.a.module_provides.keys())) == sorted(['fp', 'dummy'])
assert sorted(list(mod5.a.use_provides.keys())) == sorted(['a', 'b', 'e', 'a2', 'b2', 'lgp'])
assert sorted(list(mod6.a.module_provides.keys())) == sorted([])
assert sorted(list(mod6.a.use_provides.keys())) == sorted(['fp', 'dummy', 'b', 'e', 'qgp', 'a2', 'a', 'b2'])
assert mod6.a.use_provides['qgp'].name == 'gp'
def test_walk():
source_str = '''\
! before foo
subroutine foo
integer i, r
do i = 1,100
r = r + 1
end do
! after end do
end subroutine foo
'''
tree = api.parse(source_str, isfree=True, isstrict=False, ignore_comments=False)
for stmt, depth in api.walk(tree, 1):
print(depth, stmt.item)
| mit | 5,374,607,897,788,353,000 | 27.341667 | 112 | 0.586298 | false |
bilbeyt/ITURO-Giant_Flat | flat/announce/migrations/0001_initial.py | 1 | 1270 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import swampdragon.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Announce',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.CharField(max_length=100)),
('pub_date', models.DateTimeField(default=datetime.datetime.now)),
('date', models.TextField(null=True)),
],
bases=(swampdragon.models.SelfPublishModel, models.Model),
),
migrations.CreateModel(
name='AnnounceList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
bases=(swampdragon.models.SelfPublishModel, models.Model),
),
migrations.AddField(
model_name='announce',
name='announce_list',
field=models.ForeignKey(to='announce.AnnounceList'),
),
]
| mit | -4,946,116,681,658,328,000 | 32.421053 | 114 | 0.575591 | false |
lakewik/storj-gui-client | UI/__init__.py | 1 | 2608 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import os
import click
import logging
import logging.config as config
from logging import handlers
APP_NAME = 'storj-gui'
"""(str): the application name."""
def setup_logging():
"""Reads the Storj GUI logging configuration from logging.conf.
If the file does not exist it will load a default configuration.
Mac OS X (POSIX):
~/.storj-gui
Unix (POSIX):
~/.storj-gui
    Win XP (not roaming):
        ``C:\\Documents and Settings\\<user>\\Application Data\\storj-gui``
    Win 7 (not roaming):
        ``C:\\Users\\<user>\\AppData\\Local\\storj-gui``
"""
logging_conf = os.path.join(
click.get_app_dir(APP_NAME, force_posix=True),
'logging.conf')
if not os.path.exists(logging_conf) or not os.path.isfile(logging_conf):
load_default_logging()
logging.getLogger(__name__).warning('%s logging configuration file does not exist', logging_conf)
return
try:
config.fileConfig(logging_conf, disable_existing_loggers=False)
logging.getLogger(__name__).info('%s configuration file was loaded.', logging_conf)
except RuntimeError:
load_default_logging()
logging.getLogger(__name__).warning('failed to load configuration from %s', logging_conf)
return
logging.getLogger(__name__).info('using logging configuration from %s', logging_conf)
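# A minimal logging.conf sketch for the file read above (illustrative only;
# the section layout follows the stdlib logging.config.fileConfig format):
#
#   [loggers]
#   keys=root
#
#   [handlers]
#   keys=console
#
#   [formatters]
#   keys=simple
#
#   [logger_root]
#   level=INFO
#   handlers=console
#
#   [handler_console]
#   class=StreamHandler
#   level=INFO
#   formatter=simple
#   args=(sys.stdout,)
#
#   [formatter_simple]
#   format=%(asctime)s %(levelname)s %(message)s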
def load_default_logging():
"""Load default logging configuration:
- >=INFO messages will be written to storj-gui.log
- >=DEBUG messages will be written to stdout
- >=ERROR message will be written to stderr
"""
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
# file
# maximum of 5 log files of 3MB
handler_file = handlers.RotatingFileHandler(
os.path.join(os.getcwd(), '%s.log' % APP_NAME),
maxBytes=(1048576 * 3), backupCount=5)
handler_file.setFormatter(formatter)
handler_file.setLevel(logging.INFO)
# stdout
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setFormatter(formatter)
    # stdout receives everything from DEBUG upwards; ERROR and above are
    # additionally written to stderr by the handler configured below
    handler_stdout.setLevel(logging.DEBUG)
# stderr
handler_stderr = logging.StreamHandler(sys.stderr)
handler_stderr.setFormatter(formatter)
handler_stderr.setLevel(logging.ERROR)
    logger = logging.getLogger(__name__)
    logger.addHandler(handler_file)
    logger.addHandler(handler_stdout)
    # The stderr handler was configured above but never attached, so errors
    # never reached stderr; add it as well.
    logger.addHandler(handler_stderr)
logging.getLogger(__name__).info('using default logging configuration')
setup_logging()
| mit | -7,573,279,076,914,696,000 | 27.659341 | 105 | 0.673313 | false |
jwdebelius/Machiavellian | machivellian/power.py | 1 | 15760 | r"""
Empirical Power Estimation (:mod:`skbio.stats.power`)
=====================================================
.. currentmodule:: skbio.stats.power
The purpose of this module is to provide empirical, post-hoc power estimation
of normally and non-normally distributed data. It also provides support to
subsample data to facilitate this analysis.
The underlying principle is based on subsampling and Monte Carlo simulation.
Assume that there is some set of populations, :math:`K_{1}, K_{2}, ... K_{n}`
which have some property, :math:`\mu` such that :math:`\mu_{1} \neq \mu_{2}
\neq ... \neq \mu_{n}`. For each of the populations, a sample, :math:`S` can be
drawn, with a parameter, :math:`x` where :math:`x \approx \mu` and for the
samples, we can use a test, :math:`f`, to show that :math:`x_{1} \neq x_{2}
\neq ... \neq x_{n}`.
Since we know that :math:`\mu_{1} \neq \mu_{2} \neq ... \neq \mu_{n}`,
we know we should reject the null hypothesis. If we fail to reject the null
hypothesis, we have committed a Type II error and our result is a false
negative. We can estimate the frequency of Type II errors at various sampling
depths by repeatedly subsampling the populations and observing how often we
see a false negative. If we repeat this several times for each subsampling
depth, and vary the depths we use, we can start to approximate a relationship
between the number of samples we use and the rate of false negatives, also
called the statistical power of the test.
To generate complete power curves from data which appears underpowered, the
`statsmodels.stats.power` package can be used to solve for an effect size. The
effect size can be used to extrapolate a power curve for the data.
Most functions in this module accept a statistical test function which takes a
list of samples and returns a p value. The test is then evaluated over a series
of subsamples.
Sampling may be handled in two ways. For any set of samples, we may simply
choose to draw :math:`n` observations at random for each sample. Alternatively,
if metadata is available, samples can be matched based on a set of control
categories so that paired samples are drawn at random from the set of available
matches.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
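# The module docstring mentions extrapolating power curves for underpowered
# data with `statsmodels.stats.power`. A minimal sketch (illustrative only;
# assumes a two-sample t-test and that statsmodels is installed):
#
#   >>> from statsmodels.stats.power import TTestIndPower
#   >>> analysis = TTestIndPower()
#   >>> # solve for the effect size given an observed power at n=25
#   >>> effect = analysis.solve_power(nobs1=25, alpha=0.05, power=0.8)
#   >>> # then extrapolate the power curve over a range of sample sizes
#   >>> curve = analysis.power(effect, nobs1=range(5, 100, 5), alpha=0.05)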
from functools import partial
import numpy as np
import scipy.stats
def subsample_power(test, samples, counts, draw_mode='ind', numeric=True,
alpha=0.05, ratio=None, bootstrap=True, num_iter=500,
num_runs=10, test_kwargs=None):
"""Subsamples data to iteratively calculate power
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values when `numeric == True`; or a boolean value
indicating the null hypothesis should be rejected, or a
one-dimensional array of boolean values indicating the null
hypothesis should be rejected when `numeric == False`. Additional
keyword arguments can be provided with `test_kwargs`.
samples : array_like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
counts : array-like
The depths at which to sample the data. If `bootstrap == False`, the
largest count depth times the group ratio cannot be greater than the
number of observations in each group.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
numeric : bool, optional
Indicates whether `test` returns a numeric p-value or array of numeric
p values (`numeric=True`), or a boolean (`numeric=False`).
alpha : float, optional
The critical value used to calculate the power.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one. If `bootstrap == False`, then
the product of the `ratio` and a sampling depth specified by `counts`
cannot be greater than the number of observations in the respective
sample.
bootstrap : bool, optional
Indicates whether subsampling should be performed with replacement
(`bootstrap == True`) or without.
num_iter : positive int, optional
The number of p-values to generate for each point
on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
test_kwargs: dict, optional
Additional keyword arguments for the `test` which may include
parameters like a dataframe of values or distance matrix.
Returns
-------
ndarray
The power calculated for each subsample at each count. The array has
`num_runs` rows, a length with the same number of elements as
`sample_counts` and a depth equal to the number of p values returned by
`test`. If `test` returns a float, the returned array will be
two-dimensional instead of three.
Raises
------
ValueError
If the `mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
ValueError
When `replace` is true, and `counts` and `ratio` will draw more
observations than exist in a sample.
"""
if isinstance(test_kwargs, dict):
test = partial(test, **test_kwargs)
# Checks the inputs
ratio, num_p = _check_subsample_power_inputs(test=test,
samples=samples,
draw_mode=draw_mode,
ratio=ratio,
bootstrap=bootstrap,
counts=counts)
    # Preallocates the power array
power = np.zeros((num_runs, len(counts), num_p))
for id2, c in enumerate(counts):
count = np.round(c * ratio, 0).astype(int)
for id1 in range(num_runs):
ps = _compare_distributions(test=test,
samples=samples,
num_p=num_p,
counts=count,
num_iter=num_iter,
bootstrap=bootstrap,
mode=draw_mode)
power[id1, id2, :] = _calculate_power(ps,
numeric=numeric,
alpha=alpha)
power = power.squeeze()
return power
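# A minimal usage sketch (illustrative only; the simulated populations and the
# t-test wrapper `f` below are assumptions, not part of this module):
#
#   >>> import numpy as np
#   >>> import scipy.stats
#   >>> np.random.seed(42)
#   >>> samples = [np.random.randn(100), np.random.randn(100) + 0.5]
#   >>> f = lambda x: scipy.stats.ttest_ind(x[0], x[1])[1]
#   >>> power = subsample_power(f, samples, counts=[5, 15, 25, 35, 45],
#   ...                         num_iter=100, num_runs=5)
#   >>> power.shape   # (num_runs, len(counts)) once the num_p axis squeezes
#   (5, 5)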
def confidence_bound(vec, alpha=0.05, df=None, axis=None):
r"""Calculates a confidence bound assuming a normal distribution
Parameters
----------
vec : array_like
The array of values to use in the bound calculation.
alpha : float, optional
The critical value, used for the confidence bound calculation.
df : float, optional
The degrees of freedom associated with the
distribution. If None is given, df is assumed to be the number of
elements in specified axis.
axis : positive int, optional
The axis over which to take the deviation. When axis
is None, a single value will be calculated for the whole matrix.
Returns
-------
bound : float
The confidence bound around the mean. The confidence interval is
[mean - bound, mean + bound].
"""
# Determines the number of non-nan counts
vec = np.asarray(vec)
vec_shape = vec.shape
if axis is None and len(vec_shape) == 1:
num_counts = vec_shape[0] - np.isnan(vec).sum()
elif axis is None:
num_counts = vec_shape[0] * vec_shape[1] - np.isnan(vec).sum()
else:
num_counts = vec_shape[axis] - np.isnan(vec).sum() / \
(vec_shape[0] * vec_shape[1])
# Gets the df if not supplied
if df is None:
df = num_counts - 1
# Calculates the bound
# In the conversion from scipy.stats.nanstd -> np.nanstd `ddof=1` had to be
# added to match the scipy default of `bias=False`.
bound = np.nanstd(vec, axis=axis, ddof=1) / np.sqrt(num_counts - 1) * \
scipy.stats.t.ppf(1 - alpha / 2, df)
return bound
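# For example, with the default alpha of 0.05 a 95% confidence interval for
# the mean of `vec` is (vec.mean() - confidence_bound(vec),
# vec.mean() + confidence_bound(vec)).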
def _compare_distributions(test, samples, num_p, counts=5, mode="ind",
bootstrap=True, num_iter=100):
r"""Compares two distribution arrays iteratively
"""
    # Preallocates the p-value matrix
p_values = np.zeros((num_p, num_iter))
# Determines the number of samples per group
num_groups = len(samples)
samp_lens = [len(sample) for sample in samples]
if isinstance(counts, int):
counts = np.array([counts] * num_groups)
for idx in range(num_iter):
if mode == "matched":
pos = np.random.choice(np.arange(0, samp_lens[0]), counts[0],
replace=bootstrap)
subs = [sample[pos] for sample in samples]
else:
subs = [np.random.choice(np.array(pop), counts[i],
replace=bootstrap)
for i, pop in enumerate(samples)]
p_values[:, idx] = test(subs)
if num_p == 1:
p_values = p_values.squeeze()
return p_values
def _calculate_power(p_values, alpha=0.05, numeric=True):
r"""Calculates statistical power empirically for p-values
Parameters
----------
p_values : 1-D array
A 1-D numpy array with the test results.
alpha : float
The critical value for the power calculation.
numeric : Boolean
Indicates whether a numeric p value should be used
Returns
-------
power : float
The empirical power, or the fraction of observed p values below the
critical value.
"""
if numeric:
reject = np.atleast_2d(p_values < alpha)
else:
reject = np.atleast_2d(p_values)
    # Use true (float) division so the fraction is correct under Python 2 too
    w = reject.sum(axis=1) / float(reject.shape[1])
return w
def _check_subsample_power_inputs(test, samples, counts, draw_mode='ind',
ratio=None, bootstrap=True):
"""Makes sure that everything is sane before power calculations
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values.
samples : array-like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
counts : 1-D array
The number of samples to use for each power depth calculation. If
`replace` is False, than `counts` and `ratio` must be scaled so that
no more samples are drawn than exist in a sample.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
If there is no reciprocal relationship between samples, then
"ind" mode should be used.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one.
bootstrap : Bool
Whether samples should be bootstrapped or subsampled without
replacement. When `bootstrap == False`, `counts` and `ratio` must
be scaled so that no more observations are drawn than exist in a
sample.
Returns
-------
ratio : 1-D array
The fraction of the sample counts which should be assigned to each
group.
num_p : positive integer
The number of p values returned by `test`.
Raises
------
ValueError
If the `mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
ValueError
When `replace` is true, and `counts` and `ratio` will draw more
observations than exist in a sample.
"""
# Checks the sample drawing model
if draw_mode not in {'ind', 'matched'}:
raise ValueError('mode must be "matched" or "ind".')
# Determines the minimum number of ids in a category
id_counts = np.array([len(id_) for id_ in samples])
num_groups = len(samples)
# Checks the ratio argument
if ratio is None or draw_mode == 'matched':
ratio = np.ones((num_groups))
else:
ratio = np.asarray(ratio)
if not ratio.shape == (num_groups,):
raise ValueError('There must be a ratio for each group.')
    ratio_counts = np.array([id_counts[i] / ratio[i]
                             for i in range(num_groups)])
    # The deepest subsample depth that every group can support at this ratio
    largest = ratio_counts.min()
# Determines the number of p values returned by the test
p_return = test(samples)
if isinstance(p_return, float):
num_p = 1
elif isinstance(p_return, np.ndarray) and len(p_return.shape) == 1:
num_p = p_return.shape[0]
else:
raise TypeError('test must return a float or one-dimensional array.')
# Checks the subsample size
counts = np.asarray(counts)
if counts.min() < 2:
        raise ValueError('you cannot test fewer than 2 samples per group')
elif not bootstrap and counts.max() > largest:
raise ValueError('Sampling depth is too high. Please use replacement '
'or pick fewer observations.')
return ratio, num_p
| bsd-3-clause | -7,454,167,816,452,190,000 | 39.204082 | 79 | 0.621066 | false |
avaris/aBibliophile | searchmodel.py | 1 | 1847 | #!/usr/bin/env python
# -.- coding: utf-8 -.-
# Author : Deniz Turgut
# Created : 05.11.2011
from PyQt4 import QtGui, QtCore
class SearchModel(QtGui.QSortFilterProxyModel):
def __init__(self, parent=None):
super(SearchModel, self).__init__(parent)
self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.setSortLocaleAware(True)
self.setDynamicSortFilter(True)
self.setSourceModel(SearchBaseModel())
self.sort(0)
def clear(self):
self.sourceModel().clear()
def addDataFromList(self, bookList):
self.sourceModel().addDataFromList(bookList)
class SearchBaseModel(QtCore.QAbstractItemModel):
def __init__(self, parent=None):
super(SearchBaseModel, self).__init__(parent)
self._data = []
def rowCount(self, parent):
return len(self._data)
def columnCount(self, parent):
return 1
def index(self, row, column, parent):
return self.createIndex(row, column, QtCore.QModelIndex())
def parent(self, index):
return QtCore.QModelIndex()
def data(self, index, role):
if role == QtCore.Qt.DisplayRole:
return self._data[index.row()]["title"]
elif role == QtCore.Qt.ToolTipRole:
writer = ", ".join(self._data[index.row()]["writers"])
publisher = self._data[index.row()]["publisher"]
return self.tr("Writer: %s\nPublisher: %s") % (writer, publisher)
elif role == QtCore.Qt.UserRole:
return self._data[index.row()]["url"]
def addData(self, data):
self._data.append(data)
def addDataFromList(self, dataList):
self.layoutAboutToBeChanged.emit()
for data in dataList:
self.addData(data)
self.layoutChanged.emit()
    def clear(self):
        # Wrap the reset in model-reset signals so attached views refresh
        self.beginResetModel()
        self._data = []
        self.endResetModel()
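# A minimal usage sketch (illustrative; the surrounding widgets are
# assumptions -- the model only needs dicts with the keys read in data()):
#
#   view = QtGui.QListView()
#   model = SearchModel()
#   view.setModel(model)
#   model.addDataFromList([
#       {'title': 'Example Book', 'writers': ['A. Writer'],
#        'publisher': 'Example Press', 'url': 'http://example.com/book'},
#   ])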
| gpl-3.0 | -507,099,931,512,108,800 | 29.278689 | 77 | 0.617759 | false |
atados/atados-ovp | api/channels/rrp/migrations/0001_initial.py | 1 | 3030 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-03 08:41
from __future__ import unicode_literals
from django.db import migrations
from ovp.apps.core.helpers import generate_slug
def foward_func(apps, schema_editor):
Channel = apps.get_model("channels", "Channel")
channel = Channel.objects.create(name="Rede Real Panorama", slug="rrp")
# We freeze default channels skills and causes because
# post_save signals are not sent from migrations
from ovp.apps.core.models.skill import SKILLS
from ovp.apps.core.models.cause import CAUSES
Skill = apps.get_model("core", "Skill")
Cause = apps.get_model("core", "Cause")
for skill in SKILLS:
skill = Skill.objects.create(name=skill, channel=channel)
skill.slug = generate_slug(Skill, skill.name, skill.channel.slug)
skill.save()
for cause in CAUSES:
cause = Cause.objects.create(name=cause, channel=channel)
cause.slug = generate_slug(Cause, cause.name, cause.channel.slug)
cause.save()
# Create channel settings
ChannelSetting = apps.get_model("channels", "ChannelSetting")
ChannelSetting.objects.create(key="MAPS_API_LANGUAGE", value="pt-br", channel=channel)
ChannelSetting.objects.create(key="CAN_CREATE_PROJECTS_WITHOUT_ORGANIZATION", value="1", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="volunteerUnapplied-toOwner", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userInvited-toMemberInviter", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userInvited-toOwner", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userInvited-toOwnerInviter", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userInvitedRevoked-toMemberInviter", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userInvitedRevoked-toOwner", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userInvitedRevoked-toOwnerInviter", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userInvitedRevoked-toUser", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userJoined-toUser", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userLeft-toOwner", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userLeft-toUser", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userRemoved-toOwner", channel=channel)
ChannelSetting.objects.create(key="DISABLE_EMAIL", value="userRemoved-toUser", channel=channel)
def rewind_func(apps, schema_editor):
return True
class Migration(migrations.Migration):
dependencies = [
('default', '0001_initial'),
('channels', '0008_channel_subchannels'),
('core', '0021_auto_20171005_1902')
]
operations = [
migrations.RunPython(foward_func, rewind_func)
]
| agpl-3.0 | 213,053,124,247,227,740 | 46.34375 | 115 | 0.734983 | false |
minlexx/skype_movie_bot | classes/yandex_translate.py | 1 | 2217 | # -*- coding: utf-8 -*-
import sys
import collections
# external libraries
import requests
import requests.exceptions
class YandexTranslate:
def __init__(self, yandex_api_key: str):
self._apikey = yandex_api_key
self._yt_url = 'https://translate.yandex.net/api/v1.5/tr.json/translate'
def translate(self, q: str, src_lang: str, dst_lang: str, fmt: str = 'plain') -> str:
"""
Translates string using Yandex translation service
:param q: strint to translate
:param src_lang: source lang code ('jp')
:param dst_lang: dest lang code ('en')
:param fmt: text format: 'plain' or 'html'
:return: translated string
"""
retval = ''
if fmt not in ['plain', 'html']:
raise ValueError('fmt must be plain or html!')
params = collections.OrderedDict()
params['key'] = self._apikey
params['text'] = q
params['lang'] = src_lang + '-' + dst_lang
params['format'] = fmt
try:
r = requests.get(self._yt_url, params=params)
r.raise_for_status()
response = r.json()
            if isinstance(response, dict):
if 'text' in response:
retval = response['text']
except requests.exceptions.RequestException as re:
sys.stderr.write('Network error: {0}'.format(str(re)))
return retval
def test_yandextranslate(yandex_api_key: str):
yt = YandexTranslate(yandex_api_key)
res = yt.translate('はい', 'ja', 'en')
print(res)
res = yt.translate('少女', 'ja', 'en')
print(res)
res = yt.translate('カグラ使われが送るワイバーン生活 0日目(テスト動画)', 'ja', 'en')
print(res)
def yandex_translate_jp_en(text: str) -> str:
    yt = YandexTranslate('trnsl.1.1.20160418T102823Z.888167e74b48bd0b.1c6431f34c3e545d654a8f77054d609de0a87ce3')
    # Yandex expects ISO 639-1 language codes, so Japanese is 'ja', not 'jp'.
    return yt.translate(text, 'ja', 'en')
if __name__ == '__main__':
api = 'trnsl.1.1.20160418T102823Z.888167e74b48bd0b.1c6431f34c3e545d654a8f77054d609de0a87ce3'
test_yandextranslate(api)
| gpl-3.0 | -2,401,171,451,816,979,500 | 29.779412 | 112 | 0.576122 | false |
polarsbear/ThreeDPi | Bipolar_Stepper_Motor_Class_Full_Step.py | 1 | 2366 | import RPi.GPIO as GPIO
import time
#sequence for a1, b2, a2, b1
phase_seq=[[1,1,0,0],[0,1,1,0],[0,0,1,1],[1,0,0,1]];
#full step sequence. maximum torque
#phase_seq=[[1,0,0,0],[1,1,0,0],[0,1,0,0],[0,1,1,0],[0,0,1,0],[0,0,1,1],[0,0,0,1],[1,0,0,1]]
#half-step sequence. double resolution. But the torque of the stepper motor is not constant
num_phase=len(phase_seq);
class Bipolar_Stepper_Motor_Full_Step:
    """Drive a bipolar stepper motor in full-step mode via four GPIO pins."""
phase=0;
dirction=0;
position=0;
a1=0;#pin numbers
a2=0;
b1=0;
b2=0;
def __init__(self,a1,a2,b1,b2):
#initial a Bipolar_Stepper_Moter objects by assigning the pins
GPIO.setmode(GPIO.BCM);
self.a1=a1;
self.a2=a2;
self.b1=b1;
self.b2=b2;
GPIO.setup(self.a1,GPIO.OUT);
GPIO.setup(self.a2,GPIO.OUT);
GPIO.setup(self.b1,GPIO.OUT);
GPIO.setup(self.b2,GPIO.OUT);
GPIO.output(self.a1,0);
GPIO.output(self.a2,0);
GPIO.output(self.b1,0);
GPIO.output(self.b2,0);
print("Stepper Configured");
self.phase=0;
self.dirction=0;
self.position=0;
def move(self, dirction, steps, delay=0.2):
for _ in range(steps):
next_phase=(self.phase+dirction) % num_phase;
if phase_seq[next_phase][0] ==1:
GPIO.output(self.a1,phase_seq[next_phase][0]);
if phase_seq[next_phase][1] ==1:
GPIO.output(self.b2,phase_seq[next_phase][1]);
if phase_seq[next_phase][2] ==1:
GPIO.output(self.a2,phase_seq[next_phase][2]);
if phase_seq[next_phase][3] ==1:
GPIO.output(self.b1,phase_seq[next_phase][3]);
GPIO.output(self.a1,phase_seq[next_phase][0]);
GPIO.output(self.b2,phase_seq[next_phase][1]);
GPIO.output(self.a2,phase_seq[next_phase][2]);
GPIO.output(self.b1,phase_seq[next_phase][3]);
self.phase=next_phase;
self.dirction=dirction;
self.position+=dirction;
time.sleep(delay);
def unhold(self):
GPIO.output(self.a1,0);
GPIO.output(self.a2,0);
GPIO.output(self.b1,0);
GPIO.output(self.b2,0);
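# A minimal usage sketch (illustrative; the BCM pin numbers below are
# assumptions -- wire them to match your driver board):
#
#   motor = Bipolar_Stepper_Motor_Full_Step(a1=17, a2=18, b1=27, b2=22)
#   motor.move(1, 200, delay=0.005)    # 200 steps in one direction
#   motor.move(-1, 200, delay=0.005)   # and back again
#   motor.unhold()                     # de-energize the coils
#   GPIO.cleanup()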
| lgpl-2.1 | -3,164,008,024,690,108,000 | 29.727273 | 92 | 0.522401 | false |
mitschabaude/nanopores | scripts/howorka/selectivity.py | 1 | 4668 | """Run the diffusion equation to determine the selectivity of a fluorophore,
i.e. current and release time series for a specific molecule."""
from nanopores.tools.fields import cache
import nanopores
import dolfin
import matplotlib.pyplot as plt
from nanopores.physics.convdiff import ConvectionDiffusion
import forcefields
from eikonal import boundary_force
p = nanopores.user_params(
overwrite = False,
bforce = True,
levels = 12,
t = 1e-8,
steps = 20,
Qmol = -1,
rMolecule = 0.5,
implicit = False,
R = 100.,
h = 4.,
Nmax = 1e5,
dnaqsdamp = 1.,
)
# force field parameters
f_params = dict(
Qmol = p.Qmol,
rMolecule = p.rMolecule,
implicit = p.implicit,
Ry = p.R,
Rx = p.R,
Nmax = p.Nmax,
h = p.h,
dnaqsdamp = p.dnaqsdamp,
)
# parameters for selectivity calculation
sel_params = dict(
bforce = p.bforce,
fluocon = 100., # initial concentration [mM] in upper reservoir
# parameters regarding timestepping
levels = p.levels, # levels > 1 --> logarithmic time
t = p.t, # total time of first level
steps = p.steps, # timesteps per level
)
default = dict(sel_params, **f_params)
def calculate_selectivity(F, geo, phys, fluocon=1, t=1e0, steps=100, levels=1):
"core functionality of the module"
# concentration in 1/nm**3 (1 M = 0.6 /nm**3)
c0 = fluocon*(phys.mol*phys.nm**3)
u0 = geo.pwconst("c0", dict(bulkfluidtop = c0, default=0.))
# total concentration
ctot = dolfin.assemble(u0*dolfin.Expression("2*pi*x[0]")*geo.dx())
phys.ctot = ctot
print "Total concentration:", ctot, "molecules."
# convect
phys.F = F
frac = 1./steps
dt = t/steps
bc = {} #dict(upperb=dolfin.Constant(0.), lowerb=dolfin.Constant(0.))
pde = ConvectionDiffusion(geo, phys, dt=dt, F=F, u0=u0, bc=bc, cyl=True)
pde.add_functionals([current, concentration])
pde.timerange = nanopores.logtimerange(t,
levels=levels, frac=frac, change_dt=pde.change_dt)
for t_ in pde.timesteps(t=t):
pde.record_functionals()
pde.visualize()
# obtain current, release
return dict(
time = pde.time,
release = pde.functionals["cbottom"].values,
current = pde.functionals["J"].values)
# functionals
def current(U, geo):
u, = U
r2pi = dolfin.Expression("2*pi*x[0]")
phys = geo.physics
grad = phys.grad
D = geo.pwconst("Dtarget")
kT = dolfin.Constant(phys.kT)
F = phys.F
# current density [1/nm**3]*[nm/ns] = [1/(ns*nm**2)]
j = -D*grad(u) + D/kT*F*u
#lscale = Constant(phys.lscale)
L = dolfin.Constant(9.) # pore length
# current in 1/ns
J = -j[1]/L *r2pi*geo.dx("pore")
# current in 1/ms
J = 1e6*J
return dict(J=J)
def concentration(U, geo):
u, = U
ctot = geo.physics.ctot
r2pi = dolfin.Expression("2*pi*x[0]")
# urel = % of total concentration
urel = u/dolfin.Constant(ctot/100.)
c = urel *r2pi*geo.dx()
ctop = urel *r2pi*geo.dx("bulkfluidtop")
cbottom = urel *r2pi*geo.dx("bulkfluidbottom")
cpore = urel *r2pi*geo.dx("pore")
return dict(c=c, ctop=ctop, cbottom=cbottom, cpore=cpore)
def _diff(dic, keys):
dic = dic.copy()
return {k : dic.pop(k) for k in keys}, dic
# user interface
@cache("selectivity", default, overwrite=p.overwrite)
def selectivity(params):
# filter out selectivity params
sparams, fparams = _diff(params, sel_params.keys())
bforce = sparams.pop("bforce")
# get PNPS force
#F, geo, phys = forcefields.F_geo_phys(p.overwrite, **fparams)
F, geo, phys = forcefields.F_geo_phys(**fparams)
# get additional boundary force
if bforce:
Fb, _ = boundary_force(mesh=geo.mesh, **fparams)
F = F + Fb
result = calculate_selectivity(F, geo, phys, **sparams)
result["params"] = params
return result
if __name__ == "__main__":
import numpy
results = nanopores.Params(selectivity(**default))
t = results.time
J = results.current
rel = results.release
params = results.params
plt.figure(0)
plt.semilogx(t, rel, "x-")
plt.xlabel("time [s]")
plt.ylabel("% release")
plt.title("reservoir size: %.0f nm" % (params["Ry"],))
plt.ylim(ymin=0.)
def avg(J):
        n = len(J)
        # average over the quasi-equilibrium window (20%-50% of the run);
        # slice indices must be integers
        J0 = list(numpy.array(J)[int(n * 0.2):int(n * 0.5)])
return sum(J0)/len(J0)
plt.figure(1)
plt.semilogx(t, J, "x-")
plt.xlabel("time [s]")
plt.ylabel("current through pore [1/ms]")
J0 = avg(J)
plt.plot(t, [J0]*len(t), "k--")
plt.title("quasi-equilibrium current: %.1f" % J0)
plt.ylim(ymin=0.)
plt.show()
| mit | -279,110,832,715,525,500 | 27.638037 | 79 | 0.610111 | false |
sdbonin/SOQresearch | SOQplot.py | 1 | 2527 | """
SOQplot.py - sdbonin (work in progress)
read _plot array from txt and plot them
"""
import numpy as np
import matplotlib.pyplot as plt
S_plot = np.loadtxt('S_plot.txt',delimiter=',')
q_plot = np.loadtxt('q_plot.txt',delimiter=',')
p_plot = np.loadtxt('p_plot.txt',delimiter=',')
time = np.loadtxt('time.txt',delimiter=',')
S_1r = S_plot[:,0] #= S_1r
S_1x = S_plot[:,1] #= S_1x
S_1y = S_plot[:,2] #= S_1y
S_1z = S_plot[:,3] #= S_1z
S_2r = S_plot[:,4] #= S_2r
S_2x = S_plot[:,5] #= S_2x
S_2y = S_plot[:,6] #= S_2y
S_2z = S_plot[:,7] #= S_2z
q_1x = q_plot[:,1] #= q_1x
q_1y = q_plot[:,2] #= q_1y
q_1z = q_plot[:,3] #= q_1z
q_2x = q_plot[:,5] #= q_2x
q_2y = q_plot[:,6] #= q_2y
q_2z = q_plot[:,7] #= q_2z
q_1r = q_plot[:,0] #= q_1r
q_2r = q_plot[:,4] #= q_2r
p_1x = p_plot[:,1] #= p_1x
p_1y = p_plot[:,2] #= p_1y
p_1z = p_plot[:,3] #= p_1z
p_2x = p_plot[:,5] #= p_2x
p_2y = p_plot[:,6] #= p_2y
p_2z = p_plot[:,7] #= p_2z
p_1r = p_plot[:,0] #= p_1r
p_2r = p_plot[:,4] #= p_2r
plt.figure()
plt.subplot(221)
#plt.semilogy(time,np.abs(S_1r),label='S_1r',color='purple')
plt.plot(time,np.abs(S_1r),label='S_1r',color='purple')
plt.plot(time,S_1x,label='S_1i',color='red')
plt.plot(time,S_1y,label='S_1j',color='blue')
plt.plot(time,S_1z,label='S_1k',color='green')
plt.xlabel('time')
plt.ylabel('S_1')
plt.legend(loc='best')
axes = plt.gca()
axes.set_ylim([-1,1])
plt.subplot(222)
#plt.semilogy(time,np.abs(S_2r),label='S_2r',color='purple')
plt.plot(time,S_2r,label='S_2r',color='purple')
plt.plot(time,S_2x,label='S_2i',color='red')
plt.plot(time,S_2y,label='S_2j',color='blue')
plt.plot(time,S_2z,label='S_2k',color='green')
plt.xlabel('time')
plt.ylabel('S_2')
plt.legend(loc='best')
axes = plt.gca()
axes.set_ylim([-1,1])
plt.subplot(223)
#plt.semilogy(time,np.abs(S_2r),label='S_2r',color='purple')
plt.plot(time,S_1r,label='S_1r',color='purple')
plt.plot(time,S_1x,label='S_1i',color='red')
plt.plot(time,S_1y,label='S_1j',color='blue')
plt.plot(time,S_1z,label='S_1k',color='green')
plt.xlabel('time')
plt.ylabel('S_1')
plt.legend(loc='best')
axes = plt.gca()
axes.set_ylim([-1,1])
axes.set_xlim([500,512.35])
plt.subplot(224)
#plt.semilogy(time,np.abs(S_2r),label='S_2r',color='purple')
plt.plot(time,S_2r,label='S_2r',color='purple')
plt.plot(time,S_2x,label='S_2i',color='red')
plt.plot(time,S_2y,label='S_2j',color='blue')
plt.plot(time,S_2z,label='S_2k',color='green')
plt.xlabel('time')
plt.ylabel('S_2')
plt.legend(loc='best')
axes = plt.gca()
axes.set_ylim([-1,1])
axes.set_xlim([0,12.35])
plt.show()
| mit | -2,696,191,542,874,873,000 | 25.051546 | 60 | 0.611397 | false |
bd808/tools-stashbot | stashbot/bot.py | 1 | 11661 | # -*- coding: utf-8 -*-
#
# This file is part of bd808's stashbot application
# Copyright (C) 2015 Bryan Davis and contributors
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
"""IRC bot"""
import collections
import functools
import irc.bot
import irc.buffer
import irc.client
import irc.strings
import re
import time
from . import es
from . import phab
from . import sal
RE_PHAB_NOURL = re.compile(r'(?:^|[^/%])\b([DMT]\d+)\b')
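# Matches bare Phabricator object ids such as "T1234", "D56" or "M7"; the
# leading [^/%] guard keeps ids embedded in URLs (e.g.
# "phab.example.org/T1234") or urlencoded text from matching.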
class Stashbot(irc.bot.SingleServerIRCBot):
def __init__(self, config, logger):
"""Create bot.
:param config: Dict of configuration values
:param logger: Logger
"""
self.config = config
self.logger = logger
self.es = es.Client(
self.config['elasticsearch']['servers'],
self.config['elasticsearch']['options'],
self.logger
)
self.phab = phab.Client(
self.config['phab']['url'],
self.config['phab']['user'],
self.config['phab']['key']
)
self.sal = sal.Logger(
self, self.phab, self.es, self.config, self.logger)
self.recent_phab = collections.defaultdict(dict)
# Ugh. A UTF-8 only world is a nice dream but the real world is all
# yucky and full of legacy encoding issues that should not crash my
# bot.
irc.buffer.LenientDecodingLineBuffer.errors = 'replace'
irc.client.ServerConnection.buffer_class = \
irc.buffer.LenientDecodingLineBuffer
super(Stashbot, self).__init__(
[(self.config['irc']['server'], self.config['irc']['port'])],
self.config['irc']['nick'],
self.config['irc']['realname']
)
# Setup a connection check ping
self.pings = 0
self.reactor.scheduler.execute_every(
period=300, func=self.do_ping)
# Clean phab recent cache every once in a while
self.reactor.scheduler.execute_every(
period=3600, func=self.do_clean_recent_phab)
def get_version(self):
return 'Stashbot'
def on_welcome(self, conn, event):
self.logger.info('Connected to server %s', conn.get_server_name())
if 'password' in self.config['irc']:
self.do_identify()
else:
self.reactor.scheduler.execute_after(1, self.do_join)
def on_nicknameinuse(self, conn, event):
nick = conn.get_nickname()
self.logger.warning('Requested nick "%s" in use', nick)
conn.nick(nick + '_')
if 'password' in self.config['irc']:
self.reactor.scheduler.execute_after(30, self.do_reclaim_nick)
def on_join(self, conn, event):
nick = event.source.nick
if nick == conn.get_nickname():
self.logger.info('Joined %s', event.target)
def on_privnotice(self, conn, event):
self.logger.warning(str(event))
msg = event.arguments[0]
if event.source.nick == 'NickServ':
if 'NickServ identify' in msg:
self.logger.info('Authentication requested by Nickserv')
if 'password' in self.config['irc']:
self.do_identify()
else:
self.logger.error('No password in config!')
self.die()
elif 'You are now identified' in msg:
self.logger.debug('Authenticating succeeded')
self.reactor.scheduler.execute_after(1, self.do_join)
elif 'Invalid password' in msg:
self.logger.error('Password invalid. Check your config!')
self.die()
def on_pubnotice(self, conn, event):
self.logger.warning(str(event))
def on_pubmsg(self, conn, event):
# Log all public channel messages we receive
doc = self.es.event_to_doc(conn, event)
self.do_write_to_elasticsearch(conn, event, doc)
ignore = self.config['irc'].get('ignore', [])
if self._clean_nick(doc['nick']) in ignore:
return
# Look for special messages
msg = event.arguments[0]
if msg.startswith('!log help'):
self.do_help(conn, event)
elif msg.startswith(conn.get_nickname()):
self.do_help(conn, event)
elif msg.startswith(self.config['irc']['nick']):
self.do_help(conn, event)
elif msg.startswith('!log '):
self.sal.log(conn, event, doc)
elif msg.startswith('!bash '):
self.do_bash(conn, event, doc)
if (event.target not in self.config['phab'].get('notin', []) and
'echo' in self.config['phab'] and
RE_PHAB_NOURL.search(msg)
):
self.do_phabecho(conn, event, doc)
def on_privmsg(self, conn, event):
msg = event.arguments[0]
if msg.startswith('!bash '):
doc = self.es.event_to_doc(conn, event)
self.do_bash(conn, event, doc)
else:
self.respond(conn, event, event.arguments[0][::-1])
def on_pong(self, conn, event):
"""Clear ping count when a pong is received."""
self.pings = 0
def on_error(self, conn, event):
"""Log errors and disconnect."""
self.logger.warning(str(event))
conn.disconnect()
def on_kick(self, conn, event):
"""Attempt to rejoin if kicked from a channel."""
nick = event.arguments[0]
channel = event.target
if nick == conn.get_nickname():
            self.logger.warning(
'Kicked from %s by %s', channel, event.source.nick)
self.reactor.scheduler.execute_after(
30, functools.partial(conn.join, channel))
def on_bannedfromchan(self, conn, event):
"""Attempt to rejoin if banned from a channel."""
self.logger.warning(str(event))
self.reactor.scheduler.execute_after(
60, functools.partial(conn.join, event.arguments[0]))
def do_identify(self):
"""Send NickServ our username and password."""
self.logger.info('Authentication requested by Nickserv')
self.connection.privmsg('NickServ', 'identify %s %s' % (
self.config['irc']['nick'], self.config['irc']['password']))
def do_join(self, channels=None):
"""Join the next channel in our join list."""
if channels is None:
channels = self.config['irc']['channels']
try:
car, cdr = channels[0], channels[1:]
except (IndexError, TypeError):
self.logger.exception('Failed to find channel to join.')
else:
self.logger.info('Joining %s', car)
self.connection.join(car)
if cdr:
self.reactor.scheduler.execute_after(
1, functools.partial(self.do_join, cdr))
def do_reclaim_nick(self):
nick = self.connection.get_nickname()
if nick != self.config['irc']['nick']:
self.connection.nick(self.config['irc']['nick'])
def do_ping(self):
"""Send a ping or disconnect if too many pings are outstanding."""
if self.pings >= 2:
self.logger.warning('Connection timed out. Disconnecting.')
self.disconnect()
self.pings = 0
else:
try:
self.connection.ping('keep-alive')
self.pings += 1
except irc.client.ServerNotConnectedError:
pass
def do_write_to_elasticsearch(self, conn, event, doc):
"""Log an IRC channel message to Elasticsearch."""
fmt = self.config['elasticsearch']['index']
self.es.index(
index=time.strftime(fmt, time.gmtime()),
doc_type='irc', body=doc)
def do_help(self, conn, event):
"""Handle a help message request"""
self.respond(
conn, event,
'See https://wikitech.wikimedia.org/wiki/Tool:Stashbot for help.'
)
def do_bash(self, conn, event, doc):
"""Process a !bash message"""
bash = dict(doc)
# Trim '!bash ' from the front of the message
msg = bash['message'][6:]
# Expand tabs to line breaks
bash['message'] = msg.replace("\t", "\n").strip()
bash['type'] = 'bash'
bash['up_votes'] = 0
bash['down_votes'] = 0
bash['score'] = 0
# Remove unneeded irc fields
del bash['user']
del bash['channel']
del bash['server']
del bash['host']
ret = self.es.index(index='bash', doc_type='bash', body=bash)
if 'created' in ret and ret['created'] is True:
self.respond(conn, event,
'%s: Stored quip at %s' % (
event.source.nick,
self.config['bash']['view_url'] % ret['_id']
)
)
else:
self.logger.error('Failed to save document: %s', ret)
self.respond(conn, event,
'%s: Yuck. Something blew up when I tried to save that.' % (
event.source.nick,
)
)
def do_phabecho(self, conn, event, doc):
"""Give links to Phabricator tasks"""
channel = event.target
now = time.time()
cutoff = self.get_phab_echo_cutoff(channel)
for task in set(RE_PHAB_NOURL.findall(doc['message'])):
if task in self.recent_phab[channel]:
if self.recent_phab[channel][task] > cutoff:
# Don't spam a channel with links
self.logger.debug(
'Ignoring %s; last seen @%d',
task, self.recent_phab[channel][task])
continue
try:
info = self.phab.taskInfo(task)
            except Exception:
self.logger.exception('Failed to lookup info for %s', task)
else:
self.respond(conn, event, self.config['phab']['echo'] % info)
self.recent_phab[channel][task] = now
def get_phab_echo_cutoff(self, channel):
"""Get phab echo delay for the given channel."""
return time.time() - self.config['phab']['delay'].get(
channel, self.config['phab']['delay']['__default__'])
def do_clean_recent_phab(self):
"""Clean old items out of the recent_phab cache."""
        # Copy the key views to lists so entries can be deleted while
        # iterating (required on Python 3, harmless on Python 2).
        for channel in list(self.recent_phab.keys()):
            cutoff = self.get_phab_echo_cutoff(channel)
            for item in list(self.recent_phab[channel].keys()):
                if self.recent_phab[channel][item] < cutoff:
                    del self.recent_phab[channel][item]
def _clean_nick(self, nick):
"""Remove common status indicators and normlize to lower case."""
return nick.split('|', 1)[0].rstrip('`_').lower()
def respond(self, conn, event, msg):
"""Respond to an event with a message."""
to = event.target
if to == self.connection.get_nickname():
to = event.source.nick
conn.privmsg(to, msg.replace("\n", ' '))
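# A minimal config sketch for this bot (illustrative only; the keys are
# inferred from the lookups above and every value is a placeholder):
#
#   config = {
#       'irc': {
#           'server': 'irc.example.org', 'port': 6667,
#           'nick': 'stashbot', 'realname': 'Stashbot',
#           'password': 'secret',
#           'channels': ['#example'],
#           'ignore': [],
#       },
#       'elasticsearch': {
#           'servers': ['http://localhost:9200'], 'options': {},
#           'index': 'irc-%Y.%m',
#       },
#       'phab': {
#           'url': 'https://phabricator.example.org',
#           'user': 'bot', 'key': 'api-key',
#           'echo': '%(uri)s',
#           'delay': {'__default__': 300},
#           'notin': [],
#       },
#       'bash': {'view_url': 'https://example.org/bash/%s'},
#   }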
| gpl-3.0 | -687,042,839,125,175,300 | 34.990741 | 78 | 0.565646 | false |
panoptes/POCS | tests/test_state_machine.py | 1 | 1467 | import os
import pytest
from panoptes.pocs.core import POCS
from panoptes.pocs.observatory import Observatory
from panoptes.utils import error
from panoptes.utils.serializers import to_yaml
@pytest.fixture
def observatory():
observatory = Observatory(simulator=['all'])
yield observatory
def test_bad_state_machine_file():
with pytest.raises(error.InvalidConfig):
POCS.load_state_table(state_table_name='foo')
def test_load_bad_state(observatory):
pocs = POCS(observatory)
with pytest.raises(error.InvalidConfig):
pocs._load_state('foo')
def test_load_state_info(observatory):
pocs = POCS(observatory)
pocs._load_state('ready', state_info={'tags': ['at_twilight']})
def test_lookup_trigger_default_park(observatory, caplog):
pocs = POCS(observatory)
pocs._load_state('ready', state_info={'tags': ['at_twilight']})
pocs.state = 'ready'
pocs.next_state = 'foobar'
next_state = pocs._lookup_trigger()
assert next_state == 'parking'
assert caplog.records[-1].levelname == 'WARNING'
assert caplog.records[-1].message == 'No transition for ready -> foobar, going to park'
def test_state_machine_absolute(temp_file):
state_table = POCS.load_state_table()
assert isinstance(state_table, dict)
with open(temp_file, 'w') as f:
f.write(to_yaml(state_table))
file_path = os.path.abspath(temp_file)
assert POCS.load_state_table(state_table_name=file_path)
| mit | -1,175,351,245,852,799,500 | 25.196429 | 91 | 0.697342 | false |
kdart/pycopia | aid/setup.py | 1 | 1235 | #!/usr/bin/python2.7
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
import os
from setuptools import setup
NAME = "pycopia-aid"
#REVISION = os.environ.get("PYCOPIA_REVISION", "0standalone")
VERSION = "1.0"
setup (name=NAME, version=VERSION,
namespace_packages = ["pycopia"],
packages = ["pycopia",],
test_suite = "test.AidTests",
author = "Keith Dart",
author_email = "keith@kdart.com",
description = "General purpose objects that enhance Python's core modules.",
long_description = """General purpose objects that enhance Python's core modules.
You can use these modules in place of the standard modules with the same name.
This package is part of the collection of python packages known as pycopia.""",
license = "LGPL",
keywords = "pycopia framework Python extensions",
url = "http://www.pycopia.net/",
dependency_links = [
"http://www.pycopia.net/download/"
],
#download_url = "ftp://ftp.pycopia.net/pub/python/%s-%s.tar.gz" % (NAME, VERSION),
classifiers = ["Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers"],
)
| apache-2.0 | -2,782,851,439,800,748,500 | 33.305556 | 86 | 0.651012 | false |
Wetrain/mmu-course-api | config/wsgi.py | 1 | 1459 | """
WSGI config for enterprise_course_api project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | 6,543,690,837,505,451,000 | 40.685714 | 79 | 0.793694 | false |
antonioguirola/webpy-base | forms.py | 1 | 4418 | # -*- coding: utf-8 -*-
from web import form
import re
import db
# Required regular expressions:
#formatoVisa=re.compile(r'[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}')
# Helper functions needed for the validations
def fooFunction():
pass
"""
EXAMPLE SIGN-UP (REGISTRATION) FORM
formularioInscripcion = form.Form(
form.Textbox(
"nombre",
form.notnull,
class_="form-control",
id="nombreId",
description="Nombre: "
),
form.Textbox(
"apellidos",
form.notnull,
class_="form-control",
id="apellidosId",
description="Apellidos: "
),
form.Textbox(
"dni",
form.notnull,
class_="form-control",
id="dniId",
description="DNI: "
),
form.Textbox(
"email",
form.notnull,
form.regexp(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}',
'Formato de email incorrecto'),
class_="form-control",
id="emailId",
description=u"Correo electrónico: "
),
form.Dropdown(
"dia",
[(d, d) for d in range(1,32)],
id="diaID",
description=u"Día de nacimiento: ",
),
form.Dropdown(
"mes",
[(1,'Enero'),(2,'Febrero'),(3,'Marzo'),(4,'Abril'),(5,'Mayo'),(6,'Junio'),
(7,'Julio'),(8,'Agosto'),(9,'Septiembre'),(10,'Octubre'),(11,'Noviembre'),(12,'Diciembre')],
id="mesID",
description="Mes de nacimiento: "
),
form.Dropdown(
"anio",
[d for d in range(1930,2006)],
id="anioID",
description=u"Año de nacimiento: "
),
form.Textarea(
"direccion",
form.notnull,
class_="form-control",
id="direccionId",
description=u"Dirección: "
),
form.Textbox(
"username",
form.notnull,
class_="form-control",
id="usernameId",
description="Nombre de usuario: "
),
form.Password(
"password1",
form.notnull,
class_="form-control",
id="password1Id",
description=u"Contraseña: "
),
form.Password(
"password2",
form.notnull,
class_="form-control",
id="password2Id",
description=u"Repita la contraseña: "
),
form.Radio(
'formaPago',
[["VISA","VISA "],["contraReembolso","Contra reembolso"]],
form.notnull,
id="formaPagoId",
description="Forma de pago: "
),
form.Textbox(
"visa",
class_="form-control",
id="visaId",
description="Número de tarjeta VISA: ",
),
form.Checkbox(
"acepto",
description="Acepto las condiciones de uso ",
id="aceptoId",
value="si"
),
validators = [
form.Validator(u"Fecha incorrecta", lambda x: ((int(x.mes)==2 and int(x.dia)<=28)) or
(int(x.mes) in [4,6,9,11] and int(x.dia)<31) or (int(x.mes) in [1,3,5,7,8,10,12])
or (int(x.mes)==2 and int(x.dia)==29 and esBisiesto(x.anio))),
form.Validator(u"La contraseña debe tener al menos 7 caracteres",lambda x: len(x.password1)>6),
form.Validator(u"Las contraseñas no coinciden", lambda x: x.password1 == x.password2),
form.Validator(u"Debe introducir un número de tarjeta válido",lambda x: (x.formaPago=="contraReembolso")
or (x.formaPago=="VISA" and formatoVisa.match(x.visa))),
form.Validator(u"Debe aceptar los términos y condiciones",lambda x: x.acepto=="si")
]
)
"""
| gpl-3.0 | 5,946,189,467,328,933,000 | 33.155039 | 121 | 0.433727 | false |
MetricsGrimoire/sortinghat | tests/test_matcher.py | 1 | 11000 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2017 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
#
import sys
import unittest
if '..' not in sys.path:
sys.path.insert(0, '..')
from sortinghat.db.model import UniqueIdentity, Identity, MatchingBlacklist
from sortinghat.exceptions import MatcherNotSupportedError
from sortinghat.matcher import IdentityMatcher, create_identity_matcher, match
from sortinghat.matching import EmailMatcher, EmailNameMatcher
class TestCreateIdentityMatcher(unittest.TestCase):
def test_identity_matcher_instance(self):
"""Test if the factory function returns an identity matcher instance"""
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
matcher = create_identity_matcher('email')
self.assertIsInstance(matcher, EmailMatcher)
matcher = create_identity_matcher('email-name')
self.assertIsInstance(matcher, EmailNameMatcher)
def test_identity_matcher_instance_with_blacklist(self):
"""Test if the factory function adds a blacklist to the matcher instance"""
# The blacklist is empty
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(len(matcher.blacklist), 0)
# Create a matcher with a blacklist
blacklist = [MatchingBlacklist(excluded='JSMITH@example.com'),
MatchingBlacklist(excluded='jrae@example.com'),
MatchingBlacklist(excluded='jrae@example.net'),
MatchingBlacklist(excluded='John Smith'),
MatchingBlacklist(excluded='root')]
matcher = create_identity_matcher('default', blacklist=blacklist)
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(len(matcher.blacklist), 5)
def test_identity_matcher_instance_with_sources_list(self):
"""Test if the factory function adds a sources list to the matcher instance"""
# The sources list is None
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(matcher.sources, None)
# Create a matcher with a sources list
sources = ['git', 'jira', 'github']
matcher = create_identity_matcher('default', sources=sources)
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(len(matcher.sources), 3)
def test_identity_matcher_instance_with_strict(self):
"""Test if the factory function adds the strict mode to the matcher instance"""
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(matcher.strict, True)
matcher = create_identity_matcher('default', strict=False)
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(matcher.strict, False)
def test_not_supported_matcher(self):
"""Check if an exception is raised when the given matcher type is not supported"""
self.assertRaises(MatcherNotSupportedError,
create_identity_matcher, 'custom')
class TestIdentityMatcher(unittest.TestCase):
"""Test IdentityMatcher class"""
def test_blacklist(self):
"""Test blacklist contents"""
m = IdentityMatcher()
self.assertListEqual(m.blacklist, [])
m = IdentityMatcher(blacklist=[])
self.assertListEqual(m.blacklist, [])
blacklist = [MatchingBlacklist(excluded='JSMITH@example.com'),
MatchingBlacklist(excluded='jrae@example.com'),
MatchingBlacklist(excluded='jrae@example.net'),
MatchingBlacklist(excluded='John Smith'),
MatchingBlacklist(excluded='root')]
m = IdentityMatcher(blacklist=blacklist)
self.assertListEqual(m.blacklist, ['john smith', 'jrae@example.com',
'jrae@example.net', 'jsmith@example.com',
'root'])
def test_sources_list(self):
"""Test sources list contents"""
m = IdentityMatcher()
self.assertEqual(m.sources, None)
        m = IdentityMatcher(sources=[])
self.assertEqual(m.sources, None)
sources = ['git', 'Jira', 'GitHub']
m = IdentityMatcher(sources=sources)
self.assertListEqual(m.sources, ['git', 'github', 'jira'])
def test_strict_mode(self):
"""Test strict mode value"""
m = IdentityMatcher()
self.assertEqual(m.strict, True)
m = IdentityMatcher(strict=False)
self.assertEqual(m.strict, False)
class TestMatch(unittest.TestCase):
"""Test match function"""
def setUp(self):
# Add some unique identities
self.john_smith = UniqueIdentity('John Smith')
self.john_smith.identities = [Identity(email='jsmith@example.com', name='John Smith',
source='scm', uuid='John Smith'),
Identity(name='John Smith',
source='scm', uuid='John Smith'),
Identity(username='jsmith',
source='scm', uuid='John Smith')]
self.jsmith = UniqueIdentity('J. Smith')
self.jsmith.identities = [Identity(name='J. Smith', username='john_smith',
source='alt', uuid='J. Smith'),
Identity(name='John Smith', username='jsmith',
source='alt', uuid='J. Smith'),
Identity(email='jsmith',
source='alt', uuid='J. Smith')]
self.jane_rae = UniqueIdentity('Jane Rae')
self.jane_rae.identities = [Identity(name='Janer Rae',
source='mls', uuid='Jane Rae'),
Identity(email='jane.rae@example.net', name='Jane Rae Doe',
source='mls', uuid='Jane Rae')]
self.js_alt = UniqueIdentity('john_smith')
self.js_alt.identities = [Identity(name='J. Smith', username='john_smith',
source='scm', uuid='john_smith'),
Identity(username='john_smith',
source='mls', uuid='john_smith'),
Identity(username='Smith. J',
source='mls', uuid='john_smith'),
Identity(email='JSmith@example.com', name='Smith. J',
source='mls', uuid='john_smith')]
self.jrae = UniqueIdentity('jrae')
self.jrae.identities = [Identity(email='jrae@example.net', name='Jane Rae Doe',
source='mls', uuid='jrae'),
Identity(name='jrae', source='mls', uuid='jrae'),
Identity(name='jrae', source='scm', uuid='jrae')]
def test_match_email(self):
"""Test whether the function finds every possible matching using email matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailMatcher()
result = match([], matcher)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher)
self.assertEqual(len(result), 4)
self.assertListEqual(result,
[[self.john_smith, self.js_alt],
[self.jane_rae], [self.jrae], [self.jsmith]])
def test_match_email_name(self):
"""Test whether the function finds every possible matching using email-name matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailNameMatcher()
result = match([], matcher)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher)
self.assertEqual(len(result), 2)
self.assertListEqual(result,
[[self.jsmith, self.john_smith, self.js_alt],
[self.jane_rae, self.jrae]])
def test_match_email_fast_mode(self):
"""Test matching in fast mode using email matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailMatcher()
result = match([], matcher, fastmode=True)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher, fastmode=True)
self.assertEqual(len(result), 4)
self.assertListEqual(result,
[[self.john_smith, self.js_alt],
[self.jane_rae], [self.jrae], [self.jsmith]])
def test_match_email_name_fast_mode(self):
"""Test matching in fast mode using email-name matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailNameMatcher()
result = match([], matcher, fastmode=True)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher, fastmode=True)
self.assertEqual(len(result), 2)
self.assertListEqual(result,
[[self.jsmith, self.john_smith, self.js_alt],
[self.jane_rae, self.jrae]])
def test_matcher_error(self):
"""Test if it raises an error when the matcher is not valid"""
self.assertRaises(TypeError, match, [], None)
self.assertRaises(TypeError, match, [], "")
def test_matcher_not_supported_fast_mode(self):
"""Test if it raises and error when a matcher does not supports the fast mode"""
matcher = IdentityMatcher()
self.assertRaises(MatcherNotSupportedError,
match, [], matcher, True)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 5,165,033,627,049,388,000 | 38.282143 | 95 | 0.582508 | false |
LACMTA/loader | ott/loader/otp/graph/run.py | 1 | 2773 | """ Run an OTP graph: serve it or open it in the graph visualizer.
"""
import sys
import time
import logging
log = logging.getLogger(__file__)
from ott.utils import otp_utils
from ott.utils import web_utils
from ott.utils.cache_base import CacheBase
class Run(CacheBase):
""" run OTP graph
"""
graphs = None
def __init__(self):
super(Run, self).__init__('otp')
self.graphs = otp_utils.get_graphs(self)
@classmethod
def get_args(cls):
''' run the OTP server
examples:
bin/otp_run -s call (run the call server)
               bin/otp_run -v test (run the visualizer with the test graph)
'''
parser = otp_utils.get_initial_arg_parser()
parser.add_argument('--server', '-s', required=False, action='store_true', help="run 'named' graph in server mode")
parser.add_argument('--all', '-a', required=False, action='store_true', help="run all graphs in server mode")
        parser.add_argument('--viz', '-v', required=False, action='store_true', help="run 'named' graph with the visualizer client")
parser.add_argument('--mem', '-lm', required=False, action='store_true', help="set the jvm heap memory for the graph")
args = parser.parse_args()
return args, parser
@classmethod
def run(cls):
#import pdb; pdb.set_trace()
success = False
r = Run()
args, parser = r.get_args()
graph = otp_utils.find_graph(r.graphs, args.name)
java_mem = "-Xmx1236m" if args.mem else None
if args.all or 'all' == args.name or 'a' == args.name:
success = True
for z in r.graphs:
print "running {}".format(z)
time.sleep(2)
s = otp_utils.run_otp_server(java_mem=java_mem, **z)
if s == False:
success = False
elif args.server:
success = otp_utils.run_otp_server(java_mem=java_mem, **graph)
elif args.viz:
success = otp_utils.vizualize_graph(graph_dir=graph['dir'], java_mem=java_mem)
else:
print "PLEASE select a option to either serve or vizualize graph {}".format(graph['name'])
parser.print_help()
return success
@classmethod
def static_server_cfg(cls):
r = Run()
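        # A minimal sketch of the expected INI-style config (hypothetical
        # file; the values shown are just the defaults used below):
        #   [web]
        #   port = 50080
        #   dir = ott/loader/otp/graph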
port = r.config.get('port', 'web', '50080')
dir = r.config.get('dir', 'web', 'ott/loader/otp/graph')
return port, dir
@classmethod
def static_server(cls):
        ''' start a static web server serving the configured directory
            on the configured port
        '''
success = False
port, dir = Run.static_server_cfg()
success = web_utils.background_web_server(dir, port)
return success
def main(argv=sys.argv):
Run.run()
if __name__ == '__main__':
main()
| mpl-2.0 | 7,585,301,656,605,814,000 | 31.244186 | 136 | 0.567616 | false |
marble/Toolchain_RenderDocumentation | 36-Get-ready-for-publishing/run_01-Treat-pdf-folder.py | 1 | 4291 | #!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
import os
import tct
import sys
params = tct.readjson(sys.argv[1])
binabspath = sys.argv[2]
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
reason = ''
resultfile = params['resultfile']
result = tct.readjson(resultfile)
loglist = result['loglist'] = result.get('loglist', [])
toolname = params['toolname']
toolname_pure = params['toolname_pure']
toolchain_name = facts['toolchain_name']
workdir = params['workdir']
exitcode = CONTINUE = 0
# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])
# ==================================================
# Get and check required milestone(s)
# --------------------------------------------------
def milestones_get(name, default=None):
result = milestones.get(name, default)
loglist.append((name, result))
return result
def facts_get(name, default=None):
result = facts.get(name, default)
loglist.append((name, result))
return result
def params_get(name, default=None):
result = params.get(name, default)
loglist.append((name, result))
return result
# ==================================================
# define
# --------------------------------------------------
pdf_dest_folder_htaccess = ''
pdf_url_relpath = ''
xeq_name_cnt = 0
# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
loglist.append('CHECK PARAMS')
# required milestones
requirements = ['configset']
# just test
for requirement in requirements:
v = milestones_get(requirement)
if not v:
loglist.append("'%s' not found" % requirement)
exitcode = 22
reason = 'Bad PARAMS or nothing to do'
if exitcode == CONTINUE:
configset = milestones_get('configset')
# fetch
webroot_abspath = tct.deepget(facts, 'tctconfig', configset, 'webroot_abspath')
loglist.append(('webroot_abspath', webroot_abspath))
if not webroot_abspath:
exitcode = 22
reason = 'Bad PARAMS or nothing to do'
if exitcode == CONTINUE:
loglist.append('PARAMS are ok')
else:
loglist.append('Bad PARAMS or nothing to do')
# ==================================================
# work
# --------------------------------------------------
if exitcode == CONTINUE:
pdf_dest_file = milestones_get('pdf_dest_file')
pdf_dest_folder = milestones_get('pdf_dest_folder')
publish_dir_pdf_planned = milestones_get('publish_dir_pdf_planned')
if not (pdf_dest_file and pdf_dest_folder and publish_dir_pdf_planned):
CONTINUE = -2
reason = 'Nothing to do'
loglist.append(reason)
if exitcode == CONTINUE:
temp = os.path.join(publish_dir_pdf_planned, os.path.split(pdf_dest_file)[1])
pdf_url_relpath = temp[len(webroot_abspath):]
loglist.append(('pdf_url_relpath', pdf_url_relpath))
htaccess_contents = (
"RewriteEngine On\n"
"RewriteCond %{REQUEST_FILENAME} !-f\n"
"RewriteRule ^(.*)$ " + pdf_url_relpath + " [L,R=301]\n")
pdf_dest_folder_htaccess = os.path.join(pdf_dest_folder, '.htaccess')
with open(pdf_dest_folder_htaccess, 'w') as f2:
f2.write(htaccess_contents)
# ==================================================
# Set MILESTONE
# --------------------------------------------------
if pdf_dest_folder_htaccess:
result['MILESTONES'].append({'pdf_dest_folder_htaccess': pdf_dest_folder_htaccess})
if pdf_url_relpath:
result['MILESTONES'].append({'pdf_url_relpath': pdf_url_relpath})
# ==================================================
# save result
# --------------------------------------------------
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)
# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
| mit | 8,712,881,131,675,113,000 | 27.798658 | 94 | 0.543696 | false |
edx/edx-load-tests | util/generate_summary.py | 1 | 3450 | # -*- coding: utf-8 -*-
"""
Generate a summary of a previous loadtest run in this environment.
For a usage example, see this Jenkins job DSL:
https://github.com/edx/jenkins-job-dsl/blob/master/testeng/jobs/loadtestDriver.groovy
Prerequisites:
A logfile produced by util/run-loadtest.sh should be present in its
standard location.
Output:
Produces summary on standard output in YAML format. The structure is as
follows:
* monitoring_links:
* list of link text/url pairs pointing to monitoring dashboards.
* timeline:
* begin: ISO 8601 date for when the test began.
* end: ISO 8601 date for when the test ended.
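    Example output (hypothetical dashboard name and timestamps; the em dash
    separator comes from the link-text format used below):
        monitoring_links:
        - text: 'New Relic: LMS (2017-01-01T12:00:00Z — 2017-01-01T13:00:00Z)'
          url: https://example.com/dashboard
        timeline:
          begin: '2017-01-01T12:00:00Z'
          end: '2017-01-01T13:00:00Z'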
"""
from datetime import timedelta
import yaml
import helpers.markers
from util.app_monitors_config import MONITORS
# Refer to util/run-loadtest.sh in case this file path changes.
STANDARD_LOGFILE_PATH = "results/log.txt"
def parse_logfile_events(logfile):
"""
Parse the logfile for events
Parameters:
logfile (file): the file containing locust logs for a single load test
Returns:
iterator of (datetime.datetime, str) tuples: the parsed events in the
order they are encountered.
"""
for line in logfile:
data = helpers.markers.parse_logfile_event_marker(line)
if data is not None:
yield (data['time'], data['event'])
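# For illustration (hypothetical timestamps): iterating a logfile whose
# marker lines record 'locust_start_hatching' and 'quitting' events yields
# tuples such as:
#   (datetime.datetime(2017, 1, 1, 12, 0), 'locust_start_hatching')
#   (datetime.datetime(2017, 1, 1, 13, 0), 'quitting')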
def get_time_bounds(logfile):
"""
Determine when the load test started and stopped.
Parameters:
logfile (file): the file containing locust logs for a single load test
Returns:
two-tuple of datetime.datetime: the time bounds of the load test
"""
begin_time = end_time = None
relevant_events = ['locust_start_hatching', 'edx_heartbeat', 'quitting']
relevant_times = [
time
for time, event
in parse_logfile_events(logfile)
if event in relevant_events
]
begin_time, end_time = (min(relevant_times), max(relevant_times))
return (begin_time, end_time)
def main():
"""
Generate a summary of a previous load test run.
This script assumes "results/log.txt" is the logfile in question.
"""
with open(STANDARD_LOGFILE_PATH) as logfile:
loadtest_begin_time, loadtest_end_time = get_time_bounds(logfile)
monitoring_links = []
for monitor in MONITORS:
monitoring_links.append({
'url': monitor.url(
begin_time=loadtest_begin_time,
end_time=loadtest_end_time,
),
'text': u'{}: {} ({} — {})'.format(
monitor.monitoring_service_name,
monitor.app_name,
# We use naive datetimes (i.e. no attached tz) and just
# assume UTC all along. Tacking on the "Z" implies UTC.
loadtest_begin_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
loadtest_end_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
),
})
print(yaml.dump(
{
'timeline': {
'begin': loadtest_begin_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
'end': loadtest_end_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
},
'monitoring_links': monitoring_links
},
default_flow_style=False, # Represent objects using indented blocks
# rather than inline enclosures.
allow_unicode=True,
))
if __name__ == "__main__":
main()
| apache-2.0 | -6,008,860,902,320,476,000 | 30.345455 | 85 | 0.606729 | false |
ENCODE-DCC/encoded | src/encoded/tests/test_upgrade_software.py | 1 | 2678 | import pytest
from unittest import TestCase
def test_software_upgrade(upgrader, software_1):
value = upgrader.upgrade('software', software_1, target_version='2')
assert value['schema_version'] == '2'
assert value['lab'] == "cb0ef1f6-3bd3-4000-8636-1c5b9f7000dc"
assert value['award'] == "b5736134-3326-448b-a91a-894aafb77876"
def test_software_upgrade_5_6(upgrader, software_1):
software_1['schema_version'] = '5'
software_1['purpose'] = ['single-nuclei ATAC-seq', 'HiC']
value = upgrader.upgrade('software', software_1, target_version='6')
assert value['schema_version'] == '6'
TestCase().assertListEqual(
sorted(value['purpose']),
sorted(['single-nucleus ATAC-seq', 'HiC'])
)
def test_software_upgrade_6_7(upgrader, software_1):
software_1['schema_version'] = '6'
software_1['purpose'] = ['single cell isolation followed by RNA-seq', 'RNA-seq']
value = upgrader.upgrade('software', software_1, target_version='7')
assert value['schema_version'] == '7'
TestCase().assertListEqual(
sorted(value['purpose']),
sorted(['single-cell RNA sequencing assay', 'RNA-seq'])
)
def test_software_upgrade_7_8(upgrader, software_1):
software_1['schema_version'] = '7'
software_1['purpose'] = ['single-nucleus RNA-seq',
'genotyping by high throughput sequencing assay']
value = upgrader.upgrade('software', software_1, target_version='8')
assert value['schema_version'] == '8'
TestCase().assertListEqual(
sorted(value['purpose']),
sorted(['single-cell RNA sequencing assay',
'whole genome sequencing assay'])
)
assert 'The purpose for this software is now WGS, upgraded from genotyping HTS assay.' in value['notes']
def test_software_upgrade_8_9(upgrader, software_1):
software_1['schema_version'] = '8'
software_1['purpose'] = ['single-cell ATAC-seq', 'ATAC-seq']
value = upgrader.upgrade('software', software_1, target_version='9')
assert value['schema_version'] == '9'
TestCase().assertListEqual(
sorted(value['purpose']),
sorted(['single-nucleus ATAC-seq', 'ATAC-seq'])
)
assert 'The purpose for this software is now snATAC-seq, upgraded from scATAC-seq.' in value['notes']
def test_software_upgrade_9_10(upgrader, software_1):
software_1['schema_version'] = '9'
software_1['purpose'] = ['Capture Hi-C', 'HiC']
value = upgrader.upgrade('software', software_1, target_version='10')
assert value['schema_version'] == '10'
TestCase().assertListEqual(
sorted(value['purpose']),
sorted(['capture Hi-C', 'HiC'])
)
| mit | -3,664,232,772,370,065,400 | 38.382353 | 108 | 0.647872 | false |