code stringlengths 3–1.05M | repo_name stringlengths 5–104 | path stringlengths 4–251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3–1.05M |
---|---|---|---|---|---|
#!/usr/bin/env python
# encoding: utf-8
"""
roman-to-integer.py
Created by Shuailong on 2016-03-17.
https://leetcode.com/problems/roman-to-integer/.
"""
class Solution(object):
def __init__(self):
self.d = {'I':1, 'V':5, 'X':10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
def romanToInt(self, s):
"""
:type s: str
:rtype: int
"""
res = 0
for i in range(len(s)):
res += self.d[s[i]]
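# Subtractive notation: s[i] was already added above, so when a larger
# numeral follows, subtract twice its value (e.g. 'IV' -> 1 + 5 - 2 = 4).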
if s[i] == 'I' and i+1 < len(s) and (s[i+1] == 'V' or s[i+1] == 'X'):
res -= 2
elif s[i] == 'X' and i+1 < len(s) and (s[i+1] == 'L' or s[i+1] == 'C'):
res -= 20
elif s[i] == 'C' and i+1 < len(s) and (s[i+1] == 'D' or s[i+1] == 'M'):
res -= 200
return res
def main():
solution = Solution()
Ss = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X']
for s in Ss:
print solution.romanToInt(s)
if __name__ == '__main__':
main()
| Shuailong/Leetcode | solutions/roman-to-integer.py | Python | mit | 1,036 |
from setuptools import setup, find_packages
setup(
name="absmodels",
version="0.1",
packages=find_packages(),
)
| stavinsky/simple_cms | abs_models/setup.py | Python | mit | 124 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.paginator import Paginator, InvalidPage
from django.db import models
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.template.defaultfilters import force_escape
from django.utils.translation import ugettext_lazy as _
from ragendja.template import JSONResponse, render_to_response
from search.forms import SearchForm
from search.core import paginated_query
import base64
import cPickle as pickle
default_results_format = (
_('No results found'),
_('One result found'),
_('%(hits)s results found'),
_('More than %(hits)s results found'),
)
default_title = _('Search')
def update_relation_index(request):
if 'property_name' in request.POST and 'model_descriptor' in request.POST \
and 'parent_key' in request.POST and 'delete' in request.POST:
model_descriptor = pickle.loads(base64.b64decode(request.POST[
'model_descriptor']))
model = models.get_model(model_descriptor[0], model_descriptor[1])
update_property = getattr(model, request.POST['property_name'])
parent_key = pickle.loads(base64.b64decode(request.POST['parent_key']))
delete = pickle.loads(base64.b64decode(request.POST['delete']))
update_property.update_relation_index(parent_key, delete)
return HttpResponse()
def update_values_index(request):
if 'property_name' in request.POST and 'model_descriptor' in request.POST \
and 'old_values' in request.POST and 'new_values' in request.POST:
model_descriptor = pickle.loads(base64.b64decode(request.POST[
'model_descriptor']))
model = models.get_model(model_descriptor[0], model_descriptor[1])
update_property = getattr(model, request.POST['property_name'])
values = [pickle.loads(base64.b64decode(request.POST[
value + '_values'])) for value in ('old', 'new', )]
update_property.update_values_index(values[1], values[0])
return HttpResponse()
def query_param_search(request, model, index,
filters=(), chain_sort=(), query_converter=None,
force_query=False):
'''Query the data store based on the query request parameter.'''
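# Illustrative call (the model and index names are placeholders, not from this module):
#   results, query = query_param_search(request, Document, 'search_index')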
query = request.GET.get('query', '').strip()
results = None
if query or force_query:
language = getattr(request, 'LANGUAGE_CODE', settings.LANGUAGE_CODE)
results = getattr(model, index).search(query, filters,
language=language, chain_sort=chain_sort)
if query_converter:
results = query_converter(request, results)
return results, query
def show_search_results(request, model, index, filters=(), chain_sort=(),
ignore_params=(), query_converter=None,
converter=None, search_title=default_title,
force_results_count=True, results_count_format=default_results_format,
search_form_class=SearchForm, paginate_by=10, template_name=None,
template_object_name=None, extra_context={},
key_based_on_empty_query=False, key_based_order=()):
"""
Performs a search in model and prints the results.
For further information see
search.core.SearchIndexProperty.search()
"""
results, query = query_param_search(request, model, index,
filters=filters,
chain_sort=chain_sort,
query_converter=query_converter)
data = { 'query': query }
data.update(extra_context)
return show_search_results_from_results(results,
request, model, index,
extra_context=data,
# pass the rest of the parameters through
filters=filters,
chain_sort=chain_sort,
ignore_params=ignore_params,
converter=converter,
search_title=search_title,
force_results_count=force_results_count,
results_count_format=results_count_format,
search_form_class=search_form_class,
paginate_by=paginate_by,
template_name=template_name,
template_object_name=template_object_name,
key_based_on_empty_query=key_based_on_empty_query,
key_based_order=key_based_order)
def show_search_results_from_results(results,
request, model, index, filters=(), chain_sort=(),
ignore_params=(),
converter=None, search_title=default_title,
force_results_count=True, results_count_format=default_results_format,
search_form_class=SearchForm, paginate_by=10, template_name=None,
template_object_name=None, extra_context={},
key_based_on_empty_query=False, key_based_order=()):
"""
Performs a search in model and prints the results.
For further information see
search.core.SearchIndexProperty.search()
"""
if key_based_on_empty_query and not results:
return key_paginated_object_list(request, model, filters=filters,
order=key_based_order, search_title=search_title,
template_name=template_name,
template_object_name=template_object_name,
ignore_params=tuple(ignore_params) + ('query',),
converter=converter,
paginate_by=paginate_by, search_form_class=search_form_class,
extra_context=extra_context)
if not search_form_class:
search_form = None
else:
search_form = search_form_class(request.GET)
search_form.is_valid()
if not template_object_name:
template_object_name = model._meta.object_name.lower()
data = {
'force_results_count': force_results_count,
'search_form': search_form,
'search_title': search_title,
}
data.update(extra_context)
if not template_name:
template_name = ('%s/%s_search.html' % (model._meta.app_label,
model._meta.object_name.lower()),
'search/search.html')
return paginated_object_list(request, queryset=results, converter=converter,
paginate_by=paginate_by, template_name=template_name,
extra_context=data, template_object_name=template_object_name,
results_count_format=results_count_format, ignore_params=ignore_params)
def _prepare_params(request, ignore_params=()):
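# Splits the 'page' parameter off the query string and builds paging base URLs:
# one keeping all remaining parameters, and one with ignore_params stripped too.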
page = request.GET.get('page')
parameters = request.GET.copy()
if 'page' in parameters:
del parameters['page']
paramstr = parameters.urlencode()
if paramstr:
paramstr = paramstr + '&'
original_base_url = request.path + '?' + paramstr + 'page='
for param in ignore_params:
if param in parameters:
del parameters[param]
paramstr = parameters.urlencode()
if paramstr:
paramstr = paramstr + '&'
return page, original_base_url, {'base_url': request.path + '?' + paramstr + 'page='}
def paginated_object_list(request, queryset, converter=None, paginate_by=10,
template_name=None, extra_context={}, template_object_name=None,
results_count_format=default_results_format, ignore_params=()):
page, original_base_url, data = _prepare_params(request, ignore_params)
if not page:
page = 1
data.update({
'template_object_name': template_object_name,
'force_results_count': True,
'results_count_format': results_count_format,
'search__converter': converter,
})
paginator = Paginator(queryset, paginate_by, allow_empty_first_page=True)
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
# Page is not 'last', nor can it be converted to an int.
raise Http404
try:
page_obj = paginator.page(page_number)
except InvalidPage:
return HttpResponseRedirect(original_base_url + 'last')
data.update({
'%s_list' % template_object_name: page_obj.object_list,
'paginator': paginator,
'page_obj': page_obj,
# Legacy template context stuff. New templates should use page_obj
# to access this instead.
'is_paginated': page_obj.has_other_pages(),
'results_per_page': paginator.per_page,
'has_next': page_obj.has_next(),
'has_previous': page_obj.has_previous(),
'page': page_obj.number,
'next': page_obj.next_page_number(),
'previous': page_obj.previous_page_number(),
'first_on_page': page_obj.start_index(),
'last_on_page': page_obj.end_index(),
'pages': paginator.num_pages,
'hits': paginator.count,
'page_range': paginator.page_range,
})
for key, value in extra_context.items():
if callable(value):
data[key] = value()
else:
data[key] = value
return render_to_response(request, template_name, data)
def live_search_results(request, model, index, filters=(), chain_sort=(),
limit=30, result_item_formatting=None, query_converter=None,
converter=None, redirect=False):
"""
Performs a search in searched_model and prints the results as
text, so it can be used by auto-complete scripts.
limit indicates the number of results to be returned.
A JSON file is sent to the browser. It contains a list of
objects that are created by the function indicated by
the parameter result_item_formatting. It is executed for every result
item.
Example:
result_item_formatting=lambda course: {
'value': course.name + '<br />Prof: ' + course.prof.name,
'result': course.name + ' ' + course.prof.name,
'data': redirect=='redirect' and
{'link': course.get_absolute_url()} or {},
}
"""
query = request.GET.get('query', '')
try:
limit_override = int(request.GET.get('limit', limit))
if limit_override < limit:
limit = limit_override
except:
pass
index_property = getattr(model, index)
language = getattr(request, 'LANGUAGE_CODE', settings.LANGUAGE_CODE)
results = index_property.search(query, filters, chain_sort=chain_sort,
language=language)
if query_converter:
results = query_converter(request, results)
results = results[:limit]
if converter:
results = converter(results)
data = []
for item in results:
if result_item_formatting:
entry = result_item_formatting(item)
else:
value = getattr(item, index_property.properties[0])
entry = {'value': force_escape(value), 'result': value}
if 'data' not in entry:
entry['data'] = {}
if redirect:
if 'link' not in entry['data']:
entry['data']['link'] = item.get_absolute_url()
data.append(entry)
return JSONResponse(data)
def key_paginated_object_list(request, model, filters=(), order=(),
ignore_params=(), converter=None,
search_title=default_title, paginate_by=10,
search_form_class=None, template_name=None, template_object_name=None,
extra_context={}):
"""
Browse entities using key-based pagination.
"""
if not search_form_class:
search_form = None
else:
search_form = search_form_class(request.GET)
search_form.is_valid()
bookmark, original_base_url, data = _prepare_params(request, ignore_params)
items, prev, next = paginated_query(model, filters=filters, order=order,
count=paginate_by, bookmark=bookmark)
if not template_object_name:
template_object_name = model._meta.object_name.lower()
data.update({
template_object_name + '_list': items,
'template_object_name': template_object_name,
'has_previous': bool(prev),
'previous': prev,
'has_next': bool(next),
'next': next,
'page_range': (),
'show_key_pagenav': True,
'search_form': search_form,
'search_title': search_title,
'query': '',
'search__converter': converter,
})
if not items and not prev and not next:
data['force_results_count'] = True
if 'results_count_format' not in extra_context:
data['results_count_format'] = default_results_format
for key, value in extra_context.items():
if callable(value):
data[key] = value()
else:
data[key] = value
if not template_name:
name_data = (model._meta.app_label, model._meta.object_name.lower())
template_name = ('%s/%s_paginated.html' % name_data,
'%s/%s_search.html' % name_data,
'search/search.html')
return render_to_response(request, template_name, data)
| avastjohn/maventy_new | search/views.py | Python | bsd-3-clause | 12,853 |
from numpy.random import randn
from numpy.linalg import cholesky as chol
import numpy as np
import numpy.linalg as L
import scipy.special as sp
import pymc.flib as flib
import time
import testmod
import util
import pdb
def gen_testdata(n=100, k=4):
# use static data to compare to R
data = randn(n, k)
mean = randn(k)
np.savetxt('test_data', data)
np.savetxt('test_mean', mean)
def load_testdata():
data = np.loadtxt('test_data')
mean = np.loadtxt('test_mean')
cov = np.cov(data.T)
return data, mean, cov
def bench(cpu_func, gpu_func, gruns=50):
"""
"""
_s = time.clock()
for i in xrange(gruns):
gpu_func()
gpu_speed = (time.clock() - _s) / gruns
_s = time.clock()
cpu_func()
cpu_speed = (time.clock() - _s)
print 'CPU speed: %.3f' % (cpu_speed * 1000)
print 'GPU speed: %.3f' % (gpu_speed * 1000)
print cpu_speed / gpu_speed
if __name__ == '__main__':
testmod.set_device(0)
n = 1e3
k = 16
data = randn(n, k).astype(np.float32)
mean = randn(k)
cov = np.array(util.random_cov(k), dtype=np.float32)
j = 32
padded_data = util.pad_data(data)
chol_sigma = chol(cov)
ichol_sigma = L.inv(chol_sigma)
logdet = np.log(np.linalg.det(cov))
means = (mean,) * j
covs = (ichol_sigma,) * j
logdets = (logdet,) * j
packed_params = util.pack_params(means, covs, logdets)
cpu_func = lambda: testmod.cpu_mvnpdf(padded_data, packed_params, k).squeeze()
gpu_func = lambda: testmod._mvnpdf(padded_data, packed_params, k).squeeze()
print cpu_func()
print gpu_func()
# bench(cpu_func, gpu_func, gruns=50)
| dukestats/gpustats | old/scratch.py | Python | bsd-3-clause | 1,670 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import random, bisect
class ItemGenerator(object):
'''Randomly chooses an element from a list.
The choice is not uniform: each element is drawn with a
probability proportional to its given weight.
Just instantiate this class passing a list of pairs
(item, weight), and then call the instance to get items.
'''
def __init__(self, items):
self.puntos = []
self.ponderado = []
total = sum(x[1] for x in items)
acum = 0
for it,peso in items:
acum += peso
self.puntos.append(it)
self.ponderado.append(acum/total)
self.total = acum - 1
def __call__(self):
ind = random.random()
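# self.ponderado holds the normalized cumulative weights, so bisect maps the
# uniform draw in [0, 1) to the element whose weight interval contains it.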
cual = bisect.bisect(self.ponderado, ind)
return self.puntos[cual]
if __name__ == "__main__":
# This shows the usage and also tests the recipe: calling it
# many times should return the elements in roughly the
# given proportions
items = (
("A", 10),
("B", 100),
("C", 5)
)
itgen = ItemGenerator(items)
cuenta = {}
for i in range(1000000):
item = itgen()
cuenta[item] = cuenta.get(item, 0) + 1
print cuenta
| ActiveState/code | recipes/Python/576370_Weighted_random_choice/recipe-576370.py | Python | mit | 1,256 |
__author__ = 'miko'
from Tkinter import Frame
class GameState(Frame):
def __init__(self, *args, **kwargs):
self.stateName = kwargs["stateName"]
self.root = args[0]
self.id = kwargs["id"]
Frame.__init__(self, self.root.mainWindow)
self.config(
background="gold"
)
self.place(relwidth=1, relheight=1)
| FSI-HochschuleTrier/hacker-jeopardy | de/hochschuletrier/jpy/states/GameState.py | Python | mit | 319 |
import os
from pyfaidx import Faidx, Fasta
from nose.tools import raises
from unittest import TestCase
path = os.path.dirname(__file__)
os.chdir(path)
class TestFeatureSplitChar(TestCase):
def setUp(self):
pass
def tearDown(self):
try:
os.remove('data/genes.fasta.fai')
except EnvironmentError:
pass # some tests may delete this file
def test_keys(self):
fasta = Fasta('data/genes.fasta', split_char='|')
expect = ['530364724', '530364725', '530364726', '530373235', '530373237', '530384534', '530384536', '530384538', '530384540', '543583738', '543583740', '543583785', '543583786', '543583788', '543583794', '543583795', '543583796', '557361097', '557361099', '563317589', 'AB821309.1', 'KF435149.1', 'KF435150.1', 'NM_000465.3', 'NM_001282543.1', 'NM_001282545.1', 'NM_001282548.1', 'NM_001282549.1', 'NR_104212.1', 'NR_104215.1', 'NR_104216.1', 'XM_005249642.1', 'XM_005249643.1', 'XM_005249644.1', 'XM_005249645.1', 'XM_005265507.1', 'XM_005265508.1', 'XR_241079.1', 'XR_241080.1', 'XR_241081.1', 'dbj']
result = sorted(fasta.keys())
assert result == expect
def test_key_function_by_dictionary_get_key(self):
fasta = Fasta('data/genes.fasta', split_char='|')
expect = 'TTGAAGATTTTGCATGCAGCAGGTGCGCAAGGTGAAATGTTCACTGTTAAA'
result = fasta['KF435150.1'][100-1:150]
assert str(result) == expect
def test_key_function_by_fetch(self):
faidx = Faidx('data/genes.fasta', split_char='|')
expect = 'TTGAAGATTTTGCATGCAGCAGGTGCGCAAGGTGAAATGTTCACTGTTAAA'
result = faidx.fetch('KF435150.1',
100, 150)
assert str(result) == expect
| mattions/pyfaidx | tests/test_feature_split_char.py | Python | bsd-3-clause | 1,722 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: elb_target_group_facts
short_description: Gather facts about ELB target groups in AWS
description:
- Gather facts about ELB target groups in AWS
version_added: "2.4"
author: Rob White (@wimnat)
options:
load_balancer_arn:
description:
- The Amazon Resource Name (ARN) of the load balancer.
required: false
target_group_arns:
description:
- The Amazon Resource Names (ARN) of the target groups.
required: false
names:
description:
- The names of the target groups.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all target groups
- elb_target_group_facts:
# Gather facts about the target group attached to a particular ELB
- elb_target_group_facts:
load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
# Gather facts about a target groups named 'tg1' and 'tg2'
- elb_target_group_facts:
names:
- tg1
- tg2
'''
RETURN = '''
target_groups:
description: a list of target groups
returned: always
type: complex
contains:
deregistration_delay_timeout_seconds:
description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
returned: always
type: int
sample: 300
health_check_interval_seconds:
description: The approximate amount of time, in seconds, between health checks of an individual target.
returned: always
type: int
sample: 30
health_check_path:
description: The destination for the health check request.
returned: always
type: string
sample: /index.html
health_check_port:
description: The port to use to connect with the target.
returned: always
type: string
sample: traffic-port
health_check_protocol:
description: The protocol to use to connect with the target.
returned: always
type: string
sample: HTTP
health_check_timeout_seconds:
description: The amount of time, in seconds, during which no response means a failed health check.
returned: always
type: int
sample: 5
healthy_threshold_count:
description: The number of consecutive health checks successes required before considering an unhealthy target healthy.
returned: always
type: int
sample: 5
load_balancer_arns:
description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
returned: always
type: list
sample: []
matcher:
description: The HTTP codes to use when checking for a successful response from a target.
returned: always
type: dict
sample: {
"http_code": "200"
}
port:
description: The port on which the targets are listening.
returned: always
type: int
sample: 80
protocol:
description: The protocol to use for routing traffic to the targets.
returned: always
type: string
sample: HTTP
stickiness_enabled:
description: Indicates whether sticky sessions are enabled.
returned: always
type: bool
sample: true
stickiness_lb_cookie_duration_seconds:
description: The time period, in seconds, during which requests from a client should be routed to the same target.
returned: always
type: int
sample: 86400
stickiness_type:
description: The type of sticky sessions.
returned: always
type: string
sample: lb_cookie
tags:
description: The tags attached to the target group.
returned: always
type: dict
sample: "{
'Tag': 'Example'
}"
target_group_arn:
description: The Amazon Resource Name (ARN) of the target group.
returned: always
type: string
sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
target_group_name:
description: The name of the target group.
returned: always
type: string
sample: mytargetgroup
unhealthy_threshold_count:
description: The number of consecutive health check failures required before considering the target unhealthy.
returned: always
type: int
sample: 2
vpc_id:
description: The ID of the VPC for the targets.
returned: always
type: string
sample: vpc-0123456
'''
import traceback
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def get_target_group_attributes(connection, module, target_group_arn):
try:
target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
# Replace '.' with '_' in attribute key names to make it more Ansibley
for k, v in target_group_attributes.items():
target_group_attributes[k.replace('.', '_')] = v
del target_group_attributes[k]
return target_group_attributes
def get_target_group_tags(connection, module, target_group_arn):
try:
return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def list_target_groups(connection, module):
load_balancer_arn = module.params.get("load_balancer_arn")
target_group_arns = module.params.get("target_group_arns")
names = module.params.get("names")
try:
target_group_paginator = connection.get_paginator('describe_target_groups')
if not load_balancer_arn and not target_group_arns and not names:
target_groups = target_group_paginator.paginate().build_full_result()
if load_balancer_arn:
target_groups = target_group_paginator.paginate(LoadBalancerArn=load_balancer_arn).build_full_result()
if target_group_arns:
target_groups = target_group_paginator.paginate(TargetGroupArns=target_group_arns).build_full_result()
if names:
target_groups = target_group_paginator.paginate(Names=names).build_full_result()
except ClientError as e:
if e.response['Error']['Code'] == 'TargetGroupNotFound':
module.exit_json(target_groups=[])
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except NoCredentialsError as e:
module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
# Get the attributes and tags for each target group
for target_group in target_groups['TargetGroups']:
target_group.update(get_target_group_attributes(connection, module, target_group['TargetGroupArn']))
# Turn the boto3 result in to ansible_friendly_snaked_names
snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']]
# Get tags for each target group
for snaked_target_group in snaked_target_groups:
snaked_target_group['tags'] = get_target_group_tags(connection, module, snaked_target_group['target_group_arn'])
module.exit_json(target_groups=snaked_target_groups)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
load_balancer_arn=dict(type='str'),
target_group_arns=dict(type='list'),
names=dict(type='list')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=['load_balancer_arn', 'target_group_arns', 'names'],
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_target_groups(connection, module)
if __name__ == '__main__':
main()
| nrwahl2/ansible | lib/ansible/modules/cloud/amazon/elb_target_group_facts.py | Python | gpl-3.0 | 9,841 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Karmind app - Automotive tool based on OBD-II protocol
# Check www.karmind.com for further details
#
# obd_link.py
#
# Copyright 2010 miguel <enoelrocotiv@gmail.com>
# Copyright 2010 oscar <osc.iglesias@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import serial
import string
import time
import re
import logging
import utils
class OBDPort:
""" OBDPort allows to abstract the communication with ELM-327"""
def __init__(self, portnum, serial_timeout, max_retries, record):
baud = 9600
databits = 8
par = serial.PARITY_NONE
sb = 1
to = serial_timeout
self.ELMver = "Unknown"
self.State = 1 # serial connection state: 1 = connected, 0 = disconnected (connection failed)
self.portnum = portnum
self.record = record
self.max_retries = max_retries
pre_connect_commands = [
'ate0',
'ati',
'ath1',
'atsp0'
]
post_connect_commands = [
'atdp',
'atdpn',
'atstff'
]
logging.debug('Opening interface (serial port)')
self.record.set_info('Opening interface (serial port)')
available_ports = utils.serial_test()
ELM_found = 0
for i in range(len(available_ports)):
try:
self.State = 1
logging.debug('Trying to open %s (serial port)' %available_ports[i])
self.record.set_info('Trying to open '+str(available_ports[i])+' (serial port)')
self.port = serial.Serial(available_ports[i], baud, \
parity = par, stopbits = sb, bytesize = databits,timeout = to)
except serial.SerialException:
logging.debug('Port %s could not be opened' %available_ports[i])
self.State = 0
else:
logging.debug('Port %s opened successfully, trying to connect ...' %available_ports[i])
self.send_command("ati")
data, validation_test = self.get_result("ati")
if re.search('ELM', data):
logging.debug('Found ELM device in port %s !!!' %available_ports[i])
ELM_found = 1
self.portnum = available_ports[i]
self.port.close()
break
else:
logging.debug('ELM device not found in port %s ...' %available_ports[i])
if not(ELM_found):
logging.debug('ELM device could not be found. Trying with port from .ini file...')
""" Now the connection will be made from COM detected earlier, or in case was not, with that from elm.ini """
self.State = 1
try:
logging.debug('Trying to open designed port %s (serial port)' %self.portnum)
self.record.set_info('Trying to open designed port'+ str(self.portnum)+' (serial port)')
self.port = serial.Serial(self.portnum, baud, \
parity = par, stopbits = sb, bytesize = databits,timeout = to)
except serial.SerialException:
self.State = 0
return None
logging.debug('Interface '+ str(self.port.portstr) +' scanned successfully')
self.record.set_info('Interface '+ str(self.port.portstr) +' scanned successfully')
logging.debug('Connecting to ECU...')
self.record.set_info('Connecting to ECU...')
ready = "ERROR"
count=0
while ready == "ERROR": #until error is returned try to connect
try:
self.send_command('atz') # initialize
except serial.SerialException:
self.State = 0
return None
self.ELMver, validation_test = self.get_result('atz')
if not(re.search('ELM',self.ELMver) or re.search('OK', self.ELMver)):
logging.warning('Aborted execution: unable to connect to ELM device')
self.record.set_info('Aborted execution: unable to connect to ELM device')
self.close()
self.State = 0
return None
for i in pre_connect_commands:
self.send_command(i)
got, validation_test = self.get_result(i)
self.send_command("0100") # ping/PID request
ready, validation_test = self.get_result("0100")
if re.search("[0-9]",ready) or re.search("OK", ready):
for i in post_connect_commands:
self.send_command(i)
got, validation_test = self.get_result(i)
else:
logging.debug('Connection attempt failed: '+ready)
self.record.set_info('Connection attempt failed: '+str(ready))
ready='ERROR' #Expecting error message: BUSINIT:.ERROR
time.sleep(5)
logging.debug('Connection attempt: '+str(count))
self.record.set_info('Connection attempt: '+str(count))
count+=1
if count == self.max_retries:
logging.warning('EXECUTION ABORTED: unable to connect after max_retries')
self.record.set_info('EXECUTION ABORTED: unable to connect after max_retries')
self.close()
self.State = 0
return None
def send_command(self, cmd):
"""Internal use only: not a public interface"""
if self.port:
self.port.flushOutput()
self.port.flushInput()
for c in cmd:
self.port.write(c)
self.port.write("\r\n")
return
def get_result(self, cmd):
"""Internal use only: not a public interface"""
if self.port:
buffer = ""
ini_t = time.time()
cur_t = time.time()
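# Read one byte at a time until the ELM prompt '>' arrives or 5 seconds pass.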
while (cur_t-ini_t < 5):
c = self.port.read(1)
cur_t = time.time()
if c == '>' and len(buffer) > 0:
break
else:
if (buffer != "" or c != ">"):
if (c=="\r" or c=="\n" or c==':'):
buffer = buffer + ' '
else:
buffer = buffer + c
if re.search('at|AT|At|aT',cmd):
valid_response = 'SETUP'
else:
valid_response = 'N'
test_pattern = string.replace(buffer,' ','')
check = '4' + cmd[1:]
if re.search(check,test_pattern):
valid_response = 'Y'
logging.info('Output of '+str(cmd)+': '+str(string.strip(buffer)))
if valid_response != 'SETUP':
self.record.set_value(str(cmd),str(string.strip(buffer)))
else:
self.record.set_info(str(cmd),'SETUP')
return buffer, valid_response
return None, None
def close(self):
""" Resets device and closes all associated filehandles"""
if (self.port!= None) and self.State==1:
self.send_command("atz")
self.port.close()
self.port = None
self.ELMver = "Unknown"
self.record.do_complete()
return | jukkar/karmind-obd-application | obd_link.py | Python | gpl-3.0 | 8,145 |
import sqlalchemy as _sqla
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from mc.utils import update_helper
from .query_builder import QueryBuilder
class Db(object):
ITEM_TYPES = ['job', 'flow', 'queue']
class ItemNotFoundError(Exception):
pass
def __init__(self, engine=None, db_uri=None, schema=None,
ensure_tables=False):
if engine:
self.engine = engine
else:
self.db_uri = db_uri
if schema:
self.schema = schema
if ensure_tables:
self.ensure_tables()
self.query_builder = QueryBuilder()
@property
def engine(self):
if not hasattr(self, '_engine'):
db_uri = self.db_uri
if callable(db_uri):
db_uri = db_uri()
self._engine = create_engine(db_uri)
return self._engine
@engine.setter
def engine(self, value): self._engine = value
@property
def schema(self):
if not hasattr(self, '_schema'):
self._schema = self._get_default_schema()
return self._schema
@schema.setter
def schema(self, value): self._schema = value
def _get_default_schema(self):
from . import schema
return schema
@property
def session(self):
if not hasattr(self, '_session'):
self._session = self.Session()
return self._session
@session.setter
def session(self, value): self._session = value
@property
def Session(self):
if not hasattr(self, '_Session'):
self._Session = sessionmaker(bind=self.engine)
return self._Session
@Session.setter
def Session(self, value): self._Session = value
def ensure_tables(self):
assert self.schema is not None
self.create_tables()
def create_tables(self):
self.schema.metadata.create_all(self.engine)
def drop_tables(self):
self.schema.metadata.drop_all(self.engine)
@property
def models(self):
return self.schema.models
def create_item(self, item_type=None, item_kwargs=None):
Model = self.get_model_for_item_type(item_type)
with self.session.begin_nested():
item = Model(**item_kwargs)
self.session.add(item)
return self.item_to_dict(item)
def get_model_for_item_type(self, item_type):
return getattr(self.models, item_type.title())
def query_items(self, item_type=None, query=None):
"""Get items of item_type that match the given query.
Args:
item_type (str): one of :attr:`.ITEM_TYPES`
query (dict, optional): a dict in this shape:
::
{'filters': [filter_dict_1, ...filter_dict_n]}
where a filter_dict has this shape: ::
{'field': 'prop_name', 'op': 'op_name',
'arg': 'op argument'}
Returns:
items (list): a list of retrieved items.
"""
q = self.generate_item_query(item_type=item_type, query_spec=query)
return self.items_to_dicts(items=q)
def generate_item_query(self, item_type=None, query_spec=None):
Model = self.get_model_for_item_type(item_type)
base_query = self.session.query(Model)
item_query = self.query_builder.alter_query_per_query_spec(
query=base_query, query_spec=query_spec)
return item_query
def patch_item(self, item_type=None, key=None, patches=None):
"""
Args:
item_type (str): one of :attr:`.ITEM_TYPES`
key (str): the item's key.
patches (dict): a dict of item props to update.
Returns:
patched_item <dict>: the patched item.
"""
Model = self.get_model_for_item_type(item_type)
with self.session.begin_nested():
item = self.session.query(Model).filter_by(key=key).first()
for field, value in patches.items():
setattr(item, field, value)
self.session.add(item)
return item.to_dict()
def flush(self, item_types=None):
"""clear the db tables."""
for item_type in (item_types or self.ITEM_TYPES):
Model = self.get_model_for_item_type(item_type)
self.session.query(Model).delete()
def items_to_dicts(self, items):
return [self.item_to_dict(item) for item in items]
def item_to_dict(self, item): return item.to_dict()
def get_lock_count_subquery(self):
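# Builds a subquery counting locks grouped by lockee_key; used below when
# claiming flow queue items to skip flows whose tickable tasks are all locked.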
lock_query = (
self.session.query(
_sqla.func.count(self.models.Lock.key).label('lock_count'),
self.models.Lock.lockee_key.label('lockee_key')
)
.group_by(self.models.Lock.lockee_key)
)
return lock_query.subquery()
def delete_items(self, item_type=None, query=None):
"""
Args:
item_type (str): one of :attr:`.ITEM_TYPES`
query (dict): a query dict
Returns:
None
"""
q = self.generate_item_query(item_type=item_type, query_spec=query)
return {'num_deleted': q.delete(synchronize_session='fetch')}
def get_item_by_key(self, item_type=None, key=None):
"""
Args:
item_type (str): one of :attr:`.ITEM_TYPES`
key (str): the item's key
Raises:
ItemNotFoundError
"""
try:
return self.query_items(item_type=item_type, query={
'filters': [{'field': 'key', 'op': '=', 'arg': key}]
})[0]
except IndexError as exc:
error_details = "item_type '{item_type}', key '{key}'".format(
item_type=item_type, key=key)
raise self.ItemNotFoundError(error_details)
def patch_items(self, item_type=None, keyed_patches=None):
"""
Args:
item_type (str): one of :attr:`.ITEM_TYPES`
keyed_patches (dict): a dictionary in which the keys are item_keys
and the values are dicts of item props to update.
Returns:
patched_items (dict): a dictionary of patched results, keyed by
item keys
"""
return {
key: self.patch_item(item_type=item_type, key=key, patches=patches)
for key, patches in keyed_patches.items()
}
def claim_queue_items(self, queue_key=None, **kwargs):
"""
Builds query for queue by examining queue's queue_spec.
Args:
queue_key (str): the queue's key
Returns:
claimed_items (dict): a dict of claim result, in this shape:
::
{items: [claimed_item_1, ..., claimed_item_n]}
"""
queue = self.get_item_by_key(item_type='queue', key=queue_key)
items_to_claim = self.get_queue_items_to_claim(queue=queue)
if items_to_claim:
claimed_items = self.patch_items(
item_type=queue['queue_spec']['item_type'],
keyed_patches={
item['key']: {'claimed': True} for item in items_to_claim
}
).values()
else:
claimed_items = []
return {'items': claimed_items}
def get_queue_items_to_claim(self, queue=None):
"""
Args:
queue (dict): a queue record
Returns:
items (list): a list of items that match the queue's query.
"""
queue_item_type = queue['queue_spec']['item_type']
if queue_item_type == 'flow':
claim_fn = self.get_flow_queue_items_to_claim
else:
claim_fn = self.default_get_queue_items_to_claim
return claim_fn(queue=queue)
def get_flow_queue_items_to_claim(self, queue=None):
"""Gets flow queue items.
Checks for lock records on items.
"""
Flow = self.models.Flow
query = self.session.query(Flow)
query = self.query_builder.alter_query_per_query_spec(
query=query,
query_spec={'filters': self.get_default_claiming_filters()}
)
lock_count_subquery = self.get_lock_count_subquery()
query = (
query.join(
lock_count_subquery,
(Flow.key == lock_count_subquery.c.lockee_key),
isouter=True,
)
.filter(
(Flow.num_tickable_tasks.is_(None))
| (lock_count_subquery.c.lock_count.is_(None))
| (Flow.num_tickable_tasks > lock_count_subquery.c.lock_count)
)
)
return self.items_to_dicts(items=query)
def get_default_claiming_filters(self):
"""
Returns:
filters (list): a list of default filters to use for claiming queue
items.
"""
return [
{'field': 'claimed', 'op': '=', 'arg': False},
{'field': 'status', 'op': 'IN', 'arg': ['PENDING', 'RUNNING']}
]
def default_get_queue_items_to_claim(self, queue=None, filters=None):
"""Default handler for claiming queue items.
Args:
queue (dict): a queue record
filters <list>: filters to use for getting claimable items.
Returns:
items (list): a list of items that match the combination of the
filters and the queue's queue_spec.
"""
return self.query_items(
item_type=queue['queue_spec']['item_type'],
query={'filters': self.get_default_claiming_filters()}
)
def create_lock(self, lockee_key=None, locker_key=None):
"""Create a lock record.
Args:
lockee_key (str): key for the item being locked.
locker_key (str): key for the item that holds the lock.
Returns:
lock_record (dict): a lock_record
"""
self.create_item(
item_type='Lock',
item_kwargs={'lockee_key': lockee_key, 'locker_key': locker_key}
)
def release_locks(self, locker_keys=None):
"""Release locks.
Args:
locker_keys (list): keys for the items that hold the locks.
Returns:
None
"""
return self.delete_items(
item_type='lock',
query={
'filters': [
{'field': 'locker_key', 'op': 'IN', 'arg': locker_keys},
]
}
)
def upsert(self, key=None, updates=None, model_type=None, commit=True):
model_type = model_type or key.split(':')[0].title()
model = getattr(self.models, model_type)
instance, created = self.get_or_create_instance(key=key, model=model)
updates = self._alter_updates(updates)
update_helper.update(instance, updates)
self.session.merge(instance)
if commit:
self.session.commit()
def _alter_updates(self, updates):
return [self._alter_update(update) for update in updates]
def _alter_update(self, update):
calls_that_need_session = ['add_parents_by_key',
'add_ancestors_by_key']
if (
update[1] == '$call' and
update[0].split('.')[-1] in calls_that_need_session
):
altered_update = self._alter_update_that_needs_session(update)
else:
altered_update = update
return altered_update
def _alter_update_that_needs_session(self, update):
session_kwargs = {'session': self.session}
params = [*update[2:]]
if len(params) == 0:
params = [[], session_kwargs]
elif len(params) == 1:
params.append(session_kwargs)
elif len(params) == 2:
params[1] = {**params[1], **session_kwargs}
altered_update = [update[0], update[1], *params]
return altered_update
def get_or_create_instance(self, key=None, model=None):
instance = self.session.query(model).filter_by(key=key).first()
if instance:
return instance, False
else:
model_kwargs = {'key': key}
if model is self.models.Ent:
_, ent_type, key_body = key.split(':', maxsplit=2)
model_kwargs = {'key': key, 'ent_type': ent_type}
instance = model(**model_kwargs)
self.session.add(instance)
return instance, True
def execute_actions(self, actions=None):
results = []
with self.session.begin_nested():
for action in actions or []:
result = self.execute_action(action=action, commit=False)
results.append(result)
self.session.commit()
return results
def execute_action(self, action=None, commit=False):
params = action.get('params', {})
if action['type'] == 'upsert':
fn = self.upsert
params = {**params, 'commit': commit}
return fn(**params)
| aspuru-guzik-group/mission_control | mc/db/db.py | Python | apache-2.0 | 13,175 |
"""Saranani URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
from django.contrib.auth.views import logout, login
urlpatterns = [
url(r'^', include('apps.home.urls')),
url(r'^', include('apps.events.urls')),
url(r'^', include('apps.users.urls', namespace='users')),
url(r'^admin/', admin.site.urls),
# PYTHON SOCIAL AUTH
url('', include('social.apps.django_app.urls', namespace='social')),
url(r'^users/logout/$', logout, {'next_page': '/'}, name="user-logout"),
url(r'^login/$', login, name='login'),
]
| AlexandroPQC/sara-ani | Saranani/Saranani/urls.py | Python | gpl-3.0 | 1,241 |
"""224. Basic Calculator
https://leetcode.com/problems/basic-calculator/
Given a string s representing an expression, implement a basic calculator to
evaluate it.
Example 1:
Input: s = "1 + 1"
Output: 2
Example 2:
Input: s = " 2-1 + 2 "
Output: 3
Example 3:
Input: s = "(1+(4+5+2)-3)+(6+8)"
Output: 23
Constraints:
1 <= s.length <= 3 * 10^5
s consists of digits, '+', '-', '(', ')', and ' '.
s represents a valid expression.
"""
class Solution:
def calculate(self, s: str) -> int:
stack, rev_stack = [], []
i, n, ans = 0, len(s), 0
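# Approach: push numbers and operators onto a stack; on ')' pop back to the
# matching '(' (via rev_stack) and collapse that group into a single number,
# then fold whatever remains on the stack left to right at the end.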
while i < n:
if s[i] == ' ':
i += 1
continue
if s[i].isnumeric():
num = ''
while i < n and s[i].isnumeric():
num += s[i]
i += 1
stack.append(int(num))
elif s[i] == ')':
while stack[-1] != '(':
rev_stack.append(stack.pop())
stack.pop()
cur = 0
while rev_stack:
top = rev_stack.pop()
if isinstance(top, int):
cur += top
elif top == '+':
cur += rev_stack.pop()
elif top == '-':
cur -= rev_stack.pop()
stack.append(cur)
i += 1
else:
stack.append(s[i])
i += 1
while stack:
top = stack.pop(0)
if isinstance(top, int):
ans += top
elif top == '+':
ans += stack.pop(0)
elif top == '-':
ans -= stack.pop(0)
return ans
def calculate_2(self, s: str) -> int:
def helper(expr, x: int) -> int:
ret = 0
while len(expr) > x:
ele = expr.pop(x)
if isinstance(ele, int):
ret += ele
else:
ret = ret + (expr.pop(x) if ele == '+' else - expr.pop(x))
return ret
stack = []
i, n = 0, len(s)
while i < n:
if s[i] == ' ':
i += 1
continue
if s[i].isnumeric():
num = ''
while i < n and s[i].isnumeric():
num += s[i]
i += 1
stack.append(int(num))
elif s[i] == ')':
j = len(stack) - 1
while j >= 0 and stack[j] != '(':
j -= 1
cur = helper(stack, j + 1)
stack.pop()
stack.append(cur)
i += 1
else:
stack.append(s[i])
i += 1
return helper(stack, 0)
| isudox/leetcode-solution | python-algorithm/leetcode/problem_224.py | Python | mit | 2,843 |
import math
from sqlalchemy import not_
from pprint import pprint # noqa
from aleph.index import TYPE_DOCUMENT
from aleph.core import es, es_index
from aleph.model import Collection
from aleph.search.fragments import text_query, filter_query
def round(x, factor):
rnd = int(math.floor(x / float(factor))) * factor
return 'More than %s' % format(rnd, '0,.0f')
def format_total(obj):
total = obj.pop('total', 0)
if total == 0:
total = 'No'
elif total < 15:
total = 'Some'
elif total < 100:
total = round(total, 10)
elif total < 1000:
total = round(total, 100)
else:
total = round(total, 1000)
obj['total'] = total
return obj
def peek_query(state):
"""Peek into hidden collections.
This allows users to retrieve an approximate result count of a given query
against those collections which they are not authorised to view. It is a
rudimentary collaboration mechanism.
"""
filters = state.filters
cq = Collection.all()
cq = cq.filter(not_(Collection.id.in_(state.authz.collections_read)))
cq = cq.filter(Collection.creator_id != None) # noqa
cq = cq.filter(Collection.private != True) # noqa
collections = {c.id: c for c in cq}
filters['collection_id'] = collections.keys()
q = text_query(state.text)
q = {
'query': filter_query(q, filters),
'size': 0,
'aggregations': {
'collections': {
'terms': {'field': 'collection_id', 'size': 1000}
}
},
'_source': False
}
result = es.search(index=es_index, body=q, doc_type=TYPE_DOCUMENT)
roles = {}
total = 0
aggs = result.get('aggregations', {}).get('collections', {})
for bucket in aggs.get('buckets', []):
collection = collections.get(bucket.get('key'))
if collection is None or collection.creator is None:
continue
total += bucket.get('doc_count')
if collection.creator_id in roles:
roles[collection.creator_id]['total'] += bucket.get('doc_count')
else:
roles[collection.creator_id] = {
'name': collection.creator.name,
'email': collection.creator.email,
'total': bucket.get('doc_count')
}
roles = sorted(roles.values(), key=lambda r: r['total'], reverse=True)
roles = [format_total(r) for r in roles]
return format_total({
'roles': roles,
'active': total > 0,
'total': total
})
| smmbllsm/aleph | aleph/search/peek.py | Python | mit | 2,568 |
print bin(1)
print bin(2)
print bin(3)
print bin(4)
print bin(5)
print int("1",2)
print int("10",2)
print int("111",2)
print int("0b100",2)
print int(bin(5),2)
print int("11001001",2)
| KellyChan/python-examples | python/gists/bin.py | Python | mit | 186 |
# coding=utf-8
from django import template
import subprocess
try:
head = subprocess.Popen("git rev-parse --short HEAD", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
HEAD = head.stdout.readline().strip()
except:
HEAD = 'unknown'
register = template.Library()
@register.simple_tag()
def git_head():
return HEAD
| joostrijneveld/eetFestijn | orders/templatetags/git_head.py | Python | cc0-1.0 | 375 |
# encoding: utf8
#
# This file is part of the a2billing-spyne project.
# Copyright (c), Arskom Ltd. (arskom.com.tr),
# Cemrecan Ünal <unalcmre@gmail.com>.
# Burak Arslan <burak.arslan@arskom.com.tr>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Arskom Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from contextlib import closing
from lxml.html.builder import E
from twisted.internet.threads import deferToThread
from spyne import rpc, Array
from spyne.const.http import HTTP_302
from spyne.protocol.html.table import HtmlColumnTable
from neurons.form import HtmlForm, HrefWidget
from a2billing_spyne.model import Extensions
from a2billing_spyne.service import ReaderServiceBase, ScreenBase, DalBase
EXTENSION_CUST = dict(
exten=dict(order=1),
priority=dict(order=2),
app=dict(order=3),
appdata=dict(order=4),
context=dict(order=5)
)
ExtScreen = Extensions.customize(
prot=HtmlForm(), form_action="put_ext",
child_attrs_all=dict(exc=False,),
child_attrs=dict(
id=dict(order=0, write=False),
**EXTENSION_CUST
),
)
class NewExtScreen(ScreenBase):
main = ExtScreen
class NewExtDetailScreen(ScreenBase):
main = ExtScreen
def _write_new_ext_link(ctx, cls, inst, parent, name, *kwargs):
parent.write(E.a("New Extension", href="/new_ext"))
class ExtListScreen(ScreenBase):
main = Array(
Extensions.customize(
child_attrs_all=dict(exc=False,),
child_attrs=dict(
id=dict(prot=HrefWidget("/get_ext?id={}")),
**EXTENSION_CUST
),
),
prot=HtmlColumnTable(before_table=_write_new_ext_link),
)
class ExtDal(DalBase):
def put_ext(self, ext):
with closing(self.ctx.app.config.get_main_store().Session()) as session:
session.add(ext)
session.commit()
return ext
def get_ext(self, ext):
with closing(self.ctx.app.config.get_main_store().Session()) as session:
return session.query(Extensions).filter(Extensions.id ==
ext.id).one()
def get_all_extension(self, ext):
with closing(self.ctx.app.config.get_main_store().Session()) as session:
return session.query(Extensions).all()
class ExtReaderServices(ReaderServiceBase):
@rpc(Extensions, _returns=NewExtScreen,_body_style='bare')
def new_ext(ctx, ext):
return NewExtScreen(title="New Extension", main=ext)
@rpc(Extensions, _returns=NewExtScreen,_body_style='bare')
def get_ext_detail(ctx, ext):
return deferToThread(ExtDal(ctx).get_ext, ext) \
.addCallback(lambda ret:
NewExtDetailScreen(title="Get Extension", main=ret))
@rpc(Extensions, _returns=ExtListScreen,_body_style='bare')
def get_all_extension(ctx, ext):
return deferToThread(ExtDal(ctx).get_all_extension, ext) \
.addCallback(lambda ret: ExtListScreen(title="Extensions",main=ret))
class ExtWriterServices(ReaderServiceBase):
@rpc(Extensions, _body_style='bare')
def put_ext(ctx, ext):
return deferToThread(ExtDal(ctx).put_ext, ext) \
.addCallback(lambda ret: ctx.transport.respond(HTTP_302,
location="get_ext_detail?id=%d" %
ret.id))
| cemrecan/a2billing-spyne | a2billing_spyne/service/extensions.py | Python | bsd-3-clause | 4,821 |
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module for querying BigQuery."""
import collections
import json
import os
import subprocess
from flake_suppressor import results as results_module
from flake_suppressor import tag_utils
from unexpected_passes_common import queries as upc_queries
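# Maximum signed 32-bit integer; presumably used as an effectively unlimited row cap.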
MAX_ROWS = (2**31) - 1
# A note about the try version of the queries: The submitted builds subquery is
# included in each query instead of running it once by itself and including the
# returned data in other queries because we can end up getting a very large
# number of build IDs, which can push the query over BigQuery's hard query size
# limit. The query runs pretty quickly (within a couple of seconds), so
# duplicating it does not add much runtime.
# Subquery for getting all builds used for CL submission in the past
# |sample_period| days. Will be inserted into other queries.
SUBMITTED_BUILDS_SUBQUERY = """\
submitted_builds AS (
SELECT
CONCAT("build-", CAST(unnested_builds.id AS STRING)) as id
FROM
`commit-queue.chromium.attempts`,
UNNEST(builds) as unnested_builds,
UNNEST(gerrit_changes) as unnested_changes
WHERE
unnested_builds.host = "cr-buildbucket.appspot.com"
AND unnested_changes.submit_status = "SUCCESS"
AND start_time > TIMESTAMP_SUB(CURRENT_TIMESTAMP(),
INTERVAL @sample_period DAY)
),
"""
# Gets all failures from the past |sample_period| days from CI bots that did not
# already have an associated test suppression when the test ran.
CI_FAILED_TEST_QUERY = """\
WITH
failed_tests AS (
SELECT
exported.id,
test_metadata.name,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "typ_tag") as typ_tags,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "raw_typ_expectation") as typ_expectations
FROM
`chrome-luci-data.chromium.gpu_ci_test_results` tr
WHERE
status = "FAIL"
AND exported.realm = "chromium:ci"
AND partition_time > TIMESTAMP_SUB(CURRENT_TIMESTAMP(),
INTERVAL @sample_period DAY)
)
SELECT *
FROM failed_tests ft
WHERE
ARRAY_TO_STRING(ft.typ_expectations, '') = "Pass"
"""
# Gets all failures from the past |sample_period| days from trybots that did not
# already have an associated test suppression when the test ran, only including
# data from builds that were used for CL submission.
TRY_FAILED_TEST_QUERY = """\
WITH
{submitted_builds_subquery}
failed_tests AS (
SELECT
exported.id,
test_metadata.name,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "typ_tag") as typ_tags,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "raw_typ_expectation") as typ_expectations
FROM
`chrome-luci-data.chromium.gpu_try_test_results` tr,
submitted_builds sb
WHERE
status = "FAIL"
AND exported.realm = "chromium:try"
AND partition_time > TIMESTAMP_SUB(CURRENT_TIMESTAMP(),
INTERVAL @sample_period DAY)
AND exported.id = sb.id
)
SELECT *
FROM failed_tests ft
WHERE
ARRAY_TO_STRING(ft.typ_expectations, '') = "Pass"
""".format(submitted_builds_subquery=SUBMITTED_BUILDS_SUBQUERY)
# Gets the count of all results in the past |sample_period| days for distinct
# test/tag combinations from CI bots.
CI_RESULT_COUNT_QUERY = """\
WITH
grouped_results AS (
SELECT
exported.id as id,
test_metadata.name as name,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "typ_tag") as typ_tags
FROM
`chrome-luci-data.chromium.gpu_ci_test_results` tr
WHERE
exported.realm = "chromium:ci"
AND partition_time > TIMESTAMP_SUB(CURRENT_TIMESTAMP(),
INTERVAL @sample_period DAY)
)
SELECT
COUNT(gr.id) as result_count,
ANY_VALUE(gr.name) as test_name,
ANY_VALUE(gr.typ_tags) as typ_tags
FROM grouped_results gr
GROUP BY gr.name, ARRAY_TO_STRING(gr.typ_tags, '')
"""
# Gets the count of all results in the past |sample_period| days for distinct
# test/tag combinations from trybots, only including data from builds that were
# used for CL submission.
TRY_RESULT_COUNT_QUERY = """\
WITH
{submitted_builds_subquery}
grouped_results AS (
SELECT
exported.id as id,
test_metadata.name as name,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "typ_tag") as typ_tags
FROM
`chrome-luci-data.chromium.gpu_try_test_results` tr,
submitted_builds sb
WHERE
exported.realm = "chromium:try"
AND partition_time > TIMESTAMP_SUB(CURRENT_TIMESTAMP(),
INTERVAL @sample_period DAY)
AND exported.id = sb.id
)
SELECT
COUNT(gr.id) as result_count,
ANY_VALUE(gr.name) as test_name,
ANY_VALUE(gr.typ_tags) as typ_tags
FROM grouped_results gr
GROUP BY gr.name, ARRAY_TO_STRING(gr.typ_tags, '')
""".format(submitted_builds_subquery=SUBMITTED_BUILDS_SUBQUERY)
class BigQueryQuerier():
def __init__(self, sample_period, billing_project):
"""Class for making calls to BigQuery.
Args:
sample_period: An int denoting the number of days that data should be
queried over.
billing_project: A string containing the billing project to use for
BigQuery queries.
"""
self._sample_period = sample_period
self._billing_project = billing_project
def GetFlakyOrFailingCiTests(self):
"""Gets all flaky or failing GPU tests from CI.
Returns:
A JSON representation of the BigQuery results containing all found flaky
or failing test results that came from CI bots.
"""
return self._GetJsonResultsFromBigQuery(CI_FAILED_TEST_QUERY)
def GetFlakyOrFailingTryTests(self):
"""Gets all flaky or failing GPU tests from the trybots.
Limits results to those that came from builds used for CL submission.
Returns:
A JSON representation of the BigQuery results containing all found flaky
or failing test results that came from trybots AND came from builds that
were used for CL submission.
"""
return self._GetJsonResultsFromBigQuery(TRY_FAILED_TEST_QUERY)
def GetResultCounts(self):
"""Gets the result count for each test/config combination.
Returns:
A dict in the format:
{
typ_tags (tuple): {
test_name (str): result_count (int)
}
}
"""
# A default dict of default dicts of ints.
    result_counts = collections.defaultdict(
        lambda: collections.defaultdict(int))
self._GetResultCountWithQuery(CI_RESULT_COUNT_QUERY, result_counts)
self._GetResultCountWithQuery(TRY_RESULT_COUNT_QUERY, result_counts)
return result_counts
def _GetJsonResultsFromBigQuery(self, query):
"""Gets the JSON results from a BigQuery query.
Automatically passes in the "@sample_period" parameterized argument to
BigQuery.
Args:
query: A string containing the SQL query to run in BigQuery.
Returns:
The loaded JSON results from running |query|.
"""
cmd = upc_queries.GenerateBigQueryCommand(
self._billing_project,
{'INT64': {
'sample_period': self._sample_period
}},
batch=False)
with open(os.devnull, 'w') as devnull:
completed_process = subprocess.run(cmd,
input=query,
stdout=subprocess.PIPE,
stderr=devnull,
check=True,
text=True)
return json.loads(completed_process.stdout)
def _GetResultCountWithQuery(self, query, result_counts):
"""Helper to get result counts using a particular query.
Args:
query: A string containing a SQL query to run.
result_counts: A defaultdict of defaultdict of ints that will be modified
in place to tally result counts.
"""
json_results = self._GetJsonResultsFromBigQuery(query)
for r in json_results:
typ_tags = tuple(tag_utils.RemoveMostIgnoredTags(r['typ_tags']))
test_name = r['test_name']
_, test_name = results_module.GetTestSuiteAndNameFromResultDbName(
test_name)
count = int(r['result_count'])
result_counts[typ_tags][test_name] += count
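# Illustrative usage sketch (the billing project name below is a placeholder,
# not a real project):
#   querier = BigQueryQuerier(sample_period=30, billing_project='example-project')
#   ci_results = querier.GetFlakyOrFailingCiTests()
#   try_results = querier.GetFlakyOrFailingTryTests()
#   result_counts = querier.GetResultCounts()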
| chromium/chromium | content/test/gpu/flake_suppressor/queries.py | Python | bsd-3-clause | 8,625 |
#!/usr/bin/env python
from __future__ import unicode_literals
from setuptools import setup, find_packages
install_requires = [
"Jinja2",
"boto>=2.20.0",
"flask",
"httpretty>=0.6.1",
"requests",
"xmltodict",
"six",
"werkzeug",
]
import sys
if sys.version_info < (2, 7):
    # No builtin OrderedDict before 2.7
install_requires.append('ordereddict')
setup(
name='moto',
version='0.4.7',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
author_email='spulec@gmail',
url='https://github.com/spulec/moto',
entry_points={
'console_scripts': [
'moto_server = moto.server:main',
],
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
license="Apache",
test_suite="tests",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Testing",
],
)
| jotes/moto | setup.py | Python | apache-2.0 | 1,286 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import numpy
import pandas
import dendropy
import Bio.Alphabet
from Bio.AlignIO import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from pandas.util.testing import (
assert_categorical_equal,
assert_dict_equal,
assert_frame_equal,
assert_index_equal,
assert_produces_warning,
assert_series_equal)
from pandascharm import (
frame_as_categorical,
frame_as_object,
from_charmatrix,
to_charmatrix,
from_bioalignment,
to_bioalignment,
from_sequence_dict,
to_sequence_dict)
class TestAsCategorical():
frame = pandas.DataFrame({
't1': ['T', 'G', 'C', 'A', '?'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', 'C', 'A', 'A'],
't4': ['T', 'G', 'C', 'A', 'A']}, dtype='category')
def test_unaltered_categories(self):
assert (
set(frame_as_categorical(self.frame)['t1'].cat.categories) ==
set(self.frame['t1'].cat.categories))
def test_altered_categories(self):
assert (
set(frame_as_categorical(self.frame)['t2'].cat.categories) !=
set(self.frame['t2'].cat.categories))
def test_add_category(self):
assert(
set(
frame_as_categorical(self.frame, ['-'])['t1'].cat.categories
) == {'T', 'G', 'C', 'A', '?', '-'})
class TestAsObject():
frame_cat = pandas.DataFrame({
't1': ['T', 'G', 'C', 'A', '?'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', 'C', 'A', 'A'],
't4': ['T', 'G', 'C', 'A', 'A']}, dtype='category')
frame_obj = pandas.DataFrame({
't1': ['T', 'G', 'C', 'A', '?'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', 'C', 'A', 'A'],
't4': ['T', 'G', 'C', 'A', 'A']}, dtype='object')
def test_conversion(self):
assert_frame_equal(frame_as_object(self.frame_cat), self.frame_obj)
class TestCharmatrixConversion():
dna_charmatrix_string = '3 5\nt1 TCCAA\nt2 TGCAA\nt3 TG-AA\n'
dna_charmatrix = dendropy.DnaCharacterMatrix.get(
data=dna_charmatrix_string, schema='phylip')
dna_frame = pandas.DataFrame({
't1': ['T', 'C', 'C', 'A', 'A'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', '-', 'A', 'A']}, dtype='category')
rna_charmatrix_string = '3 5\nt1 UCCAA\nt2 UGCAA\nt3 UG-AA\n'
rna_charmatrix = dendropy.RnaCharacterMatrix.get(
data=rna_charmatrix_string, schema='phylip')
rna_frame = pandas.DataFrame({
't1': ['U', 'C', 'C', 'A', 'A'],
't2': ['U', 'G', 'C', 'A', 'A'],
't3': ['U', 'G', '-', 'A', 'A']}, dtype='category')
protein_charmatrix_string = '3 5\nt1 VKYPN\nt2 VLYPN\nt3 VL-PN\n'
protein_charmatrix = dendropy.ProteinCharacterMatrix.get(
data=protein_charmatrix_string, schema='phylip')
protein_frame = pandas.DataFrame({
't1': ['V', 'K', 'Y', 'P', 'N'],
't2': ['V', 'L', 'Y', 'P', 'N'],
't3': ['V', 'L', '-', 'P', 'N']}, dtype='category')
standard_charmatrix_string = '3 5\nt1 01010\nt2 02010\nt3 02-10\n'
standard_charmatrix = dendropy.StandardCharacterMatrix.get(
data=standard_charmatrix_string, schema='phylip')
standard_frame = pandas.DataFrame({
't1': ['0', '1', '0', '1', '0'],
't2': ['0', '2', '0', '1', '0'],
't3': ['0', '2', '-', '1', '0']}, dtype='category')
def test_from_charmatrix_dna(self):
assert_frame_equal(
from_charmatrix(self.dna_charmatrix), self.dna_frame,
check_categorical=False)
def test_from_charmatrix_dna_object(self):
assert_frame_equal(
from_charmatrix(self.dna_charmatrix, categorical=False),
frame_as_object(self.dna_frame))
def test_to_charmatrix_dna(self):
assert (
to_charmatrix(self.dna_frame, data_type='dna')
.as_string('phylip') == self.dna_charmatrix.as_string('phylip'))
def test_from_charmatrix_rna(self):
assert_frame_equal(
from_charmatrix(self.rna_charmatrix), self.rna_frame,
check_categorical=False)
def test_to_charmatrix_rna(self):
assert (
to_charmatrix(self.rna_frame, data_type='rna')
.as_string('phylip') == self.rna_charmatrix.as_string('phylip'))
def test_from_charmatrix_protein(self):
assert_frame_equal(
from_charmatrix(self.protein_charmatrix), self.protein_frame,
check_categorical=False)
def test_to_charmatrix_protein(self):
assert (
to_charmatrix(self.protein_frame, data_type='protein')
.as_string('phylip') == self.protein_charmatrix
.as_string('phylip'))
def test_from_charmatrix_standard(self):
assert_frame_equal(
from_charmatrix(self.standard_charmatrix), self.standard_frame,
check_categorical=False)
def test_to_charmatrix_standard(self):
assert (
to_charmatrix(self.standard_frame, data_type='standard')
.as_string('phylip') == self.standard_charmatrix
.as_string('phylip'))
def test_invalid_data_type(self):
with pytest.raises(ValueError):
to_charmatrix(self.standard_frame, data_type='unknown')
class TestBioalignmentConversion():
def dict_to_bioalignment(d, alphabet='generic_alphabet', sorted=True):
"""
Create a BioPython MultipleSequenceAlignment
from a dict with pairs consisting of id and sequence.
"""
alignment = MultipleSeqAlignment([])
bio_alphabet = getattr(Bio.Alphabet, alphabet)
for id, seq in d.items():
seq_record = SeqRecord(Seq(seq, alphabet=bio_alphabet), id=id)
alignment.append(seq_record)
if sorted:
alignment.sort()
return alignment
dna_alignment_dict = {'t1': 'TCCAA', 't2': 'TGCAA', 't3': 'TG-AA'}
dna_bioalignment = dict_to_bioalignment(
dna_alignment_dict, alphabet='generic_dna')
dna_frame = pandas.DataFrame({
't1': ['T', 'C', 'C', 'A', 'A'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', '-', 'A', 'A']}, dtype='category')
def test_from_bioalignment_dna(self):
assert_frame_equal(
from_bioalignment(self.dna_bioalignment), self.dna_frame)
def test_to_bioalignment_dna(self):
assert (
to_bioalignment(self.dna_frame, alphabet='generic_dna')
.format('phylip') == self.dna_bioalignment.format('phylip'))
def test_invalid_alphabet(self):
with pytest.raises(ValueError):
to_bioalignment(self.dna_frame, alphabet='dna')
class TestSequenceDictConversion():
dna_frame = pandas.DataFrame({
't1': ['T', 'C', 'C', 'A', 'A'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', '-', 'A', 'A']}, dtype='object')
dna_frame_nan = pandas.DataFrame({
't1': ['T', 'C', 'C', 'A', 'A'],
't2': ['T', 'G', 'C', 'A', 'A'],
't3': ['T', 'G', '-', 'A', numpy.nan]}, dtype='object')
dna_dict = {'t1': 'TCCAA', 't2': 'TGCAA', 't3': 'TG-AA'}
def test_from_sequence_dict(self):
assert_frame_equal(
from_sequence_dict(self.dna_dict, categorical=False),
self.dna_frame)
def test_to_sequence_dict(self):
assert(to_sequence_dict(self.dna_frame) == self.dna_dict)
    def test_to_sequence_dict_nan(self):
with pytest.raises(TypeError):
to_sequence_dict(self.dna_frame_nan)
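# Illustrative round-trip sketch based on the fixtures above (not part of the
# test suite itself):
#   matrix = dendropy.DnaCharacterMatrix.get(
#       data='3 5\nt1 TCCAA\nt2 TGCAA\nt3 TG-AA\n', schema='phylip')
#   frame = from_charmatrix(matrix)            # categorical pandas.DataFrame
#   matrix_again = to_charmatrix(frame, data_type='dna')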
| jmenglund/pandas-charm | test_pandascharm.py | Python | mit | 7,631 |
from datetime import datetime
from sqlalchemy.orm import reconstructor, relationship, backref
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, Unicode, Boolean, DateTime
from sqlalchemy import BigInteger
from sqlalchemy.sql.expression import false, or_
from sqlalchemy.ext.associationproxy import association_proxy
from openspending.core import db
from openspending.model.common import (MutableDict, JSONType,
DatasetFacetMixin)
class DataOrg(db.Model):
""" The dataset is the core entity of any access to data. All
requests to the actual data store are routed through it, as well
as data loading and model generation.
The dataset keeps an in-memory representation of the data model
(including all dimensions and measures) which can be used to
generate necessary queries.
"""
__tablename__ = 'dataorg'
__searchable__ = ['label', 'description']
id = Column(Integer, primary_key=True)
label = Column(Unicode(2000))
description = Column(Unicode())
ORTemplate = Column(MutableDict.as_mutable(JSONType), default=dict)
mappingTemplate = Column(MutableDict.as_mutable(JSONType), default=dict)
prefuncs = Column(MutableDict.as_mutable(JSONType), default=dict)
lastUpdated = Column(DateTime, onupdate=datetime.utcnow)
#metadataorg_id = Column(Integer, ForeignKey('metadataorg.id'))
# metadataorg = relationship(MetadataOrg,
# backref=backref('dataorgs', lazy='dynamic'))
def __init__(self, dataorg=None):
if not dataorg:
return
self.label = dataorg.get('label')
self.description = dataorg.get('description')
self.ORTemplate = dataorg.get('ORTemplate', {})
self.mappingTemplate = dataorg.get('mappingTemplate', {})
self.prefuncs = dataorg.get('prefuncs', {})
self.lastUpdated = datetime.utcnow()
def touch(self):
""" Update the dataset timestamp. This is used for cache
invalidation. """
        self.lastUpdated = datetime.utcnow()
db.session.add(self)
def to_json_dump(self):
""" Returns a JSON representation of an SQLAlchemy-backed object.
"""
json = {}
json['fields'] = {}
json['pk'] = getattr(self, 'id')
json['model'] = "DataOrg"
fields = ['label','description','ORTemplate','mappingTemplate','prefuncs']
for field in fields:
json['fields'][field] = getattr(self, field)
return json
@classmethod
def import_json_dump(cls, theobj):
fields = ['label','description','ORTemplate','mappingTemplate','prefuncs']
classobj = cls()
for field in fields:
setattr(classobj, field, theobj['fields'][field])
#classobj.set(field, theobj['fields'][field])
db.session.add(classobj)
db.session.commit()
return classobj.id
def __repr__(self):
return "<DataOrg(%r,%r)>" % (self.id, self.label)
def update(self, dataorg):
        self.label = dataorg.get('label')
        self.description = dataorg.get('description')
        self.ORTemplate = dataorg.get('ORTemplate', {})
        self.mappingTemplate = dataorg.get('mappingTemplate', {})
        self.prefuncs = dataorg.get('prefuncs', {})
self.lastUpdated = datetime.utcnow()
def as_dict(self):
return {
'id' : self.id,
'label': self.label,
'description': self.description,
'lastUpdated': self.lastUpdated
}
@classmethod
def get_all_admin(cls, order=True):
""" Query available datasets based on dataset visibility. """
q = db.session.query(cls)
if order:
q = q.order_by(cls.label.asc())
return q
@classmethod
def get_all(cls, order=True):
""" Query available datasets based on dataset visibility. """
q = db.session.query(cls)
if order:
q = q.order_by(cls.label.asc())
return q
@classmethod
def all(cls, order=True):
""" Query available datasets based on dataset visibility. """
q = db.session.query(cls)
if order:
q = q.order_by(cls.label.asc())
return q
@classmethod
def by_name(cls, label):
return db.session.query(cls).filter_by(label=label).first()
@classmethod
def by_id(cls, id):
return db.session.query(cls).filter_by(id=id).first()
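# Illustrative usage sketch (field values are placeholders):
#   org = DataOrg({'label': 'Example Org', 'description': 'Demo entry'})
#   db.session.add(org)
#   db.session.commit()
#   dump = org.to_json_dump()
#   copy_id = DataOrg.import_json_dump(dump)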
#TODO
# class MetadataOrgSettings(colander.MappingSchema):
# fullname = colander.SchemaNode(colander.String())
# email = colander.SchemaNode(colander.String(),
# validator=colander.Email())
# public_email = colander.SchemaNode(colander.Boolean(), missing=False)
# twitter = colander.SchemaNode(colander.String(), missing=None,
# validator=colander.Length(max=140))
# public_twitter = colander.SchemaNode(colander.Boolean(), missing=False)
# password1 = colander.SchemaNode(colander.String(),
# missing=None, default=None)
# password2 = colander.SchemaNode(colander.String(),
# missing=None, default=None)
# script_root = colander.SchemaNode(colander.String(),
# missing=None, default=None) | USStateDept/FPA_Core | openspending/model/dataorg.py | Python | agpl-3.0 | 5,452 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_listwidget.ui'
#
# Created: Fri Apr 5 10:20:33 2013
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_GenericTreeWidget(object):
def setupUi(self, GenericTreeWidget):
GenericTreeWidget.setObjectName(_fromUtf8("GenericTreeWidget"))
GenericTreeWidget.resize(463, 376)
self.verticalLayout = QtGui.QVBoxLayout(GenericTreeWidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.topLayout = QtGui.QHBoxLayout()
self.topLayout.setObjectName(_fromUtf8("topLayout"))
self.textlabel = QtGui.QLabel(GenericTreeWidget)
self.textlabel.setObjectName(_fromUtf8("textlabel"))
self.topLayout.addWidget(self.textlabel)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.topLayout.addItem(spacerItem)
self.verticalLayout.addLayout(self.topLayout)
self.treeWidget = QtGui.QTreeWidget(GenericTreeWidget)
self.treeWidget.setObjectName(_fromUtf8("treeWidget"))
self.treeWidget.headerItem().setText(0, _fromUtf8("1"))
self.verticalLayout.addWidget(self.treeWidget)
self.bottomLayout = QtGui.QHBoxLayout()
self.bottomLayout.setObjectName(_fromUtf8("bottomLayout"))
self.verticalLayout.addLayout(self.bottomLayout)
self.retranslateUi(GenericTreeWidget)
QtCore.QMetaObject.connectSlotsByName(GenericTreeWidget)
def retranslateUi(self, GenericTreeWidget):
GenericTreeWidget.setWindowTitle(QtGui.QApplication.translate("GenericTreeWidget", "GenericTreeWidget", None, QtGui.QApplication.UnicodeUTF8))
self.textlabel.setText(QtGui.QApplication.translate("GenericTreeWidget", "Teksti", None, QtGui.QApplication.UnicodeUTF8))
| mape90/VetApp | uipy/ui_listwidget.py | Python | gpl-3.0 | 2,028 |
# demo start
from sanic import Sanic, Blueprint
from sanic.response import text
from sanic_limiter import Limiter, get_remote_address
app = Sanic(__name__)
limiter = Limiter(app, global_limits=['1 per hour', '10 per day'], key_func=get_remote_address)
bp = Blueprint('some_bp')
limiter.limit("2 per hour")(bp)
@bp.route("/bp1")
async def bp_t1(request):
return text("bp_t1")
@app.route("/t1")
@limiter.limit("100 per hour;10/minute")
async def t1(request):
return text("t1")
@app.route("/t2")
async def t2(request):
return text("t2")
@app.route("/t3")
@limiter.exempt
async def t3(request):
return text("t3")
@app.route("/t4/<part>")
@limiter.limit("1/minute")
async def t4(request, part):
return text(part)
@app.route("/t5/<part>")
@limiter.shared_limit("1/minute", scope="t5")
async def t5(request, part):
return text(part)
app.blueprint(bp)
app.run(host="0.0.0.0", port=5000, debug=True)
# demo end
import unittest
class DemoTest(unittest.TestCase):
def test_demo(self):
self.assertEqual(app.test_client.get('/t1')[1].body.decode(), 't1')
self.assertEqual(app.test_client.get('/t1')[1].status, 200)
self.assertEqual(app.test_client.get('/t1')[1].status, 200)
self.assertEqual(app.test_client.get('/t2')[1].status, 200)
self.assertEqual(app.test_client.get('/t2')[1].status, 429)
self.assertEqual(app.test_client.get('/t3')[1].status, 200)
self.assertEqual(app.test_client.get('/t3')[1].status, 200)
self.assertEqual(app.test_client.get('/t4/one')[1].status, 200)
self.assertEqual(app.test_client.get('/t4/one')[1].status, 429)
self.assertEqual(app.test_client.get('/t4/two')[1].status, 200)
self.assertEqual(app.test_client.get('/t4/two')[1].status, 429)
self.assertEqual(app.test_client.get('/t5/foo')[1].status, 200)
self.assertEqual(app.test_client.get('/t5/bar')[1].status, 429)
self.assertEqual(app.test_client.get('/bp1')[1].status, 200)
self.assertEqual(app.test_client.get('/bp1')[1].status, 200)
self.assertEqual(app.test_client.get('/bp1')[1].status, 429)
| bohea/sanic-limiter | test/test_demo.py | Python | mit | 2,145 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on May 17, 2011
@author: frank
'''
from OvmCommonModule import *
class OvmVifDecoder(json.JSONDecoder):
def decode(self, jStr):
deDict = asciiLoads(jStr)
vif = OvmVif()
vif.mac = deDict['mac']
vif.bridge = deDict['bridge']
return vif
class OvmVifEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, OvmVif): raise Exception("%s is not instance of OvmVif"%type(obj))
dct = {}
safeDictSet(obj, dct, 'mac')
safeDictSet(obj, dct, 'bridge')
safeDictSet(obj, dct, 'type')
safeDictSet(obj, dct, 'name')
return dct
def fromOvmVif(vif):
return normalizeToGson(json.dumps(vif, cls=OvmVifEncoder))
def fromOvmVifList(vifList):
return [fromOvmVif(v) for v in vifList]
def toOvmVif(jStr):
return json.loads(jStr, cls=OvmVifDecoder)
def toOvmVifList(jStr):
vifs = []
for i in jStr:
vif = toOvmVif(i)
vifs.append(vif)
return vifs
class OvmVif(OvmObject):
name = ''
mac = ''
bridge = ''
type = ''
mode = ''
def toXenString(self):
return "%s,%s,%s"%(self.mac, self.bridge, self.type)
| argv0/cloudstack | plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVifModule.py | Python | apache-2.0 | 1,984 |
import webracer
import re
import nose.tools
import nose.plugins.attrib
from . import utils
from .apps import form_app
utils.app_runner_setup(__name__, form_app.app, 8043)
@nose.plugins.attrib.attr('client')
@webracer.config(host='localhost', port=8043)
class FormTest(webracer.WebTestCase):
def test_with_specified_attributes(self):
self.get('/one_form')
self.assert_status(200)
form = self.response.form()
self.assertEqual('/dump_params', form.action)
# always a full url
self.assertEqual('http://localhost:8043/dump_params', form.computed_action)
self.assertEqual('post', form.method)
self.assertEqual('POST', form.computed_method)
def test_without_specified_attributes(self):
self.get('/no_attribute_form')
self.assert_status(200)
form = self.response.form()
self.assertTrue(form.action is None)
self.assertEqual('http://localhost:8043/no_attribute_form', form.computed_action)
self.assertTrue(form.method is None)
self.assertEqual('GET', form.computed_method)
def test_computed_action_relative(self):
self.get('/subdir/relative_action_form')
self.assert_status(200)
form = self.response.form()
self.assertEqual('in_subdir', form.action)
self.assertEqual('http://localhost:8043/subdir/in_subdir', form.computed_action)
def test_params(self):
self.get('/no_attribute_form')
self.assert_status(200)
form = self.response.form()
params = form.params.list
if isinstance(params, list):
params = tuple(params)
expected = (('textf', 'textv'),)
self.assertEqual(expected, params)
def test_params_select_not_selected(self):
self.get('/form_with_select_not_selected')
self.assert_status(200)
form = self.response.form()
params = form.params.list
if isinstance(params, list):
params = tuple(params)
expected = (('selectf', 'first'),)
self.assertEqual(expected, params)
def test_params_select_selected(self):
self.get('/form_with_select_selected')
self.assert_status(200)
form = self.response.form()
params = form.params.list
if isinstance(params, list):
params = tuple(params)
expected = (('selectf', 'second'),)
self.assertEqual(expected, params)
def test_params_select_with_optgroup(self):
self.get('/form_with_optgroup')
self.assert_status(200)
form = self.response.form()
params = form.params.list
if isinstance(params, list):
params = tuple(params)
expected = (('selectf', 'first'),)
self.assertEqual(expected, params)
def test_multiple_submits(self):
self.get('/form_with_two_submits')
self.assert_status(200)
form = self.response.form()
params = form.params.dict
# first submit element should be returned by default
self.assertTrue('submit-first' in params)
self.assertTrue('submit-second' not in params)
# choose another submit button
elements = form.elements.mutable
elements.submit('submit-second')
params = elements.params.dict
self.assertTrue('submit-first' not in params)
self.assertTrue('submit-second' in params)
# submit and verify, this is really unnecessary but
# I already wrote the target
self.post(form.computed_action, body=params)
self.assert_status(200)
self.assertEquals({'submit-second': 'second'}, self.response.json)
def test_nameless_default_submit_with_multiple_submits(self):
self.get('/form_with_nameless_and_named_submits')
self.assert_status(200)
form = self.response.form()
self.submit_form(form)
self.assert_status(200)
self.assertEquals({}, self.response.json)
def test_set_value(self):
self.get('/one_form')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
elements.set_value('textf', 'newvalue')
params = elements.params.list
self.assertEqual([['textf', 'newvalue']], utils.listit(params))
def test_clear_text(self):
self.get('/one_form')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
elements.clear('textf')
params = elements.params.list
self.assertEqual([['textf', '']], utils.listit(params))
def test_set_and_clear_text(self):
self.get('/one_form')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
elements.set_value('textf', 'newvalue')
params = elements.params.list
self.assertEqual([['textf', 'newvalue']], utils.listit(params))
elements.clear('textf')
params = elements.params.list
self.assertEqual([['textf', '']], utils.listit(params))
def test_set_value_on_missing_element(self):
self.get('/one_form')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
# https://github.com/nose-devs/nose/issues/30
with self.assert_raises(ValueError) as cm:
elements.set_value('missing', 'newvalue')
assert 'Did not find element with name' in str(cm.exception)
def test_clear_on_missing_element(self):
self.get('/one_form')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
# https://github.com/nose-devs/nose/issues/30
with self.assert_raises(ValueError) as cm:
elements.clear('missing')
assert 'Did not find element with name' in str(cm.exception)
def test_first_radio_selected(self):
self.get('/first_radio_selected')
self.assert_status(200)
form = self.response.form()
elements = form.elements
self.assertEquals([['field', 'first']], utils.listit(elements.params.list))
def test_second_radio_selected(self):
self.get('/second_radio_selected')
self.assert_status(200)
form = self.response.form()
elements = form.elements
self.assertEquals([['field', 'second']], utils.listit(elements.params.list))
def test_radio_selection(self):
self.get('/first_radio_selected')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
self.assertEquals([['field', 'first']], utils.listit(elements.params.list))
# select the other radio button
elements.set_value('field', 'second')
self.assertEquals([['field', 'second']], utils.listit(elements.params.list))
# select a nonexistent radio button
try:
elements.set_value('field', 'nonexistent')
except ValueError as e:
assert re.search(r'Element .* does not have .* as a possible value', str(e))
else:
self.fail('Expected ValueError to be raised')
def test_checkboxes(self):
self.get('/checkboxes')
self.assert_status(200)
form = self.response.form()
elements = form.elements
self.assertEquals([['field', 'second']], utils.listit(elements.params.list))
def test_clear_checkbox(self):
self.get('/checkboxes')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
self.assertEquals([['field', 'second']], utils.listit(elements.params.list))
elements.clear('field', 'second')
self.assertEquals([], utils.listit(elements.params.list))
def test_checkbox_selection(self):
self.get('/checkboxes')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
self.assertEquals([['field', 'second']], utils.listit(elements.params.list))
# select the other checkbox
elements.set_value('field', 'first')
self.assertEquals([['field', 'first'], ['field', 'second']], utils.listit(elements.params.list))
def test_unvalued_checkbox_value(self):
self.get('/unvalued_checkbox')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
elements.set_value('field', True)
self.assertEquals([['field', 'on']], utils.listit(elements.params.list))
def test_set_and_clear_checkbox(self):
self.get('/checkboxes')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
self.assertEquals([['field', 'second']], utils.listit(elements.params.list))
# select the other checkbox
elements.set_value('field', 'first')
self.assertEquals([['field', 'first'], ['field', 'second']], utils.listit(elements.params.list))
# clear the other checkbox
elements.clear('field', 'first')
self.assertEquals([['field', 'second']], utils.listit(elements.params.list))
def test_empty_textarea(self):
self.get('/empty_textarea')
self.assert_status(200)
form = self.response.form()
elements = form.elements
self.assertEquals([['field', '']], utils.listit(elements.params.list))
def test_textarea(self):
self.get('/textarea')
self.assert_status(200)
form = self.response.form()
elements = form.elements
self.assertEquals([['field', 'hello world']], utils.listit(elements.params.list))
def test_submit_form(self):
self.get('/one_form')
self.assert_status(200)
form = self.response.form()
self.submit_form(form)
self.assertEquals({'textf': 'textv'}, self.response.json)
def test_submit_form_with_elements(self):
self.get('/one_form')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
elements.set_value('textf', 'modvalue')
self.submit_form(form, elements)
self.assertEquals({'textf': 'modvalue'}, self.response.json)
| p/webracer | tests/form_test.py | Python | bsd-2-clause | 10,706 |
#!/usr/bin/env python3
# Copyright (C) 2017 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
# It takes ~11s on nucleo-l152re, so add some margin
TIMEOUT = 15
def testfunc(child):
    child.expect(r'OK \(\d+ tests\)', timeout=TIMEOUT)
if __name__ == "__main__":
sys.exit(run(testfunc))
| cladmi/RIOT | tests/pkg_qdsa/tests/01-run.py | Python | lgpl-2.1 | 481 |
#!/usr/bin/env python
#
# Compute changes in on-field composition during
# an international match (Brazil vs Croatia).
#
from soccermetrics.rest import SoccermetricsRestClient
client = SoccermetricsRestClient()
home_club = "Brazil"
away_club = "Croatia"
# get match information
match = client.natl.information.get(home_team_name=home_club,
away_team_name=away_club).all()
# collect name and ID of all players in lineups
lineup = client.link.get(match[0].link.lineups).all()
players = {x.player:x.playerName for x in lineup}
# get all segments of the match
segments = client.link.get(match[0].link.analytics.segments).all()
# loop over all segments
# return players in each segment
platoon = lambda rec: ', '.join([players[_id] for _id in rec])
for segment in segments:
if segment.startStoppageMins > 0:
match_time = "%d+%d" % (segment.startTimeMins,
segment.startStoppageMins)
else:
match_time = "%d" % segment.startTimeMins
print "Start segment: %s" % match_time
print "Home Players: %s" % platoon(segment.homePlayersOn)
print "Away Players: %s" % platoon(segment.awayPlayersOn)
print "Duration: %s mins" % segment.duration | soccermetrics/soccermetrics-client-py | examples/nationalteams/example_pbp.py | Python | mit | 1,228 |
from nltk import AlignedSent
from stalimet.ibm2_exact import IBMModel2Exact
class SentencePair(AlignedSent):
@property
def score(self) -> float:
return self._score
@score.setter
def score(self, value: float):
self._score = value
def __init__(self, words, mots, alignment=None):
super(SentencePair, self).__init__(words, mots, alignment=alignment)
self._score = 0.0
def __str__(self):
return str(self.score) + ' ||| ' + ' '.join(self.words) + ' ||| ' + ' '.join(self.mots)
def align(self, model: IBMModel2Exact):
model.align(self)
class ParallelCorpus(list):
@property
def alignment_model(self) -> IBMModel2Exact:
return self._alignment_model
@alignment_model.setter
def alignment_model(self, model: IBMModel2Exact):
self._alignment_model = model
def __init__(self):
super(ParallelCorpus, self).__init__()
self._alignment_model = None
self._self_trained = False
def build_parallel_corpus(self, tgt: list, ref: list) -> 'ParallelCorpus':
for i in range(len(tgt)):
self.append(SentencePair(tgt[i].split(), ref[i].split()))
return self
def train_alignment_model(self):
self._alignment_model = IBMModel2Exact(self, 5)
return self
def align_sentences(self):
if not self._self_trained:
            for sentence_pair in self:
                self.alignment_model.align(sentence_pair)
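# Illustrative usage sketch (the sentence lists are placeholders):
#   corpus = ParallelCorpus().build_parallel_corpus(
#       ['the house', 'a cat'], ['das haus', 'eine katze'])
#   corpus.train_alignment_model()
#   corpus.align_sentences()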
| amalinovskiy/stalimet | stalimet/corpus.py | Python | apache-2.0 | 1,492 |
# -*- coding: utf-8 -*-
from flask import Flask
from flask_script import Manager, Command
import sys
def get_cur_info():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
f = sys.exc_info()[2].tb_frame.f_back
return (f.f_code.co_name, f.f_lineno)
app = Flask(__name__)
manager = Manager(app)
class ClsA(Command):
def __init__(self):
print("init ClsA : [{0}]".format(id(self)))
def run(self):
print("run : [{0}]".format(id(self)))
class ClsB(Command):
def __init__(self):
print("init ClsB : [{0}]".format(id(self)))
def run(self):
print("run : [{0}]".format(id(self)))
#manager.add_command('a', cmd_a)
manager.add_command('a', ClsA())
manager.add_command('b', ClsB)
if "__main__" == __name__:
manager.run()
| fengbohello/practice | python/libs/flask_script/test_manager.py | Python | lgpl-3.0 | 838 |
category_output = {
'Ad': 9,
'Al': 14,
'Co': 8,
'Cr': 11,
'Da': 5,
'Hu': 14,
'Ra': 12,
'Ro': 6,
'Sa': 4,
'Sl': 12,
'Tr': 7,
}
def categories(format_json, input_json):
output = []
for key in sorted(input_json['story']['categories']):
if input_json['story']['categories'][key]:
if key[:2] in category_output:
output.append('$c{colour}{category}$r'.format(colour=category_output[key[:2]],
category=key))
else:
output.append(key)
return ';'.join(output)
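# Illustrative example: a story tagged 'Adventure' and 'Romance' would come out
# roughly as '$c9Adventure$r;$c6Romance$r' given the colour map above.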
| DanielOaks/goshu | modules/link/fimfiction_lnk.py | Python | isc | 634 |
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: docker_container
short_description: manage docker containers
description:
- Manage the life cycle of docker containers.
- Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken.
version_added: "2.1"
options:
blkio_weight:
description:
- Block IO (relative weight), between 10 and 1000.
default: null
required: false
capabilities:
description:
- List of capabilities to add to the container.
default: null
required: false
cleanup:
description:
- Use with I(detach) to remove the container after successful execution.
default: false
required: false
version_added: "2.2"
command:
description:
- Command to execute when the container starts.
default: null
required: false
cpu_period:
description:
- Limit CPU CFS (Completely Fair Scheduler) period
default: 0
required: false
cpu_quota:
description:
- Limit CPU CFS (Completely Fair Scheduler) quota
default: 0
required: false
cpuset_cpus:
description:
- CPUs in which to allow execution C(1,3) or C(1-3).
default: null
required: false
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1)
default: null
required: false
cpu_shares:
description:
- CPU shares (relative weight).
default: null
required: false
detach:
description:
- Enable detached mode to leave the container running in background.
If disabled, the task will reflect the status of the container run (failed if the command failed).
default: true
required: false
devices:
description:
- "List of host device bindings to add to the container. Each binding is a mapping expressed
in the format: <path_on_host>:<path_in_container>:<cgroup_permissions>"
default: null
required: false
dns_servers:
description:
- List of custom DNS servers.
default: null
required: false
dns_search_domains:
description:
- List of custom DNS search domains.
default: null
required: false
env:
description:
- Dictionary of key,value pairs.
default: null
required: false
env_file:
version_added: "2.2"
description:
- Path to a file containing environment variables I(FOO=BAR).
      - If a variable is also present in C(env), the C(env) value will override it.
- Requires docker-py >= 1.4.0.
default: null
required: false
entrypoint:
description:
- Command that overwrites the default ENTRYPOINT of the image.
default: null
required: false
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the dictionary.
Each host name will be added to the container's /etc/hosts file.
default: null
required: false
exposed_ports:
description:
- List of additional container ports which informs Docker that the container
listens on the specified network ports at runtime.
If the port is already exposed using EXPOSE in a Dockerfile, it does not
need to be exposed again.
default: null
required: false
aliases:
- exposed
force_kill:
description:
- Use the kill command when stopping a running container.
default: false
required: false
groups:
description:
- List of additional group names and/or IDs that the container process will run as.
default: null
required: false
hostname:
description:
- Container hostname.
default: null
required: false
ignore_image:
description:
- When C(state) is I(present) or I(started) the module compares the configuration of an existing
container to requested configuration. The evaluation includes the image version. If
the image version in the registry does not match the container, the container will be
recreated. Stop this behavior by setting C(ignore_image) to I(True).
default: false
required: false
version_added: "2.2"
image:
description:
- Repository path and tag used to create the container. If an image is not found or pull is true, the image
will be pulled from the registry. If no tag is included, 'latest' will be used.
default: null
required: false
interactive:
description:
- Keep stdin open after a container is launched, even if not attached.
default: false
required: false
ipc_mode:
description:
- Set the IPC mode for the container. Can be one of 'container:<name|id>' to reuse another
container's IPC namespace or 'host' to use the host's IPC namespace within the container.
default: null
required: false
keep_volumes:
description:
- Retain volumes associated with a removed container.
default: true
required: false
kill_signal:
description:
- Override default signal used to kill a running container.
default: null
required: false
kernel_memory:
description:
- "Kernel memory limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g. Minimum is 4M."
default: 0
required: false
labels:
description:
- Dictionary of key value pairs.
default: null
required: false
links:
description:
- List of name aliases for linked containers in the format C(container_name:alias)
default: null
required: false
log_driver:
description:
- Specify the logging driver. Docker uses json-file by default.
choices:
- none
- json-file
- syslog
- journald
- gelf
- fluentd
- awslogs
- splunk
default: null
required: false
log_options:
description:
- Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/
for details.
required: false
default: null
mac_address:
description:
- Container MAC address (e.g. 92:d0:c6:0a:29:33)
default: null
required: false
memory:
description:
- "Memory limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g"
default: 0
required: false
memory_reservation:
description:
- "Memory soft limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g"
default: 0
required: false
memory_swap:
description:
- Total memory limit (memory + swap, format:<number>[<unit>]).
Number is a positive integer. Unit can be one of b, k, m, or g.
default: 0
required: false
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
default: 0
required: false
name:
description:
- Assign a name to a new container or match an existing container.
      - When identifying an existing container, name may be a name or a long or short container ID.
required: true
network_mode:
description:
- Connect the container to a network.
choices:
- bridge
- container:<name|id>
- host
- none
default: null
required: false
networks:
description:
- List of networks the container belongs to.
- Each network is a dict with keys C(name), C(ipv4_address), C(ipv6_address), C(links), C(aliases).
- For each network C(name) is required, all other keys are optional.
- If included, C(links) or C(aliases) are lists.
- For examples of the data structure and usage see EXAMPLES below.
- To remove a container from one or more networks, use the C(purge_networks) option.
default: null
required: false
version_added: "2.2"
oom_killer:
description:
- Whether or not to disable OOM Killer for the container.
default: false
required: false
oom_score_adj:
description:
- An integer value containing the score given to the container in order to tune OOM killer preferences.
default: 0
required: false
version_added: "2.2"
paused:
description:
- Use with the started state to pause running processes inside the container.
default: false
required: false
pid_mode:
description:
- Set the PID namespace mode for the container. Currently only supports 'host'.
default: null
required: false
privileged:
description:
- Give extended privileges to the container.
default: false
required: false
published_ports:
description:
- List of ports to publish from the container to the host.
- "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
container port, 9000 is a host port, and 0.0.0.0 is a host interface."
- Container ports must be exposed either in the Dockerfile or via the C(expose) option.
- A value of all will publish all exposed container ports to random host ports, ignoring
any other mappings.
- If C(networks) parameter is provided, will inspect each network to see if there exists
a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4.
If such a network is found, then published ports where no host IP address is specified
will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4.
Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4
value encountered in the list of C(networks) is the one that will be used.
aliases:
- ports
required: false
default: null
pull:
description:
- If true, always pull the latest version of an image. Otherwise, will only pull an image when missing.
default: false
required: false
purge_networks:
description:
- Remove the container from ALL networks not included in C(networks) parameter.
- Any default networks such as I(bridge), if not found in C(networks), will be removed as well.
default: false
required: false
version_added: "2.2"
read_only:
description:
- Mount the container's root file system as read-only.
default: false
required: false
recreate:
description:
- Use with present and started states to force the re-creation of an existing container.
default: false
required: false
restart:
description:
- Use with started state to force a matching container to be stopped and restarted.
default: false
required: false
restart_policy:
description:
- Container restart policy. Place quotes around I(no) option.
choices:
- always
- no
- on-failure
- unless-stopped
default: on-failure
required: false
restart_retries:
description:
- Use with restart policy to control maximum number of restart attempts.
default: 0
required: false
shm_size:
description:
- Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes).
- Omitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`.
default: null
required: false
security_opts:
description:
- List of security options in the form of C("label:user:User")
default: null
required: false
state:
description:
- 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container
rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.'
- 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
container matches the name, a container will be created. If a container matches the name but the provided configuration
does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
with the requested config. Image version will be taken into account when comparing configuration. To ignore image
version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use
force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed
container.'
- 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container
matches the name, a container will be created and started. If a container matching the name is found but the
configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed
and a new container will be created with the requested configuration and started. Image version will be taken into
account when comparing configuration. To ignore image version use the ignore_image option. Use recreate to always
re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and
restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated
with a removed container.'
- 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped
state. Use force_kill to kill a container rather than stopping it.'
required: false
default: started
choices:
- absent
- present
- stopped
- started
stop_signal:
description:
- Override default signal used to stop the container.
default: null
required: false
stop_timeout:
description:
- Number of seconds to wait for the container to stop before sending SIGKILL.
required: false
default: null
trust_image_content:
description:
- If true, skip image verification.
default: false
required: false
tty:
description:
      - Allocate a pseudo-TTY.
default: false
required: false
ulimits:
description:
- "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)"
default: null
required: false
user:
description:
- Sets the username or UID used and optionally the groupname or GID for the specified command.
- "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
default: null
required: false
uts:
description:
- Set the UTS namespace mode for the container.
default: null
required: false
volumes:
description:
- List of volumes to mount within the container.
- "Use docker CLI-style syntax: C(/host:/container[:mode])"
- You can specify a read mode for the mount with either C(ro) or C(rw).
- SELinux hosts can additionally use C(z) or C(Z) to use a shared or
private label for the volume.
default: null
required: false
volume_driver:
description:
- The container volume driver.
default: none
required: false
volumes_from:
description:
- List of container names or Ids to get volumes from.
default: null
required: false
extends_documentation_fragment:
- docker
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
- "Daan Oosterveld (@dusdanig)"
- "James Tanner (@jctanner)"
- "Chris Houseknecht (@chouseknecht)"
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
'''
EXAMPLES = '''
- name: Create a data container
docker_container:
name: mydata
image: busybox
volumes:
- /data
- name: Re-create a redis container
docker_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
exposed_ports:
- 6379
volumes_from:
- mydata
- name: Restart a container
docker_container:
name: myapplication
image: someuser/appimage
state: started
restart: yes
links:
- "myredis:aliasedredis"
devices:
- "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: ssssh
- name: Container present
docker_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: sleep infinity
- name: Stop a container
docker_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
docker_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
docker_container:
name: ohno
state: absent
- name: Syslogging output
docker_container:
name: myservice
image: busybox
log_driver: syslog
log_options:
syslog-address: tcp://my-syslog-server:514
syslog-facility: daemon
# NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag" for
# older docker installs, use "syslog-tag" instead
tag: myservice
- name: Create db container and connect to network
docker_container:
name: db_test
image: "postgres:latest"
networks:
- name: "{{ docker_network_name }}"
- name: Start container, connect to network and link
docker_container:
name: sleeper
image: ubuntu:14.04
networks:
- name: TestingNet
ipv4_address: "172.1.1.100"
aliases:
- sleepyzz
links:
- db_test:db
- name: TestingNet2
- name: Start a container with a command
docker_container:
name: sleepy
image: ubuntu:14.04
command: ["sleep", "infinity"]
- name: Add container to networks
docker_container:
name: sleepy
networks:
- name: TestingNet
ipv4_address: 172.1.1.18
links:
- sleeper
- name: TestingNet2
ipv4_address: 172.1.10.20
- name: Update network with aliases
docker_container:
name: sleepy
networks:
- name: TestingNet
aliases:
- sleepyz
- zzzz
- name: Remove container from one network
docker_container:
name: sleepy
networks:
- name: TestingNet2
purge_networks: yes
- name: Remove container from all networks
docker_container:
name: sleepy
purge_networks: yes
'''
RETURN = '''
docker_container:
description:
- Before 2.3 this was 'ansible_docker_container' but was renamed due to conflicts with the connection plugin.
- Facts representing the current state of the container. Matches the docker inspection output.
- Note that facts are not part of registered vars but accessible directly.
- Empty if C(state) is I(absent)
- If detached is I(False), will include Output attribute containing any output from container run.
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [],
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/usr/bin/supervisord"
],
"Domainname": "",
"Entrypoint": null,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"443/tcp": {},
"80/tcp": {}
},
"Hostname": "8e47bf643eb9",
"Image": "lnmp_nginx:v1",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/tmp/lnmp/nginx-sites/logs/": {}
},
...
}'
'''
import re
from ansible.module_utils.docker_common import *
try:
from docker import utils
if HAS_DOCKER_PY_2:
from docker.types import Ulimit
else:
from docker.utils.types import Ulimit
except:
# missing docker-py handled in ansible.module_utils.docker
pass
REQUIRES_CONVERSION_TO_BYTES = [
'memory',
'memory_reservation',
'memory_swap',
'shm_size'
]
VOLUME_PERMISSIONS = ('rw', 'ro', 'z', 'Z')
class TaskParameters(DockerBaseClass):
'''
Access and parse module parameters
'''
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.blkio_weight = None
self.capabilities = None
self.cleanup = None
self.command = None
self.cpu_period = None
self.cpu_quota = None
self.cpuset_cpus = None
self.cpuset_mems = None
self.cpu_shares = None
self.detach = None
self.debug = None
self.devices = None
self.dns_servers = None
self.dns_opts = None
self.dns_search_domains = None
self.env = None
self.env_file = None
self.entrypoint = None
self.etc_hosts = None
self.exposed_ports = None
self.force_kill = None
self.groups = None
self.hostname = None
self.ignore_image = None
self.image = None
self.interactive = None
self.ipc_mode = None
self.keep_volumes = None
self.kernel_memory = None
self.kill_signal = None
self.labels = None
self.links = None
self.log_driver = None
self.log_options = None
self.mac_address = None
self.memory = None
self.memory_reservation = None
self.memory_swap = None
self.memory_swappiness = None
self.name = None
self.network_mode = None
self.networks = None
self.oom_killer = None
self.oom_score_adj = None
self.paused = None
self.pid_mode = None
self.privileged = None
self.purge_networks = None
self.pull = None
self.read_only = None
self.recreate = None
self.restart = None
self.restart_retries = None
self.restart_policy = None
self.shm_size = None
self.security_opts = None
self.state = None
self.stop_signal = None
self.stop_timeout = None
self.trust_image_content = None
self.tty = None
self.user = None
self.uts = None
self.volumes = None
self.volume_binds = dict()
self.volumes_from = None
self.volume_driver = None
for key, value in client.module.params.items():
setattr(self, key, value)
for param_name in REQUIRES_CONVERSION_TO_BYTES:
if client.module.params.get(param_name):
try:
setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
except ValueError as exc:
self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
self.publish_all_ports = False
self.published_ports = self._parse_publish_ports()
if self.published_ports in ('all', 'ALL'):
self.publish_all_ports = True
self.published_ports = None
self.ports = self._parse_exposed_ports(self.published_ports)
self.log("expose ports:")
self.log(self.ports, pretty_print=True)
self.links = self._parse_links(self.links)
if self.volumes:
self.volumes = self._expand_host_paths()
self.env = self._get_environment()
self.ulimits = self._parse_ulimits()
self.log_config = self._parse_log_config()
self.exp_links = None
self.volume_binds = self._get_volume_binds(self.volumes)
self.log("volumes:")
self.log(self.volumes, pretty_print=True)
self.log("volume binds:")
self.log(self.volume_binds, pretty_print=True)
if self.networks:
for network in self.networks:
if not network.get('name'):
self.fail("Parameter error: network must have a name attribute.")
network['id'] = self._get_network_id(network['name'])
if not network['id']:
self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
if network.get('links'):
network['links'] = self._parse_links(network['links'])
if self.entrypoint:
# convert from list to str.
self.entrypoint = ' '.join([str(x) for x in self.entrypoint])
if self.command:
# convert from list to str
self.command = ' '.join([str(x) for x in self.command])
def fail(self, msg):
self.client.module.fail_json(msg=msg)
@property
def update_parameters(self):
'''
Returns parameters used to update a container
'''
update_parameters = dict(
blkio_weight='blkio_weight',
cpu_period='cpu_period',
cpu_quota='cpu_quota',
cpu_shares='cpu_shares',
cpuset_cpus='cpuset_cpus',
mem_limit='memory',
            mem_reservation='memory_reservation',
memswap_limit='memory_swap',
kernel_memory='kernel_memory'
)
result = dict()
for key, value in update_parameters.items():
if getattr(self, value, None) is not None:
result[key] = getattr(self, value)
return result
@property
def create_parameters(self):
'''
Returns parameters used to create a container
'''
create_params = dict(
command='command',
hostname='hostname',
user='user',
detach='detach',
stdin_open='interactive',
tty='tty',
ports='ports',
environment='env',
name='name',
entrypoint='entrypoint',
cpu_shares='cpu_shares',
mac_address='mac_address',
labels='labels',
stop_signal='stop_signal',
volume_driver='volume_driver',
)
result = dict(
host_config=self._host_config(),
volumes=self._get_mounts(),
)
for key, value in create_params.items():
if getattr(self, value, None) is not None:
result[key] = getattr(self, value)
return result
def _expand_host_paths(self):
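        # Normalize volume specs given in docker CLI syntax: host paths that
        # start with '.' or '~' are expanded to absolute paths (adding an
        # explicit ':rw' mode when none was given); other entries pass through.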
new_vols = []
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if re.match(r'[\.~]', host):
host = os.path.abspath(host)
new_vols.append("%s:%s:%s" % (host, container, mode))
continue
elif len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS and re.match(r'[\.~]', parts[0]):
host = os.path.abspath(parts[0])
new_vols.append("%s:%s:rw" % (host, parts[1]))
continue
new_vols.append(vol)
return new_vols
def _get_mounts(self):
'''
Return a list of container mounts.
:return:
'''
result = []
if self.volumes:
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, _ = vol.split(':')
result.append(container)
continue
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
result.append(parts[1])
continue
result.append(vol)
self.log("mounts:")
self.log(result, pretty_print=True)
return result
def _host_config(self):
'''
Returns parameters used to create a HostConfig object
'''
        host_config_params = dict(
port_bindings='published_ports',
publish_all_ports='publish_all_ports',
links='links',
privileged='privileged',
dns='dns_servers',
dns_search='dns_search_domains',
binds='volume_binds',
volumes_from='volumes_from',
network_mode='network_mode',
cap_add='capabilities',
extra_hosts='etc_hosts',
read_only='read_only',
ipc_mode='ipc_mode',
security_opt='security_opts',
ulimits='ulimits',
log_config='log_config',
mem_limit='memory',
memswap_limit='memory_swap',
mem_swappiness='memory_swappiness',
oom_score_adj='oom_score_adj',
shm_size='shm_size',
group_add='groups',
devices='devices',
pid_mode='pid_mode'
)
params = dict()
for key, value in host_config_params.items():
if getattr(self, value, None) is not None:
params[key] = getattr(self, value)
if self.restart_policy:
params['restart_policy'] = dict(Name=self.restart_policy,
MaximumRetryCount=self.restart_retries)
return self.client.create_host_config(**params)
@property
def default_host_ip(self):
ip = '0.0.0.0'
if not self.networks:
return ip
for net in self.networks:
if net.get('name'):
network = self.client.inspect_network(net['name'])
if network.get('Driver') == 'bridge' and \
network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
break
return ip
def _parse_publish_ports(self):
'''
Parse ports from docker CLI syntax
'''
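        # Illustrative example (assumed values): published_ports=['8080:80', '127.0.0.1:53:53/udp']
        # yields {80: ('0.0.0.0', 8080), '53/udp': ('127.0.0.1', 53)} when the default host IP
        # is 0.0.0.0.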
if self.published_ports is None:
return None
if 'all' in self.published_ports:
return 'all'
default_ip = self.default_host_ip
binds = {}
for port in self.published_ports:
parts = str(port).split(':')
container_port = parts[-1]
if '/' not in container_port:
container_port = int(parts[-1])
p_len = len(parts)
if p_len == 1:
bind = (default_ip,)
elif p_len == 2:
bind = (default_ip, int(parts[0]))
elif p_len == 3:
bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
if container_port in binds:
old_bind = binds[container_port]
if isinstance(old_bind, list):
old_bind.append(bind)
else:
binds[container_port] = [binds[container_port], bind]
else:
binds[container_port] = bind
return binds
@staticmethod
def _get_volume_binds(volumes):
'''
Extract host bindings, if any, from list of volume mapping strings.
:return: dictionary of bind mappings
'''
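        # Illustrative example (assumed values): ['/host/data:/data:ro', '/tmp:/tmp'] yields
        # {'/host/data': {'bind': '/data', 'mode': 'ro'}, '/tmp': {'bind': '/tmp', 'mode': 'rw'}}.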
result = dict()
if volumes:
for vol in volumes:
host = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = (vol.split(':') + ['rw'])
if host is not None:
result[host] = dict(
bind=container,
mode=mode
)
return result
def _parse_exposed_ports(self, published_ports):
'''
Parse exposed ports from docker CLI-style ports syntax.
'''
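        # Illustrative example (assumed values): exposed_ports=['9000', '8080/udp'] parses to
        # [('9000', 'tcp'), ('8080', 'udp')]; published ports not already covered are appended.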
exposed = []
if self.exposed_ports:
for port in self.exposed_ports:
port = str(port).strip()
protocol = 'tcp'
match = re.search(r'(/.+$)', port)
if match:
protocol = match.group(1).replace('/', '')
port = re.sub(r'/.+$', '', port)
exposed.append((port, protocol))
if published_ports:
# Any published port should also be exposed
for publish_port in published_ports:
match = False
if isinstance(publish_port, basestring) and '/' in publish_port:
port, protocol = publish_port.split('/')
port = int(port)
else:
protocol = 'tcp'
port = int(publish_port)
for exposed_port in exposed:
if isinstance(exposed_port[0], basestring) and '-' in exposed_port[0]:
start_port, end_port = exposed_port[0].split('-')
if int(start_port) <= port <= int(end_port):
match = True
elif exposed_port[0] == port:
match = True
if not match:
exposed.append((port, protocol))
return exposed
@staticmethod
def _parse_links(links):
'''
Turn links into a dictionary
'''
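        # Illustrative example (assumed values): ['db:database', 'cache'] becomes
        # {'db': 'database', 'cache': 'cache'}.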
if links is None:
return None
result = {}
for link in links:
parsed_link = link.split(':', 1)
if len(parsed_link) == 2:
result[parsed_link[0]] = parsed_link[1]
else:
result[parsed_link[0]] = parsed_link[0]
return result
def _parse_ulimits(self):
'''
Turn ulimits into an array of Ulimit objects
'''
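        # Illustrative example (assumed values): 'nofile:1024:2048' becomes
        # Ulimit(name='nofile', soft=1024, hard=2048); 'nofile:1024' sets soft and hard to 1024.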
if self.ulimits is None:
return None
results = []
for limit in self.ulimits:
limits = dict()
pieces = limit.split(':')
if len(pieces) >= 2:
limits['name'] = pieces[0]
limits['soft'] = int(pieces[1])
limits['hard'] = int(pieces[1])
if len(pieces) == 3:
limits['hard'] = int(pieces[2])
try:
results.append(Ulimit(**limits))
except ValueError as exc:
self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
return results
def _parse_log_config(self):
'''
Create a LogConfig object
'''
if self.log_driver is None:
return None
options = dict(
Type=self.log_driver,
Config = dict()
)
if self.log_options is not None:
options['Config'] = self.log_options
try:
return LogConfig(**options)
except ValueError as exc:
self.fail('Error parsing logging options - %s' % (exc))
def _get_environment(self):
"""
If environment file is combined with explicit environment variables, the explicit environment variables
take precedence.
"""
final_env = {}
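        # Illustrative example (assumed values): an env_file line FOO=from_file combined with
        # env={'FOO': 'explicit'} results in {'FOO': 'explicit'} -- explicit variables win.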
if self.env_file:
parsed_env_file = utils.parse_env_file(self.env_file)
for name, value in parsed_env_file.items():
final_env[name] = str(value)
if self.env:
for name, value in self.env.items():
final_env[name] = str(value)
return final_env
def _get_network_id(self, network_name):
network_id = None
try:
for network in self.client.networks(names=[network_name]):
if network['Name'] == network_name:
network_id = network['Id']
break
except Exception as exc:
self.fail("Error getting network id for %s - %s" % (network_name, str(exc)))
return network_id
class Container(DockerBaseClass):
def __init__(self, container, parameters):
super(Container, self).__init__()
self.raw = container
self.Id = None
self.container = container
if container:
self.Id = container['Id']
self.Image = container['Image']
self.log(self.container, pretty_print=True)
self.parameters = parameters
self.parameters.expected_links = None
self.parameters.expected_ports = None
self.parameters.expected_exposed = None
self.parameters.expected_volumes = None
self.parameters.expected_ulimits = None
self.parameters.expected_etc_hosts = None
self.parameters.expected_env = None
def fail(self, msg):
self.parameters.client.module.fail_json(msg=msg)
@property
def exists(self):
return True if self.container else False
@property
def running(self):
if self.container and self.container.get('State'):
if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
return True
return False
def has_different_configuration(self, image):
'''
Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)
'''
self.log('Starting has_different_configuration')
self.parameters.expected_entrypoint = self._get_expected_entrypoint()
self.parameters.expected_links = self._get_expected_links()
self.parameters.expected_ports = self._get_expected_ports()
self.parameters.expected_exposed = self._get_expected_exposed(image)
self.parameters.expected_volumes = self._get_expected_volumes(image)
self.parameters.expected_binds = self._get_expected_binds(image)
self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
self.parameters.expected_env = self._get_expected_env(image)
self.parameters.expected_cmd = self._get_expected_cmd()
self.parameters.expected_devices = self._get_expected_devices()
if not self.container.get('HostConfig'):
self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
if not self.container.get('Config'):
self.fail("has_config_diff: Error parsing container properties. Config missing.")
if not self.container.get('NetworkSettings'):
self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")
host_config = self.container['HostConfig']
log_config = host_config.get('LogConfig', dict())
restart_policy = host_config.get('RestartPolicy', dict())
config = self.container['Config']
network = self.container['NetworkSettings']
# The previous version of the docker module ignored the detach state by
# assuming if the container was running, it must have been detached.
detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
# "ExposedPorts": null returns None type & causes AttributeError - PR #5517
if config.get('ExposedPorts') is not None:
expected_exposed = [re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()]
else:
expected_exposed = []
# Map parameters to container inspect results
config_mapping = dict(
image=config.get('Image'),
expected_cmd=config.get('Cmd'),
hostname=config.get('Hostname'),
user=config.get('User'),
detach=detach,
interactive=config.get('OpenStdin'),
capabilities=host_config.get('CapAdd'),
expected_devices=host_config.get('Devices'),
dns_servers=host_config.get('Dns'),
dns_opts=host_config.get('DnsOptions'),
dns_search_domains=host_config.get('DnsSearch'),
expected_env=(config.get('Env') or []),
expected_entrypoint=config.get('Entrypoint'),
expected_etc_hosts=host_config['ExtraHosts'],
expected_exposed=expected_exposed,
groups=host_config.get('GroupAdd'),
ipc_mode=host_config.get("IpcMode"),
labels=config.get('Labels'),
expected_links=host_config.get('Links'),
log_driver=log_config.get('Type'),
log_options=log_config.get('Config'),
mac_address=network.get('MacAddress'),
memory_swappiness=host_config.get('MemorySwappiness'),
network_mode=host_config.get('NetworkMode'),
oom_killer=host_config.get('OomKillDisable'),
oom_score_adj=host_config.get('OomScoreAdj'),
pid_mode=host_config.get('PidMode'),
privileged=host_config.get('Privileged'),
expected_ports=host_config.get('PortBindings'),
read_only=host_config.get('ReadonlyRootfs'),
restart_policy=restart_policy.get('Name'),
restart_retries=restart_policy.get('MaximumRetryCount'),
# Cannot test shm_size, as shm_size is not included in container inspection results.
# shm_size=host_config.get('ShmSize'),
            security_opts=host_config.get("SecurityOpt"),
stop_signal=config.get("StopSignal"),
tty=config.get('Tty'),
expected_ulimits=host_config.get('Ulimits'),
uts=host_config.get('UTSMode'),
expected_volumes=config.get('Volumes'),
expected_binds=host_config.get('Binds'),
volumes_from=host_config.get('VolumesFrom'),
volume_driver=host_config.get('VolumeDriver')
)
differences = []
for key, value in config_mapping.items():
self.log('check differences %s %s vs %s' % (key, getattr(self.parameters, key), str(value)))
if getattr(self.parameters, key, None) is not None:
if isinstance(getattr(self.parameters, key), list) and isinstance(value, list):
if len(getattr(self.parameters, key)) > 0 and isinstance(getattr(self.parameters, key)[0], dict):
# compare list of dictionaries
self.log("comparing list of dict: %s" % key)
match = self._compare_dictionary_lists(getattr(self.parameters, key), value)
else:
# compare two lists. Is list_a in list_b?
self.log("comparing lists: %s" % key)
set_a = set(getattr(self.parameters, key))
set_b = set(value)
match = (set_a <= set_b)
elif isinstance(getattr(self.parameters, key), dict) and isinstance(value, dict):
# compare two dicts
self.log("comparing two dicts: %s" % key)
match = self._compare_dicts(getattr(self.parameters, key), value)
else:
# primitive compare
self.log("primitive compare: %s" % key)
match = (getattr(self.parameters, key) == value)
if not match:
# no match. record the differences
item = dict()
item[key] = dict(
parameter=getattr(self.parameters, key),
container=value
)
differences.append(item)
has_differences = True if len(differences) > 0 else False
return has_differences, differences
def _compare_dictionary_lists(self, list_a, list_b):
'''
If all of list_a exists in list_b, return True
'''
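        # Illustrative example (assumed values): [{'a': 1}] compared against [{'a': 1}, {'b': 2}]
        # returns True; only containment of list_a within list_b matters, not order.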
if not isinstance(list_a, list) or not isinstance(list_b, list):
return False
matches = 0
for dict_a in list_a:
for dict_b in list_b:
if self._compare_dicts(dict_a, dict_b):
matches += 1
break
result = (matches == len(list_a))
return result
def _compare_dicts(self, dict_a, dict_b):
'''
If dict_a in dict_b, return True
'''
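        # Illustrative example (assumed values): {'a': 1} against {'a': 1, 'b': 2} returns True;
        # keys present only in dict_b are ignored.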
if not isinstance(dict_a, dict) or not isinstance(dict_b, dict):
return False
for key, value in dict_a.items():
if isinstance(value, dict):
match = self._compare_dicts(value, dict_b.get(key))
elif isinstance(value, list):
if len(value) > 0 and isinstance(value[0], dict):
match = self._compare_dictionary_lists(value, dict_b.get(key))
else:
set_a = set(value)
set_b = set(dict_b.get(key))
match = (set_a == set_b)
else:
match = (value == dict_b.get(key))
if not match:
return False
return True
def has_different_resource_limits(self):
'''
Diff parameters and container resource limits
'''
if not self.container.get('HostConfig'):
self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
host_config = self.container['HostConfig']
config_mapping = dict(
cpu_period=host_config.get('CpuPeriod'),
cpu_quota=host_config.get('CpuQuota'),
cpuset_cpus=host_config.get('CpusetCpus'),
cpuset_mems=host_config.get('CpusetMems'),
cpu_shares=host_config.get('CpuShares'),
kernel_memory=host_config.get("KernelMemory"),
memory=host_config.get('Memory'),
memory_reservation=host_config.get('MemoryReservation'),
memory_swap=host_config.get('MemorySwap'),
oom_score_adj=host_config.get('OomScoreAdj'),
)
differences = []
for key, value in config_mapping.items():
if getattr(self.parameters, key, None) and getattr(self.parameters, key) != value:
# no match. record the differences
item = dict()
item[key] = dict(
parameter=getattr(self.parameters, key),
container=value
)
differences.append(item)
different = (len(differences) > 0)
return different, differences
def has_network_differences(self):
'''
Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
'''
different = False
differences = []
if not self.parameters.networks:
return different, differences
if not self.container.get('NetworkSettings'):
self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = self.container['NetworkSettings']['Networks']
for network in self.parameters.networks:
if connected_networks.get(network['name'], None) is None:
different = True
differences.append(dict(
parameter=network,
container=None
))
else:
diff = False
if network.get('ipv4_address') and network['ipv4_address'] != connected_networks[network['name']].get('IPAddress'):
diff = True
if network.get('ipv6_address') and network['ipv6_address'] != connected_networks[network['name']].get('GlobalIPv6Address'):
diff = True
if network.get('aliases') and not connected_networks[network['name']].get('Aliases'):
diff = True
if network.get('aliases') and connected_networks[network['name']].get('Aliases'):
for alias in network.get('aliases'):
if alias not in connected_networks[network['name']].get('Aliases', []):
diff = True
if network.get('links') and not connected_networks[network['name']].get('Links'):
diff = True
if network.get('links') and connected_networks[network['name']].get('Links'):
expected_links = []
for link, alias in network['links'].items():
expected_links.append("%s:%s" % (link, alias))
for link in expected_links:
if link not in connected_networks[network['name']].get('Links', []):
diff = True
if diff:
different = True
differences.append(dict(
parameter=network,
container=dict(
name=network['name'],
ipv4_address=connected_networks[network['name']].get('IPAddress'),
ipv6_address=connected_networks[network['name']].get('GlobalIPv6Address'),
aliases=connected_networks[network['name']].get('Aliases'),
links=connected_networks[network['name']].get('Links')
)
))
return different, differences
def has_extra_networks(self):
'''
Check if the container is connected to non-requested networks
'''
extra_networks = []
extra = False
if not self.container.get('NetworkSettings'):
self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = self.container['NetworkSettings'].get('Networks')
if connected_networks:
for network, network_config in connected_networks.items():
keep = False
if self.parameters.networks:
for expected_network in self.parameters.networks:
if expected_network['name'] == network:
keep = True
if not keep:
extra = True
extra_networks.append(dict(name=network, id=network_config['NetworkID']))
return extra, extra_networks
def _get_expected_devices(self):
if not self.parameters.devices:
return None
expected_devices = []
for device in self.parameters.devices:
parts = device.split(':')
if len(parts) == 1:
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[0],
PathOnHost=parts[0]
))
elif len(parts) == 2:
parts = device.split(':')
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[1],
PathOnHost=parts[0]
)
)
else:
expected_devices.append(
dict(
CgroupPermissions=parts[2],
PathInContainer=parts[1],
PathOnHost=parts[0]
))
return expected_devices
def _get_expected_entrypoint(self):
if not self.parameters.entrypoint:
return None
return shlex.split(self.parameters.entrypoint)
def _get_expected_ports(self):
if not self.parameters.published_ports:
return None
expected_bound_ports = {}
for container_port, config in self.parameters.published_ports.items():
if isinstance(container_port, int):
container_port = "%s/tcp" % container_port
if len(config) == 1:
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}]
elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for host_ip, host_port in config:
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)})
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
return expected_bound_ports
def _get_expected_links(self):
if self.parameters.links is None:
return None
self.log('parameter links:')
self.log(self.parameters.links, pretty_print=True)
exp_links = []
for link, alias in self.parameters.links.items():
exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
return exp_links
def _get_expected_binds(self, image):
self.log('_get_expected_binds')
image_vols = []
if image:
image_vols = self._get_image_binds(image['ContainerConfig'].get('Volumes'))
param_vols = []
if self.parameters.volumes:
for vol in self.parameters.volumes:
host = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = vol.split(':') + ['rw']
if host:
param_vols.append("%s:%s:%s" % (host, container, mode))
result = list(set(image_vols + param_vols))
self.log("expected_binds:")
self.log(result, pretty_print=True)
return result
def _get_image_binds(self, volumes):
'''
Convert array of binds to array of strings with format host_path:container_path:mode
:param volumes: array of bind dicts
:return: array of strings
'''
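        # Illustrative example (assumed values): {'/host': {'bind': '/data', 'mode': 'ro'}}
        # becomes ['/host:/data:ro'] via _get_bind_from_dict().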
results = []
if isinstance(volumes, dict):
results += self._get_bind_from_dict(volumes)
elif isinstance(volumes, list):
for vol in volumes:
results += self._get_bind_from_dict(vol)
return results
@staticmethod
def _get_bind_from_dict(volume_dict):
results = []
if volume_dict:
for host_path, config in volume_dict.items():
if isinstance(config, dict) and config.get('bind'):
container_path = config.get('bind')
mode = config.get('mode', 'rw')
results.append("%s:%s:%s" % (host_path, container_path, mode))
return results
def _get_expected_volumes(self, image):
self.log('_get_expected_volumes')
expected_vols = dict()
if image and image['ContainerConfig'].get('Volumes'):
expected_vols.update(image['ContainerConfig'].get('Volumes'))
if self.parameters.volumes:
for vol in self.parameters.volumes:
container = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = vol.split(':') + ['rw']
new_vol = dict()
if container:
new_vol[container] = dict()
else:
new_vol[vol] = dict()
expected_vols.update(new_vol)
if not expected_vols:
expected_vols = None
self.log("expected_volumes:")
self.log(expected_vols, pretty_print=True)
return expected_vols
def _get_expected_env(self, image):
self.log('_get_expected_env')
expected_env = dict()
if image and image['ContainerConfig'].get('Env'):
for env_var in image['ContainerConfig']['Env']:
parts = env_var.split('=', 1)
expected_env[parts[0]] = parts[1]
if self.parameters.env:
expected_env.update(self.parameters.env)
param_env = []
for key, value in expected_env.items():
param_env.append("%s=%s" % (key, value))
return param_env
def _get_expected_exposed(self, image):
self.log('_get_expected_exposed')
image_ports = []
if image:
image_ports = [re.sub(r'/.+$', '', p) for p in (image['ContainerConfig'].get('ExposedPorts') or {}).keys()]
param_ports = []
if self.parameters.ports:
param_ports = [str(p[0]) for p in self.parameters.ports]
result = list(set(image_ports + param_ports))
self.log(result, pretty_print=True)
return result
def _get_expected_ulimits(self, config_ulimits):
self.log('_get_expected_ulimits')
if config_ulimits is None:
return None
results = []
for limit in config_ulimits:
results.append(dict(
Name=limit.name,
Soft=limit.soft,
Hard=limit.hard
))
return results
def _get_expected_cmd(self):
self.log('_get_expected_cmd')
if not self.parameters.command:
return None
return shlex.split(self.parameters.command)
def _convert_simple_dict_to_list(self, param_name, join_with=':'):
if getattr(self.parameters, param_name, None) is None:
return None
results = []
for key, value in getattr(self.parameters, param_name).items():
results.append("%s%s%s" % (key, join_with, value))
return results
class ContainerManager(DockerBaseClass):
'''
Perform container management tasks
'''
def __init__(self, client):
super(ContainerManager, self).__init__()
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {'changed': False, 'actions': []}
self.diff = {}
self.facts = {}
state = self.parameters.state
if state in ('stopped', 'started', 'present'):
self.present(state)
elif state == 'absent':
self.absent()
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
if self.client.module._diff or self.parameters.debug:
self.results['diff'] = self.diff
if self.facts:
self.results['ansible_facts'] = {'docker_container': self.facts}
def present(self, state):
container = self._get_container(self.parameters.name)
image = self._get_image()
if not container.exists:
# New container
self.log('No container found')
new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
if new_container:
container = new_container
else:
# Existing container
different, differences = container.has_different_configuration(image)
image_different = False
if not self.parameters.ignore_image:
image_different = self._image_is_different(image, container)
if image_different or different or self.parameters.recreate:
self.diff['differences'] = differences
if image_different:
self.diff['image_different'] = True
self.log("differences")
self.log(differences, pretty_print=True)
if container.running:
self.container_stop(container.Id)
self.container_remove(container.Id)
new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
if new_container:
container = new_container
if container and container.exists:
container = self.update_limits(container)
container = self.update_networks(container)
if state == 'started' and not container.running:
container = self.container_start(container.Id)
elif state == 'started' and self.parameters.restart:
self.container_stop(container.Id)
container = self.container_start(container.Id)
elif state == 'stopped' and container.running:
self.container_stop(container.Id)
container = self._get_container(container.Id)
self.facts = container.raw
def absent(self):
container = self._get_container(self.parameters.name)
if container.exists:
if container.running:
self.container_stop(container.Id)
self.container_remove(container.Id)
def fail(self, msg, **kwargs):
self.client.module.fail_json(msg=msg, **kwargs)
def _get_container(self, container):
'''
Expects container ID or Name. Returns a container object
'''
return Container(self.client.get_container(container), self.parameters)
def _get_image(self):
if not self.parameters.image:
self.log('No image specified')
return None
repository, tag = utils.parse_repository_tag(self.parameters.image)
if not tag:
tag = "latest"
image = self.client.find_image(repository, tag)
if not self.check_mode:
if not image or self.parameters.pull:
self.log("Pull the image.")
image, alreadyToLatest = self.client.pull_image(repository, tag)
if alreadyToLatest:
self.results['changed'] = False
else:
self.results['changed'] = True
self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
self.log("image")
self.log(image, pretty_print=True)
return image
def _image_is_different(self, image, container):
if image and image.get('Id'):
if container and container.Image:
if image.get('Id') != container.Image:
return True
return False
def update_limits(self, container):
limits_differ, different_limits = container.has_different_resource_limits()
if limits_differ:
self.log("limit differences:")
self.log(different_limits, pretty_print=True)
if limits_differ and not self.check_mode:
self.container_update(container.Id, self.parameters.update_parameters)
return self._get_container(container.Id)
return container
def update_networks(self, container):
has_network_differences, network_differences = container.has_network_differences()
updated_container = container
if has_network_differences:
if self.diff.get('differences'):
self.diff['differences'].append(dict(network_differences=network_differences))
else:
self.diff['differences'] = [dict(network_differences=network_differences)]
self.results['changed'] = True
updated_container = self._add_networks(container, network_differences)
if self.parameters.purge_networks:
has_extra_networks, extra_networks = container.has_extra_networks()
if has_extra_networks:
if self.diff.get('differences'):
self.diff['differences'].append(dict(purge_networks=extra_networks))
else:
self.diff['differences'] = [dict(purge_networks=extra_networks)]
self.results['changed'] = True
updated_container = self._purge_networks(container, extra_networks)
return updated_container
def _add_networks(self, container, differences):
for diff in differences:
# remove the container from the network, if connected
if diff.get('container'):
self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
if not self.check_mode:
try:
self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
str(exc)))
# connect to the network
params = dict(
ipv4_address=diff['parameter'].get('ipv4_address', None),
ipv6_address=diff['parameter'].get('ipv6_address', None),
links=diff['parameter'].get('links', None),
aliases=diff['parameter'].get('aliases', None)
)
self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
if not self.check_mode:
try:
self.log("Connecting container to network %s" % diff['parameter']['id'])
self.log(params, pretty_print=True)
self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
except Exception as exc:
self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc)))
return self._get_container(container.Id)
def _purge_networks(self, container, networks):
for network in networks:
self.results['actions'].append(dict(removed_from_network=network['name']))
if not self.check_mode:
try:
self.client.disconnect_container_from_network(container.Id, network['name'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (network['name'],
str(exc)))
return self._get_container(container.Id)
def container_create(self, image, create_parameters):
self.log("create container")
self.log("image: %s parameters:" % image)
self.log(create_parameters, pretty_print=True)
self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
self.results['changed'] = True
new_container = None
if not self.check_mode:
try:
new_container = self.client.create_container(image, **create_parameters)
except Exception as exc:
self.fail("Error creating container: %s" % str(exc))
return self._get_container(new_container['Id'])
return new_container
def container_start(self, container_id):
self.log("start container %s" % (container_id))
self.results['actions'].append(dict(started=container_id))
self.results['changed'] = True
if not self.check_mode:
try:
self.client.start(container=container_id)
except Exception as exc:
self.fail("Error starting container %s: %s" % (container_id, str(exc)))
if not self.parameters.detach:
status = self.client.wait(container_id)
output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
if status != 0:
self.fail(output, status=status)
if self.parameters.cleanup:
self.container_remove(container_id, force=True)
insp = self._get_container(container_id)
if insp.raw:
insp.raw['Output'] = output
else:
insp.raw = dict(Output=output)
return insp
return self._get_container(container_id)
def container_remove(self, container_id, link=False, force=False):
volume_state = (not self.parameters.keep_volumes)
        self.log("remove container container:%s v:%s link:%s force:%s" % (container_id, volume_state, link, force))
self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
except Exception as exc:
self.fail("Error removing container %s: %s" % (container_id, str(exc)))
return response
def container_update(self, container_id, update_parameters):
if update_parameters:
self.log("update container %s" % (container_id))
self.log(update_parameters, pretty_print=True)
self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
self.results['changed'] = True
if not self.check_mode and callable(getattr(self.client, 'update_container')):
try:
self.client.update_container(container_id, **update_parameters)
except Exception as exc:
self.fail("Error updating container %s: %s" % (container_id, str(exc)))
return self._get_container(container_id)
def container_kill(self, container_id):
self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
if self.parameters.kill_signal:
response = self.client.kill(container_id, signal=self.parameters.kill_signal)
else:
response = self.client.kill(container_id)
except Exception as exc:
self.fail("Error killing container %s: %s" % (container_id, exc))
return response
def container_stop(self, container_id):
if self.parameters.force_kill:
self.container_kill(container_id)
return
self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
if self.parameters.stop_timeout:
response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
else:
response = self.client.stop(container_id)
except Exception as exc:
self.fail("Error stopping container %s: %s" % (container_id, str(exc)))
return response
def main():
argument_spec = dict(
blkio_weight=dict(type='int'),
capabilities=dict(type='list'),
cleanup=dict(type='bool', default=False),
command=dict(type='list'),
cpu_period=dict(type='int'),
cpu_quota=dict(type='int'),
cpuset_cpus=dict(type='str'),
cpuset_mems=dict(type='str'),
cpu_shares=dict(type='int'),
detach=dict(type='bool', default=True),
devices=dict(type='list'),
dns_servers=dict(type='list'),
dns_opts=dict(type='list'),
dns_search_domains=dict(type='list'),
env=dict(type='dict'),
env_file=dict(type='path'),
entrypoint=dict(type='list'),
etc_hosts=dict(type='dict'),
exposed_ports=dict(type='list', aliases=['exposed', 'expose']),
force_kill=dict(type='bool', default=False, aliases=['forcekill']),
groups=dict(type='list'),
hostname=dict(type='str'),
ignore_image=dict(type='bool', default=False),
image=dict(type='str'),
interactive=dict(type='bool', default=False),
ipc_mode=dict(type='str'),
keep_volumes=dict(type='bool', default=True),
kernel_memory=dict(type='str'),
kill_signal=dict(type='str'),
labels=dict(type='dict'),
links=dict(type='list'),
log_driver=dict(type='str',
choices=['none', 'json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'],
default=None),
log_options=dict(type='dict', aliases=['log_opt']),
mac_address=dict(type='str'),
memory=dict(type='str', default='0'),
memory_reservation=dict(type='str'),
memory_swap=dict(type='str'),
memory_swappiness=dict(type='int'),
name=dict(type='str', required=True),
network_mode=dict(type='str'),
networks=dict(type='list'),
oom_killer=dict(type='bool'),
oom_score_adj=dict(type='int'),
paused=dict(type='bool', default=False),
pid_mode=dict(type='str'),
privileged=dict(type='bool', default=False),
published_ports=dict(type='list', aliases=['ports']),
pull=dict(type='bool', default=False),
purge_networks=dict(type='bool', default=False),
read_only=dict(type='bool', default=False),
recreate=dict(type='bool', default=False),
restart=dict(type='bool', default=False),
restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
restart_retries=dict(type='int', default=None),
shm_size=dict(type='str'),
security_opts=dict(type='list'),
state=dict(type='str', choices=['absent', 'present', 'started', 'stopped'], default='started'),
stop_signal=dict(type='str'),
stop_timeout=dict(type='int'),
trust_image_content=dict(type='bool', default=False),
tty=dict(type='bool', default=False),
ulimits=dict(type='list'),
user=dict(type='str'),
uts=dict(type='str'),
volumes=dict(type='list'),
volumes_from=dict(type='list'),
volume_driver=dict(type='str'),
)
required_if = [
('state', 'present', ['image'])
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True
)
cm = ContainerManager(client)
client.module.exit_json(**cm.results)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| GustavoHennig/ansible | lib/ansible/modules/cloud/docker/docker_container.py | Python | gpl-3.0 | 76,360 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import session, render_template, flash
from indico.core import signals
from indico.core.db import db
from indico.core.logger import Logger
from indico.core.roles import ManagementRole
from indico.modules.events import Event
from indico.modules.events.features.base import EventFeature
from indico.modules.events.layout.util import MenuEntryData
from indico.modules.events.registration.settings import RegistrationSettingsProxy
from indico.util.i18n import _, ngettext
from indico.web.flask.templating import template_hook
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem
logger = Logger.get('events.registration')
registration_settings = RegistrationSettingsProxy('registrations', {
# Whether to merge display forms on the participant list
'merge_registration_forms': True,
# Columns to show on the participant list when the registration forms are merged
'participant_list_columns': ['first_name', 'last_name', 'affiliation'],
# Order of the forms to show on the participant list
'participant_list_forms': [],
# Columns to show for each form on the participant list
'participant_list_form_columns': {}
})
@signals.menu.items.connect_via('event-management-sidemenu')
def _extend_event_management_menu(sender, event, **kwargs):
registration_section = 'organization' if event.type == 'conference' else 'advanced'
if not event.can_manage(session.user, 'registration', allow_key=True):
return
if event.type != 'conference':
yield SideMenuItem('participants', _("Participants"), url_for('event_participation.manage', event),
section='organization')
if event.has_feature('registration'):
yield SideMenuItem('registration', _('Registration'), url_for('event_registration.manage_regform_list', event),
section=registration_section)
def _get_open_regforms(event):
if not event.has_feature('registration'):
return []
from indico.modules.events.registration.models.forms import RegistrationForm
return (RegistrationForm.find(RegistrationForm.is_open, event_id=int(event.id))
.order_by(db.func.lower(RegistrationForm.title))
.all())
@template_hook('conference-home-info')
def _inject_regform_announcement(event, **kwargs):
from indico.modules.events.registration.util import user_registered_in_event, get_registrations_with_tickets
regforms = _get_open_regforms(event)
if regforms:
return render_template('events/registration/display/conference_home.html', regforms=regforms, event=event,
user_has_registered=(session.user and user_registered_in_event(session.user, event)),
registrations_with_tickets=get_registrations_with_tickets(session.user, event))
@template_hook('event-header')
def _inject_event_header(event, **kwargs):
from indico.modules.events.registration.models.forms import RegistrationForm
event = event.as_event
regforms = (event.registration_forms
.filter_by(is_deleted=False, is_open=True)
.order_by(db.func.lower(RegistrationForm.title))
.all())
    # A participant could appear more than once in the list if they registered via multiple registration forms.
    # This is deemed very unlikely for meetings and lectures and thus not worth the extra complexity.
return render_template('events/registration/display/event_header.html', event=event, regforms=regforms)
@signals.event.sidemenu.connect
def _extend_event_menu(sender, **kwargs):
from indico.modules.events.registration.models.forms import RegistrationForm
from indico.modules.events.registration.models.registrations import Registration
def _visible_registration(event):
if not event.has_feature('registration'):
return False
if RegistrationForm.find(RegistrationForm.is_scheduled, RegistrationForm.event_id == int(event.id)).count():
return True
if not session.user:
return False
return bool(Registration.find(Registration.user == session.user,
Registration.event_id == int(event.id),
~Registration.is_deleted,
~RegistrationForm.is_deleted,
_join=Registration.registration_form).count())
def _visible_participant_list(event):
return event.has_feature('registration')
yield MenuEntryData(_('Registration'), 'registration', 'event_registration.display_regform_list', position=10,
visible=_visible_registration)
yield MenuEntryData(_('Participant List'), 'participants', 'event_registration.participant_list', position=11,
visible=_visible_participant_list, static_site=True)
@signals.users.registered.connect
@signals.users.email_added.connect
def _associate_registrations(user, **kwargs):
from indico.modules.events.registration.models.registrations import Registration
reg_alias = db.aliased(Registration)
subquery = db.session.query(reg_alias).filter(reg_alias.user_id == user.id,
reg_alias.registration_form_id == Registration.registration_form_id,
~reg_alias.is_deleted)
registrations = (Registration
.find(Registration.user_id == None, # noqa
Registration.email.in_(user.all_emails),
~subquery.exists(),
~Registration.is_deleted)
.order_by(Registration.submitted_dt.desc())
.all())
if not registrations:
return
done = set()
for registration in registrations:
if registration.registration_form_id in done:
continue
logger.info('Associating %s with %s', registration, user)
registration.user = user
done.add(registration.registration_form_id)
db.session.flush()
num = len(done)
flash(ngettext("A registration has been linked to your account.",
"{n} registrations have been linked to your account.", num).format(n=num), 'info')
@signals.event_management.management_url.connect
def _get_event_management_url(event, **kwargs):
if event.as_event.can_manage(session.user, role='registration'):
return url_for('event_registration.manage_regform_list', event)
@signals.get_placeholders.connect_via('registration-invitation-email')
def _get_invitation_placeholders(sender, invitation, **kwargs):
from indico.modules.events.registration.placeholders.invitations import (FirstNamePlaceholder, LastNamePlaceholder,
InvitationLinkPlaceholder)
yield FirstNamePlaceholder
yield LastNamePlaceholder
yield InvitationLinkPlaceholder
@signals.get_placeholders.connect_via('registration-email')
def _get_registration_placeholders(sender, regform, registration, **kwargs):
from indico.modules.events.registration.placeholders.registrations import (IDPlaceholder, LastNamePlaceholder,
FirstNamePlaceholder, LinkPlaceholder,
EventTitlePlaceholder,
EventLinkPlaceholder, FieldPlaceholder)
yield FirstNamePlaceholder
yield LastNamePlaceholder
yield EventTitlePlaceholder
yield EventLinkPlaceholder
yield IDPlaceholder
yield LinkPlaceholder
yield FieldPlaceholder
@signals.event.get_feature_definitions.connect
def _get_feature_definitions(sender, **kwargs):
return RegistrationFeature
@signals.acl.get_management_roles.connect_via(Event)
def _get_management_roles(sender, **kwargs):
return RegistrationRole
@signals.event_management.get_cloners.connect
def _get_registration_cloners(sender, **kwargs):
from indico.modules.events.registration.clone import RegistrationFormCloner, RegistrationCloner
yield RegistrationFormCloner
yield RegistrationCloner
class RegistrationFeature(EventFeature):
name = 'registration'
friendly_name = _('Registration')
description = _('Gives event managers the opportunity to handle registrations within the event.')
@classmethod
def is_default_for_event(cls, event):
return event.getType() == 'conference'
class RegistrationRole(ManagementRole):
name = 'registration'
friendly_name = _('Registration')
description = _('Grants management access to the registration form.')
| belokop/indico_bare | indico/modules/events/registration/__init__.py | Python | gpl-3.0 | 9,647 |
#!/usr/local/bin/python3
#
# tinker.py
# Test subroutine of the CHRUBIX project
# ...for me to tinker with things :)
import sys
import os
from chrubix import generate_distro_record_from_name, load_distro_record
from chrubix.utils import fix_broken_hyperlinks, system_or_die, call_makepkg_or_die, remaining_megabytes_free_on_device, \
chroot_this, patch_org_freedesktop_networkmanager_conf_file, failed, migrate_to_obfuscated_filesystem
from chrubix.distros.debian import generate_mickeymouse_lxdm_patch
from chrubix.utils.postinst import remove_junk, \
GUEST_HOMEDIR
MYDISK_MTPT = '/.mydisk'
try:
import urwid
except ImportError:
os.system( 'easy_install urwid' )
import urwid
testval = urwid # stop silly warning in Eclipse
argv = sys.argv
res = 0
if argv[1] != 'tinker':
raise RuntimeError( 'first param must be tinker' )
good_list = []
bad_list = [] # ubuntu failed to build afio
if argv[2] == 'secretsquirrel':
if 0 == os.system( 'mount | fgrep "cryptroot on /"' ):
failed( 'No! You are already in Secret Squirrel Mode.' )
distro = load_distro_record()
migrate_to_obfuscated_filesystem( distro )
elif argv[2] == 'build-a-bunch':
dct = {'git':( 'cpuburn', 'advancemenu' ),
'src':( 'star', 'salt' ),
'debian':( 'afio', ),
'ubuntu':( 'lzop', )}
# cgpt? lxdm? chromium?
distro = generate_distro_record_from_name( 'debianwheezy' )
distro.mountpoint = MYDISK_MTPT if os.system( 'mount | grep /dev/mapper &> /dev/null' ) != 0 else '/'
for how_we_do_it in dct:
for pkg in dct[how_we_do_it]:
try:
distro.install_expatriate_software_into_a_debianish_OS(
package_name = pkg,
method = how_we_do_it )
good_list.append( pkg )
except ( IOError, SyntaxError, RuntimeError ):
bad_list.append( pkg )
print( "good:", good_list )
print( "bad :", bad_list )
elif argv[2] == 'logmein':
distro = load_distro_record( '/' if os.system( 'cat /proc/cmdline 2>/dev/null | fgrep root=/dev/dm-0 > /dev/null' ) != 0 else MYDISK_MTPT )
for cmd in (
'mkdir -p /tmp/.sda2',
'mount /dev/sda2 /tmp/.sda2',
'/usr/local/bin/redo_mbr.sh > /tmp/.sda2/log_me_in.sh'
):
system_or_die( cmd )
os.system( 'sync;sync;sync;sync' )
system_or_die( 'umount /tmp/.sda2' )
elif argv[2] == 'build-from-debian':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
pkg = argv[4]
distro.build_and_install_package_from_debian_source( pkg, 'jessie' )
elif argv[2] == 'build-from-jessie-for-stretch':
distro = generate_distro_record_from_name( 'debianstretch' )
distro.mountpoint = '/'
distro.build_and_install_package_from_debian_source( argv[3], 'jessie' )
# sys.exit( 0 )
    # print( "Building %s from Deb-ish => %s" % ( pkg, argv[3] ) )
    # distro.build_and_install_package_from_debian_source( pkg, 'wheezy' if argv[3] == 'debianwheezy' else 'jessie' )
elif argv[2] == 'build-from-ubuntu':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
pkg = argv[4]
# sys.exit( 0 )
print( "Building %s from Ubu-ish => Wheezy" % ( pkg ) )
distro.build_and_install_package_from_ubuntu_source( pkg )
elif argv[2] == 'build-from-src':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
pkg = argv[4]
distro.build_and_install_software_from_archlinux_source( pkg )
elif argv[2] == 'fix-hyperlinks':
fix_broken_hyperlinks( argv[3] )
elif argv[2] == 'build-from-git':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
pkg = argv[4]
sources_basedir = '/root/.rmo/PKGBUILDs/core'
mountpoint = MYDISK_MTPT
distro.build_and_install_software_from_archlinux_git( pkg )
elif argv[2] == 'fire-everything':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
pkg = argv[4]
distro.install_expatriate_software_into_a_debianish_OS( package_name = pkg, method = None )
elif argv[2] == 'remove-junk':
remove_junk( MYDISK_MTPT, '/root/.rmo/PKGBUILDs/core/linux-chromebook' )
elif argv[2] == 'modify-sources':
system_or_die( 'bash /usr/local/bin/modify_sources.sh /dev/mmcblk1 /.mydisk no yes' )
elif argv[2] == 'postinst':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = '/'
distro.install_tweaks_for_lxdm_chrome_iceweasel_and_distrospecific_stuff()
elif argv[2] == 'initramfs':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.redo_kernel( argv[4], distro.root_dev, distro.mountpoint )
elif argv[2] == 'redo-kernel':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.modify_build_and_install_mkfs_and_kernel_for_OS( apply_kali_patch = False )
elif argv[2] == 'tails':
distro = generate_distro_record_from_name( 'debiantails' )
distro.mountpoint = '/' if os.system( 'cat /proc/cmdline 2>/dev/null | fgrep root=/dev/dm-0 > /dev/null' ) != 0 else MYDISK_MTPT
distro.grab_all_tails_packages()
elif argv[2] == 'install-freenet':
distro = generate_distro_record_from_name( argv[3] )
assert( os.path.isdir( argv[4] ) is True )
distro.mountpoint = argv[4]
distro.install_freenet()
elif argv[2] == 'clone-guest':
outfile = '/tmp/default_guest_settings.tar.xz'
files_to_save = '\
.config/gtk-3.0/settings.ini \
.config/dconf/user \
.config/mate/backgrounds.xml \
.config/keepassx/config.ini \
.xscreensaver \
.themes \
.gtkrc-2.0 \
.config/chromium'
distro = generate_distro_record_from_name( argv[3] )
system_or_die( 'cd %s; tar -cJ %s > %s' % ( GUEST_HOMEDIR, files_to_save, outfile ) )
print( 'Saved %s/.* goodies to %s' % ( GUEST_HOMEDIR, outfile ) )
elif argv[2] == 'do-kernel':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT if os.system( 'mount | grep /dev/mapper &> /dev/null' ) != 0 else '/'
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.download_modify_build_and_install_kernel_and_mkfs()
elif argv[2] == 'kooky':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT if os.system( 'mount | grep /dev/mapper &> /dev/null' ) != 0 else '/'
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.build_kooky_filesystem_modules_for_chromeos( really = True )
elif argv[2] == 'modify-kernel-sources':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT if os.system( 'mount | grep /dev/mapper &> /dev/null' ) != 0 else '/'
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.call_bash_script_that_modifies_kernel_n_mkfs_sources()
assert( 0 == os.system( 'cat %s%s/config | grep UNION_FS' % ( distro.mountpoint, distro.kernel_src_basedir ) ) )
# distro.download_kernel_and_mkfs_sources()
# distro.modify_build_and_install_mkfs_and_kernel_for_OS()
elif argv[2] == 'sign-and-write':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
# if root_partition_device.find( '/dev/mapper' ) >= 0:
# param_A = 'cryptdevice=%s:%s' % ( self.spare_dev, os.path.basename( root_partition_device ) )
# else:
res = distro.sign_and_write_custom_kernel( distro.device, distro.root_dev, '' )
elif argv[2] == 'tarball-me':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
distro.generate_tarball_of_my_rootfs( '/tmp/out.tgz' )
os.system( 'rm -f /tmp/out.tgz' )
elif argv[2] == 'posterity':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
if 0 != distro.save_for_posterity_if_possible_D():
failed( 'Failed to create sample distro posterity file' )
elif argv[2] == 'udev':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
os.system( 'python3 /usr/local/bin/Chrubix/src/poweroff_if_disk_removed.py' )
elif argv[2] == 'tweak-lxdm-source':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
p = '%s/%s' % ( distro.sources_basedir, 'lxdm' )
generate_mickeymouse_lxdm_patch( distro.mountpoint, p, '%s/debian/patches/99_mickeymouse.patch' % ( p ) )
elif argv[2] == 'chromium':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
chroot_this( distro.mountpoint, 'yes "" 2>/dev/null | apt-get build-dep chromium' )
distro.build_and_install_package_from_deb_or_ubu_source( 'chromium-browser', 'https://packages.debian.org/' + argv[3] )
elif argv[2] == 'install-bitmask':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
distro.install_leap_bitmask()
# elif argv[2] == 'download-kernel-source':
# distro = generate_distro_record_from_name( argv[3] )
# distro.mountpoint = '/' if os.system( 'cat /proc/cmdline 2>/dev/null | fgrep root=/dev/dm-0 > /dev/null' ) != 0 else MYDISK_MTPT
# distro.device = '/dev/mmcblk1'
# distro.root_dev = '/dev/mmcblk1p3'
# distro.spare_dev = '/dev/mmcblk1p2'
# distro.kernel_rebuild_required = True # ...because the initramfs needs our boom pw, which means we'll have to rebuild initramfs.... which means rebuilding kernel!
# distro.root_is_encrypted = False
# distro.kthx = True # True
# distro.use_latest_kernel = False
# distro.download_kernel_source()
# elif argv[2] == 'build-kernel':
# distro = generate_distro_record_from_name( 'debianjessie' )
# distro.mountpoint = '/' if os.system( 'cat /proc/cmdline 2>/dev/null | fgrep root=/dev/dm-0 > /dev/null' ) != 0 else MYDISK_MTPT
# distro.device = '/dev/mmcblk1'
# distro.root_dev = '/dev/mmcblk1p3'
# distro.spare_dev = '/dev/mmcblk1p2'
# distro.kernel_rebuild_required = True # ...because the initramfs needs our boom pw, which means we'll have to rebuild initramfs.... which means rebuilding kernel!
# distro.root_is_encrypted = False
# distro.kthx = True # True
# distro.use_latest_kernel = True
# distro.build_kernel()
elif argv[2] == 'patch-nm':
distro = generate_distro_record_from_name( 'debianwheezy' )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
patch_org_freedesktop_networkmanager_conf_file( '%s/etc/dbus-1/system.d/org.freedesktop.NetworkManager.conf' % ( distro.mountpoint ),
'%s/usr/local/bin/Chrubix/blobs/settings/nmgr-cfg-diff.txt.gz' % ( distro.mountpoint ) )
elif argv[2] == 'makepkg':
print( 'Assuming archlinux' )
distro = generate_distro_record_from_name( 'archlinux' )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
pkg = argv[3]
# sys.exit( 0 )
print( "Building %s" % ( pkg ) )
if pkg == 'linux-chromebook':
call_makepkg_or_die( mountpoint = '/', \
package_path = '%s/%s' % ( distro.sources_basedir, pkg ), \
cmd = 'cd %s && makepkg --skipchecksums --nobuild -f' % ( distro.mountpoint + distro.kernel_src_basedir ),
errtxt = 'Failed to handle %s' % ( pkg ) )
else:
call_makepkg_or_die( mountpoint = '/', \
package_path = '%s/%s' % ( distro.sources_basedir, pkg ), \
cmd = 'cd %s/%s && makepkg --skipchecksums --nobuild -f' % ( distro.sources_basedir, pkg ),
errtxt = 'Failed to download %s' % ( pkg ) )
elif argv[2] == 'alarpy-build':
distro = generate_distro_record_from_name( 'debianwheezy' )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
distro.build_and_install_package_into_alarpy_from_source( argv[3], quiet = True )
elif argv[2] == 'install-i2p':
distro = generate_distro_record_from_name( argv[3] )
assert( os.path.isdir( argv[4] ) is True )
distro.mountpoint = argv[4]
# distro.mountpoint = MYDISK_MTPT
# distro.device = '/dev/mmcblk1'
# distro.root_dev = '/dev/mmcblk1p3'
# distro.spare_dev = '/dev/mmcblk1p2'
distro.install_i2p()
elif argv[2] == 'win-xp-theme':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
distro.install_win_xp_theme()
elif argv[2] == 'free':
r = remaining_megabytes_free_on_device( argv[3] )
failed( 'free space on %s is %d MB' % ( argv[3], r ) )
else:
raise RuntimeError ( 'I do not understand %s' % ( argv[2] ) )
os.system( 'sleep 4' )
print( "Exiting w/ retval=%d" % ( res ) )
sys.exit( res )
| ReubenAbrams/Chrubix | src/tinker.py | Python | gpl-3.0 | 14,182 |
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Structure import Structure;
class PNG_sRGB(Structure):
type_name = 'PNG_sRGB';
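  # The sRGB chunk body is a single rendering-intent byte (0-3); the constructor below
  # annotates known intent values and warns on anything else.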
def __init__(self, stream, offset, max_size, parent, name):
import C;
Structure.__init__(self, stream, offset, max_size, parent, name);
self._intent = self.Member(C.BYTE, 'intent');
if self._intent.value == 0:
self._intent.notes.append('perceptual');
elif self._intent.value == 1:
self._intent.notes.append('relative colorimetric');
elif self._intent.value == 2:
self._intent.notes.append('saturation');
elif self._intent.value == 3:
self._intent.notes.append('absolute colorimetric');
else:
self._intent.warnings.append('expected value to be 0, 1, 2 or 3');
self.Unused();
| SkyLined/headsup | decode/PNG_sRGB.py | Python | apache-2.0 | 1,352 |
# This file is part of MSMTools.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# MSMTools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from . import tpt
| markovmodel/msmtools | msmtools/flux/dense/__init__.py | Python | lgpl-3.0 | 799 |
# -*- coding:utf-8 -*-
import tornado.ioloop as tioloop
import tornado.web as tweb
import xml.etree.ElementTree as et
import pony.orm as orm
import sys
import os
__dir__ = os.path.abspath(os.path.dirname(__file__))
# sys.path.append("/home/concefly/project/git/tornado_connector")
# import connector
# Configure the database
db = orm.Database('sqlite', 'address_book.sq3', create_db=True)
# Database models
class Person(db.Entity):
id = orm.PrimaryKey(int, auto=True)
name = orm.Required(str)
mobile = orm.Optional(str)
tags = orm.Set("Tag")
group = orm.Required("Group")
class Tag(db.Entity):
id = orm.PrimaryKey(int, auto=True)
name = orm.Required(str, unique=True)
people = orm.Set(Person)
class Group(db.Entity):
id = orm.PrimaryKey(int, auto=True)
name = orm.Required(str, unique=True)
people = orm.Set(Person)
db.generate_mapping(create_tables=True)
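# A minimal usage sketch for the entities above (not called anywhere in the app;
# the names 'friends', 'Alice' and the phone number are placeholders). Every read
# or write has to happen inside an orm.db_session.
def _example_entity_usage():
    with orm.db_session:
        group = Group.get(name='friends') or Group(name='friends')
        person = Person(name='Alice', mobile='555-0100', group=group)
        person.tags.create(name='colleague')
        # Query the names of everyone in that group
        return orm.select(p.name for p in Person if p.group == group)[:]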
# HTTP handlers
class base_handler(tweb.RequestHandler):
def write_xml(self,x):
if isinstance(x,et.Element):
x = et.tostring(x,encoding="utf-8")
self.write(x)
self.set_header("Content-Type","text/xml")
class MainHandler(base_handler):
def get(self):
self.redirect("/static/index.html")
class contacts_handler(base_handler):
user_field = ["name","mobile"]
    # Query fields
query_field = dict(map(lambda x:(x,(x,"==")), user_field))
query_field.update({
# field_name : (model,method)
"group" : ("group.name","=="),
"tag" : ("tags.name" ," in "),
})
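    # For example, a request like ?group=friends&tag=work,home expands (roughly)
    # into the Pony filter string
    #   ('friends'==p.group.name) and ('work' in p.tags.name or 'home' in p.tags.name)
    # which get() below builds and evals into the select() predicate.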
default_frame = os.path.join(__dir__,"static","frame","contacts_default.xml")
    def get(self):
        """Query parameters:
        @name: name. Optional; comma separated.
        @mobile: phone number. Optional; comma separated.
        @group: group name string. Optional, comma separated; defaults to "noclass".
        @tag: tag string. Optional; comma separated.
        """
if hasattr(self,"default_frame"):
rows = et.parse(self.default_frame).getroot()
else:
rows = et.Element('rows')
        # Populate the query fields
query_field = {}
for k in self.query_field:
query_field[k] = self.get_query_argument(k,default="")
if not query_field[k]:
query_field[k] = ""
#
with orm.db_session:
            # Build the query predicate function
# (...or...) and (...or...) and ...
qf_and = []
for k,value in query_field.items():
qf_or = []
for v in value.split(','):
if v:
model,method = self.query_field[k]
qf_or.append("'%s'%sp.%s" %(v,method,model))
if qf_or:
qf_and.append(" or ".join(qf_or))
query_filter = " and ".join( map(lambda x:"(%s)" %(x,), qf_and ))
query_filter_func = eval("lambda p: "+query_filter) if query_filter else lambda p:p
            # END: build the query predicate function
query = Person.select(query_filter_func)
for i in query:
row = et.Element("row")
row.set("id",str(i.id))
if hasattr(self,"user_field"):
for _cell in self.user_field:
cell = et.Element("cell")
cell.text = getattr(i,_cell)
row.append(cell)
# group's cell
cell = et.Element("cell")
cell.text = i.group.name
row.append(cell)
# tag's cell
cell = et.Element("cell")
cell.text = ",".join(list(i.tags.name))
row.append(cell)
#
rows.append(row)
self.write_xml(rows)
    def post(self):
        """POST parameters:
        @group: group name string. Unknown group names are created automatically. Optional; defaults to "noclass".
        @tag: tag string, comma separated. Unknown tag names are created automatically. Optional.
        """
if self.get_argument("editing",default=None) != "true":
return
ids = self.get_body_argument("ids",default="").split(',')
res = et.Element("data")
for _id in ids:
gr_id = self.get_body_argument("%s_gr_id" %(_id,))
field = {}
            # Populate the group and tags fields
field["group"] = self.get_body_argument("%s_group" %(_id,),default="")
field["tags"] = self.get_body_argument("%s_tag" %(_id,),default="")
if not field['group']:
field['group'] = "noclass"
if not field['tags']:
field['tags'] = ""
            # Populate the user-defined fields
if hasattr(self,"user_field"):
for _name in self.user_field:
field[_name] = self.get_body_argument("%s_%s" %(_id,_name),default="-")
status = self.get_body_argument("%s_!nativeeditor_status" %(_id,))
            # Write to the database
tid = [gr_id]
with orm.db_session:
if status=="updated":
r = Person[gr_id]
if hasattr(self,"user_field"):
for k in self.user_field:
setattr(r, k, field[k])
                    # Handle the group field
                    # Create the group if it does not exist yet
_group = Group.get(name=field['group'])
if _group:
r.group = _group
else:
                        r.group = Group(name=field['group'])
                    # Handle the tags field
                    # Create any tag that does not exist yet
for tag in field['tags'].split(','):
if tag:
_tag = Tag.get(name=tag)
if _tag:
r.tags.add(_tag)
else:
                                r.tags.create(name=tag)
if status=="inserted":
init_field = dict(field)
                    # Handle the group field
                    # Create the group if it does not exist yet
_group = Group.get(name=field['group'])
if _group:
init_field['group'] = _group
else:
init_field['group'] = Group(name=field['group'])
                    # Handle the tags field
                    # Create any tag that does not exist yet
init_field['tags'] = []
for tag in field['tags'].split(','):
if tag:
_tag = Tag.get(name=tag)
if _tag:
                                init_field['tags'].append(_tag)
else:
init_field['tags'].append(Tag(name=tag))
#
r = Person(**init_field)
                    # Commit so the new id is assigned
orm.commit()
tid[0] = str(r.id)
if status=="deleted":
r = Person[gr_id]
Person[gr_id].delete()
            # Append an action xml item
act = et.Element("action")
act.set("type",status)
act.set("sid",gr_id)
act.set("tid",tid[0])
res.append(act)
self.write_xml(res)
class asidemanager_handler(base_handler):
default_frame = os.path.join(__dir__,"static","frame","asideManager.xml")
def get(self):
xml_tree = et.parse(self.default_frame).getroot()
        # Populate the group directory
xml_group = xml_tree.find("item[@id='group']")
with orm.db_session:
for _group in Group.select():
xml_item = et.Element("item")
xml_item.set("id" ,"group"+str(_group.id))
xml_item.set("text" ,_group.name)
                # Insert the query URL
xml_query = et.Element("userdata")
xml_query.set("name","query")
xml_query.text = "?group=%s" %(_group.name,)
xml_item.append(xml_query)
#
xml_group.append(xml_item)
        # Populate the tag directory
xml_tag = xml_tree.find("item[@id='tag']")
with orm.db_session:
for _tag in Tag.select():
xml_item = et.Element("item")
xml_item.set("id" ,"tag"+str(_tag.id))
xml_item.set("text" ,_tag.name)
                # Insert the query URL
xml_query = et.Element("userdata")
xml_query.set("name","query")
xml_query.text = "?tag=%s" %(_tag.name,)
xml_item.append(xml_query)
#
xml_tag.append(xml_item)
#
self.write_xml(xml_tree)
class contacts_group_options_handler(base_handler):
def get(self):
xml_data = et.Element("data")
with orm.db_session:
for _group in Group.select():
xml_item = et.Element("item")
xml_item.set("value",_group.name)
xml_item.set("label",_group.name)
xml_data.append(xml_item)
self.write_xml(xml_data)
if __name__ == "__main__":
settings = {
'autoreload': True,
'static_path': 'static',
'static_url_prefix': '/static/',
}
app = tweb.Application([
(r"/data/contacts/group/options",contacts_group_options_handler),
(r"/data/contacts",contacts_handler),
(r"/data/asidemanager",asidemanager_handler),
(r"/", MainHandler),
],**settings)
app.listen(8080)
tioloop.IOLoop.instance().start() | concefly/address_book | server.py | Python | mit | 7,611 |
"""
Django settings for census_site project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&30j5r=s!dzz_f)ji$eh)rb#h9zrzm(73a#izvp71eq^kul'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'census',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'census_site.urls'
WSGI_APPLICATION = 'census_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| sanitz/django-census-example | census_site/settings.py | Python | mit | 2,001 |
import numpy as np
import unittest
from caffe2.python import core, workspace, test_util
class TestToyRegression(test_util.TestCase):
def testToyRegression(self):
"""Tests a toy regression end to end.
        The test code carries out a simple toy regression of the form
y = 2.0 x1 + 1.5 x2 + 0.5
by randomly generating gaussian inputs and calculating the ground
truth outputs in the net as well. It uses a standard SGD to then
train the parameters.
"""
workspace.ResetWorkspace()
init_net = core.Net("init")
W = init_net.UniformFill([], "W", shape=[1, 2], min=-1., max=1.)
B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
W_gt = init_net.GivenTensorFill(
[], "W_gt", shape=[1, 2], values=[2.0, 1.5])
B_gt = init_net.GivenTensorFill([], "B_gt", shape=[1], values=[0.5])
LR = init_net.ConstantFill([], "LR", shape=[1], value=-0.1)
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
ITER = init_net.ConstantFill([], "ITER", shape=[1], value=0,
dtype=core.DataType.INT32)
train_net = core.Net("train")
X = train_net.GaussianFill([], "X", shape=[64, 2], mean=0.0, std=1.0)
Y_gt = X.FC([W_gt, B_gt], "Y_gt")
Y_pred = X.FC([W, B], "Y_pred")
dist = train_net.SquaredL2Distance([Y_gt, Y_pred], "dist")
loss = dist.AveragedLoss([], ["loss"])
# Get gradients for all the computations above. Note that in fact we
# don't need to get the gradient the Y_gt computation, but we'll just
# leave it there. In many cases, I am expecting one to load X and Y
# from the disk, so there is really no operator that will calculate the
# Y_gt input.
input_to_grad = train_net.AddGradientOperators([loss], skip=2)
# updates
train_net.Iter(ITER, ITER)
train_net.LearningRate(ITER, "LR", base_lr=-0.1,
policy="step", stepsize=20, gamma=0.9)
train_net.WeightedSum([W, ONE, input_to_grad[str(W)], LR], W)
train_net.WeightedSum([B, ONE, input_to_grad[str(B)], LR], B)
for blob in [loss, W, B]:
train_net.Print(blob, [])
# the CPU part.
plan = core.Plan("toy_regression")
plan.AddStep(core.ExecutionStep("init", init_net))
plan.AddStep(core.ExecutionStep("train", train_net, 200))
workspace.RunPlan(plan)
W_result = workspace.FetchBlob("W")
B_result = workspace.FetchBlob("B")
np.testing.assert_array_almost_equal(W_result, [[2.0, 1.5]], decimal=2)
np.testing.assert_array_almost_equal(B_result, [0.5], decimal=2)
workspace.ResetWorkspace()
if __name__ == '__main__':
unittest.main()
| xzturn/caffe2 | caffe2/python/toy_regression_test.py | Python | apache-2.0 | 2,822 |
from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
class ApiTokensTest(AcceptanceTestCase):
def setUp(self):
super(ApiTokensTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.login_as(self.user)
self.path = '/api/'
def test_simple(self):
self.browser.get(self.path)
self.browser.wait_until_not('.loading')
self.browser.snapshot('api tokens - no tokens')
# self.browser.click('.ref-create-token')
# self.browser.wait_until_not('.loading')
# self.browser.snapshot('api tokens - new token')
# self.browser.click('.btn-primary')
# self.browser.wait_until_not('.loading')
# self.browser.snapshot('api tokens - single token')
class ApiApplicationTest(AcceptanceTestCase):
def setUp(self):
super(ApiApplicationTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.login_as(self.user)
self.path = '/api/applications/'
def test_simple(self):
self.browser.get(self.path)
self.browser.wait_until_not('.loading')
self.browser.snapshot('api applications - no applications')
# self.browser.click('.ref-create-application')
# self.browser.wait_until_not('.loading')
# self.browser.snapshot('api applications - new application')
# self.browser.click('.btn-primary')
# self.browser.wait_until_not('.loading')
# self.browser.snapshot('api applications - single application')
| JamesMura/sentry | tests/acceptance/test_api.py | Python | bsd-3-clause | 1,565 |
# file xpath/ast.py
#
# Copyright 2010 Emory University General Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Abstract Syntax Tree nodes for parsed XPath.
This module contains basic nodes for representing parsed XPath expressions.
The parser provided by this module creates its parsed XPath representation
from the classes defined in this module. Library callers will mostly not use
this module directly, unless they need to produce XPath ASTs from scratch or
perhaps introspect ASTs returned by the parser.
'''
__all__ = [
'serialize',
'UnaryExpression',
'BinaryExpression',
'PredicatedExpression',
'AbsolutePath',
'Step',
'NameTest',
'NodeType',
'AbbreviatedStep',
'VariableReference',
'FunctionCall',
]
def serialize(xp_ast):
'''Serialize an XPath AST as a valid XPath expression.'''
return ''.join(_serialize(xp_ast))
def _serialize(xp_ast):
'''Generate token strings which, when joined together, form a valid
XPath serialization of the AST.'''
if hasattr(xp_ast, '_serialize'):
for tok in xp_ast._serialize():
yield tok
elif isinstance(xp_ast, basestring):
# FIXME: There are several interesting cases where this is wrong.
yield repr(xp_ast)
else:
yield str(xp_ast)
class UnaryExpression(object):
'''A unary XPath expression. Practially, this means -foo.'''
def __init__(self, op, right):
self.op = op
'''the operator used in the expression'''
self.right = right
'''the expression the operator is applied to'''
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__,
self.op, serialize(self.right))
def _serialize(self):
yield self.op
for tok in _serialize(self.right):
yield tok
KEYWORDS = set(['or', 'and', 'div', 'mod'])
class BinaryExpression(object):
'''Any binary XPath expression. a/b; a and b; a | b.'''
def __init__(self, left, op, right):
self.left = left
'''the left side of the binary expression'''
self.op = op
'''the operator of the binary expression'''
self.right = right
'''the right side of the binary expression'''
def __repr__(self):
return '<%s %s %s %s>' % (self.__class__.__name__,
serialize(self.left), self.op, serialize(self.right))
def _serialize(self):
for tok in _serialize(self.left):
yield tok
if self.op in KEYWORDS:
yield ' '
yield self.op
yield ' '
else:
yield self.op
for tok in _serialize(self.right):
yield tok
class PredicatedExpression(object):
'''A filtered XPath expression. $var[1]; (a or b)[foo][@bar].'''
def __init__(self, base, predicates=None):
self.base = base
'''the base expression to be filtered'''
self.predicates = predicates or []
'''a list of filter predicates'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def append_predicate(self, pred):
self.predicates.append(pred)
def _serialize(self):
yield '('
for tok in _serialize(self.base):
yield tok
yield ')'
for pred in self.predicates:
yield '['
for tok in _serialize(pred):
yield tok
yield ']'
class AbsolutePath(object):
'''An absolute XPath path. /a/b/c; //a/ancestor:b/@c.'''
def __init__(self, op='/', relative=None):
self.op = op
'''the operator used to root the expression'''
self.relative = relative
'''the relative path after the absolute root operator'''
def __repr__(self):
if self.relative:
return '<%s %s %s>' % (self.__class__.__name__,
self.op, serialize(self.relative))
else:
return '<%s %s>' % (self.__class__.__name__, self.op)
def _serialize(self):
yield self.op
for tok in _serialize(self.relative):
yield tok
class Step(object):
'''A single step in a relative path. a; @b; text(); parent::foo:bar[5].'''
def __init__(self, axis, node_test, predicates):
self.axis = axis
'''the step's axis, or @ or None if abbreviated or undefined'''
self.node_test = node_test
'''a NameTest or NodeType object describing the test represented'''
self.predicates = predicates
'''a list of predicates filtering the step'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
if self.axis == '@':
yield '@'
elif self.axis:
yield self.axis
yield '::'
for tok in self.node_test._serialize():
yield tok
for predicate in self.predicates:
yield '['
for tok in _serialize(predicate):
yield tok
yield ']'
class NameTest(object):
'''An element name node test for a Step.'''
def __init__(self, prefix, name):
self.prefix = prefix
'''the namespace prefix used for the test, or None if unset'''
self.name = name
'''the node name used for the test, or *'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
if self.prefix:
yield self.prefix
yield ':'
yield self.name
def __str__(self):
return ''.join(self._serialize())
class NodeType(object):
'''A node type node test for a Step.'''
def __init__(self, name, literal=None):
self.name = name
'''the node type name, such as node or text'''
self.literal = literal
'''the argument to the node specifier. XPath allows these only for
processing-instruction() node tests.'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
yield self.name
yield '('
if self.literal is not None:
for tok in _serialize(self.literal):
                yield tok
yield ')'
def __str__(self):
return ''.join(self._serialize())
class AbbreviatedStep(object):
'''An abbreviated XPath step. . or ..'''
def __init__(self, abbr):
self.abbr = abbr
'''the abbreviated step'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
yield self.abbr
class VariableReference(object):
'''An XPath variable reference. $foo; $myns:foo.'''
def __init__(self, name):
self.name = name
'''a tuple (prefix, localname) containing the variable name'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
yield '$'
prefix, localname = self.name
if prefix:
yield prefix
yield ':'
yield localname
class FunctionCall(object):
'''An XPath function call. foo(); my:foo(1); foo(1, 'a', $var).'''
def __init__(self, prefix, name, args):
self.prefix = prefix
'''the namespace prefix, or None if unspecified'''
self.name = name
'''the local function name'''
self.args = args
'''a list of argument expressions'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
if self.prefix:
yield self.prefix
yield ':'
yield self.name
yield '('
if self.args:
for tok in _serialize(self.args[0]):
yield tok
for arg in self.args[1:]:
yield ','
for tok in _serialize(arg):
yield tok
yield ')'
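def _example_build_and_serialize():
    # A small illustrative sketch (not used by the library itself): build an AST
    # by hand and turn it back into XPath text. The element names are arbitrary.
    step_book = Step(None, NameTest(None, 'book'), [])
    step_title = Step(None, NameTest(None, 'title'), [])
    path = AbsolutePath('/', BinaryExpression(step_book, '/', step_title))
    return serialize(path)  # -> '/book/title'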
| emory-libraries/eulcore-history | src/eulcore/xpath/ast.py | Python | apache-2.0 | 8,995 |
#!/usr/bin/python
import sys
import re
import thread
import urllib
from time import sleep
from datetime import datetime, timedelta
import requests
import praw
from prawcore.exceptions import *
import irc.bot
# Begin configurable parameters
identity = { # Make sure to set these if they aren't already!
'reddit_client_id': '',
'reddit_client_secret': '',
'reddit_username': '',
'reddit_password': '',
'twitch_client_id': '',
'twitch_irc_nick': '',
'twitch_irc_oauth': ''}
adminIrcNames = ['flarn2006', 'deadinsky']
updaterId = '102szrk71dw9r' # Main updater ID, for the current run or intermission.
updaterId2 = 'z0xcggm226qa' # Secondary updater ID, for mods who talk about other things a lot.
updaterIdTest = 'ty0ak5tjb4fq' # Test updater ID, used in test mode.
modList = ['twitchplayspokemon', 'aissurtievos'] # People whose messages are (almost) always worth posting to the updater.
modList2 = ['projectrevotpp', 'felkcraft'] # Only post these ones to the secondary updater.
testMode = 0 # 0) Normal mode
# 1) Run normally, but post to test updater
# 2) Test mode - read messages from console instead of Twitch chat
# Messages matching any of these regular expressions will be completely ignored.
msgRejectPatterns = [
re.compile('^!'), # Commands beginning with '!' (e.g. '!bet')
re.compile('^_mode '), # Streamer has used this before to manually activate anarchy/democracy.
re.compile('^(?:(?:[abxylrnews]|up|down|left|right|start|select|home|wait|anarchy|democracy|\\d{1,3},\\d{1,3}|move|switch|run|item[0-9]+(p[1-6](m[1-4])?))[0-9]*\\+?)+$', re.I), # Inputs - see http://regex101.com/ for help.
re.compile('https:\/\/(?:www\.)?twitch\.tv\/tankturntactics')] # DatSheffy no spam
# End configurable parameters
prevMsgs = {}
prevMsgTimes = {}
displayNames = {}
ircNames = {}
if len(sys.argv) >= 2:
if sys.argv[1] == '-t':
testMode = 2
elif sys.argv[1] == '-T':
testMode = 1
if testMode > 0:
updaterId = updaterIdTest
updaterId2 = updaterIdTest
modList.append(adminIrcNames[0]) # treat me as a streamer for easier testing
reddit = praw.Reddit(
user_agent = 'TPPStreamerBot, by /u/flarn2006',
client_id = identity['reddit_client_id'],
client_secret = identity['reddit_client_secret'],
username = identity['reddit_username'],
password = identity['reddit_password'])
mentionedUserRegex = re.compile('^@([^.,:\\s]+)')
ircNameRegex = re.compile('^[A-Za-z0-9_]+$')
def getDisplayName(username):
if username in displayNames:
return displayNames[username]
else:
headers = {'Client-ID':identity['twitch_client_id'], 'Accept':'application/vnd.twitchtv.v3+json'}
try:
req = requests.get('https://api.twitch.tv/kraken/users/'+urllib.quote(username), headers=headers)
dn = req.json()[u'display_name']
displayNames[username] = dn
ircNames[dn.lower()] = username
return dn
except Exception as ex:
print '\x1b[1;31m[!] Error getting display name for {}\x1b[m'.format(username)
return username
def getDisplayNameForUpdater(username):
dn = getDisplayName(username)
if dn.lower() != username.lower():
return u'{} ({})'.format(dn, username)
else:
return dn
def getIrcName(displayname):
if ircNameRegex.match(displayname):
# This is a valid Twitch/IRC name on its own. No need to look it up.
return displayname.lower()
elif displayname in ircNames:
# This is a recognized display name. Return its associated username.
return ircNames[displayname]
else:
# Neither a valid Twitch name, nor a recognized display name. Return an empty string to mean no match.
return ''
def isMsgImportant(msg):
for r in msgRejectPatterns:
if r.search(msg):
return False
return True
def escapeMarkdown(text):
result = ''
for c in text:
if c in '\\*[]`^':
result += '\\'
result += c
return result
def postUpdate(updater, msg):
if updater == updaterId2:
print '\x1b[0;37m-> \x1b[1;30m{}\x1b[m'.format(msg.encode('utf-8'))
else:
print '\x1b[1;36m-> \x1b[0;36m{}\x1b[m'.format(msg.encode('utf-8'))
for i in xrange(10):
try:
reddit.request('POST', '/api/live/{}/update'.format(updater), {'api_type':'json', 'body':escapeMarkdown(msg)})
break
except RequestException as ex:
print '\x1b[1;31m[!] ({}/10) Error sending request:\x1b[0;31m {}\x1b[m'.format(i+1, ex)
sleep(1)
except Forbidden:
print "\x1b[1;31m[!] 403 FORBIDDEN: \x1b[0;31mDon't forget to accept the invitation!\x1b[m"
break
def findUsernameInMsg(msg):
match = mentionedUserRegex.match(msg)
if match:
return getIrcName(match.group(1).lower())
else:
return ''
def handleMsg(user, msg):
if isMsgImportant(msg):
# Determine which updater, if any, this message should be posted to.
upd = ''
if user in modList:
upd = updaterId
elif user in modList2:
upd = updaterId2
# Aissurtievos only wants messages beginning with a ` to be posted
if user == 'aissurtievos' and not msg.startswith('`'):
upd = ''
if upd != '':
# Message is from a monitored user.
# First, see if the message is a reply to another user, so we can pull their message.
mentionedUser = findUsernameInMsg(msg)
if mentionedUser != '' and mentionedUser in prevMsgs and mentionedUser not in modList:
# We've got a match! But let's make sure the message was posted recently.
if datetime.now() - prevMsgTimes[mentionedUser] > timedelta(0, 300):
# Looks like it wasn't. Let's remove it from the list and forget about it.
mentionedUser = ''
else:
# Nope, no match. Either nobody was mentioned or we have no message stored from them.
mentionedUser = ''
if mentionedUser == '':
# Standard format update
postUpdate(upd, u'[Streamer] {}: {}'.format(getDisplayName(user), msg))
else:
# Update including message from other user
postUpdate(upd, u'[Streamer] {}: {}\n\n{}: {}'.format(getDisplayNameForUpdater(mentionedUser), prevMsgs[mentionedUser], getDisplayName(user), msg))
# Add the message to the previous messages list.
prevMsgs[user] = msg
prevMsgTimes[user] = datetime.now()
dn = getDisplayName(user)
prevMsgs[dn] = msg
prevMsgTimes[dn] = prevMsgTimes[user]
def handleWhisper(user, msg):
global updaterId
cmd = msg.split(u' ')
cmd[0] = cmd[0].lower()
if cmd[0] == 'lastmsg':
try:
cmd[1] = cmd[1].lower()
if cmd[1] in prevMsgs:
username = cmd[1]
elif getDisplayName(cmd[1]) in prevMsgs:
username = getDisplayName(cmd[1])
else:
return u"{} didn't say anything recently.".format(cmd[1])
            return u'[{} ago] {}: {}'.format(datetime.now()-prevMsgTimes[username], getDisplayName(cmd[1]), prevMsgs[username])
except IndexError:
return 'Usage: lastmsg <username>'
elif cmd[0] == 'update':
if user in adminIrcNames:
text = unicode.join(u' ', cmd[1:])
if text:
postUpdate(updaterId, text)
return 'Update posted to https://reddit.com/live/' + updaterId
else:
return 'Usage: update <text>'
else:
return 'Sorry, you do not have permission to use this command.'
elif cmd[0] == 'setfeed':
if user in adminIrcNames:
try:
if '/' in cmd[1]:
return 'Try again with just the part after the slash, not the whole URL.'
updaterId = cmd[1]
return u'Moved to https://reddit.com/live/{}.\nPlease use the "update" command to test.'.format(updaterId)
except IndexError:
return 'Usage: setfeed <updater id>'
else:
return 'Sorry, you do not have permission to use this command.'
elif cmd[0] == 'getfeed':
return u'Currently posting to https://reddit.com/live/{}.'.format(updaterId)
elif cmd[0] == 'help':
return 'TPPStreamerBot, by /u/flarn2006\n\
lastmsg <user> - Check the last thing said by a user\n\
getfeed - Get the URL of the updater currently being posted to\n\
setfeed <id> - Set the ID of the updater to post to (admin only)\n\
update <text> - Posts a message to the live updater (admin only)'
else:
return u'Unrecognized command "{}"'.format(cmd[0])
def send_whisper(user, msg):
global bot
if msg != '':
print u'\x1b[1;32m[W] {} <- \x1b[0;32m{}\x1b[m'.format(user, msg)
for m in msg.split('\n'):
bot.connection.privmsg('jtv', u'/w {} {}'.format(user, m)[:511])
class IrcWatcher(irc.bot.SingleServerIRCBot):
firstMsg = False
def __init__(self):
server = irc.bot.ServerSpec('irc.chat.twitch.tv', 6667, identity['twitch_irc_oauth'])
print '\x1b[1;33mConnecting to Twitch chat...\x1b[m'
irc.bot.SingleServerIRCBot.__init__(self, [server], identity['twitch_irc_nick'], identity['twitch_irc_nick'])
def on_welcome(self, server, event):
server.cap('REQ', 'twitch.tv/commands')
print '\x1b[1;33mJoining TPP channel...\x1b[m'
server.join('#twitchplayspokemon')
print '\x1b[1;32mNow monitoring chat.\x1b[m'
self.firstMsg = True
def on_pubmsg(self, server, event):
if self.firstMsg:
print '\x1b[1;32mFirst message received.\x1b[m'
self.firstMsg = False
handleMsg(event.source.nick, event.arguments[0])
def on_whisper(self, server, event):
print u'\x1b[1;33m[W] {}:\x1b[0;33m {}\x1b[m'.format(event.source.nick, event.arguments[0])
try:
reply = handleWhisper(event.source.nick, event.arguments[0])
send_whisper(event.source.nick, reply)
except Exception as ex:
print u'\x1b[1;31mError processing whisper: \x1b[0;31m{}\x1b[m'.format(ex)
# Main loop begins here.
print '\x1b[1;34m * * * * * * * * * * * * * * * * *\x1b[m'
print '\x1b[1;34m* TPPStreamerBot, by /u/flarn2006 *\x1b[m'
print '\x1b[1;34m * * * * * * * * * * * * * * * * *\x1b[m'
if testMode == 2:
# Test mode is active. Get test messages from the console instead of from unreliable Twitch chat.
print '\x1b[1;35mRunning in test mode. Type "exit" when done.'
while True:
user = raw_input('\x1b[1;35mUser: \x1b[0;35m')
if user == 'exit':
break
msg = raw_input('\x1b[1;35mMsg: \x1b[0;35m')
if msg == 'exit':
break
handleMsg(user, msg)
print
print '\x1b[m'
else:
# Connect to Twitch chat and read messages from there.
try:
bot = IrcWatcher()
bot.start()
except KeyboardInterrupt:
print '\x1b[1;34mExiting.\x1b[m'
| flarn2006/TPPStreamerBot | tppsb.py | Python | mit | 10,064 |
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import DeprecatedCommand, KickstartCommand
from pykickstart.errors import KickstartParseError, formatErrorMsg
from pykickstart.i18n import _
from pykickstart.options import KSOptionParser
class FC3_LangSupport(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.deflang = kwargs.get("deflang", "")
self.supported = kwargs.get("supported", [])
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.deflang:
retval += "langsupport --default=%s" % self.deflang
if self.supported:
retval += " %s" % " ".join(self.supported)
return retval + "\n"
def _getParser(self):
op = KSOptionParser()
op.add_argument("--default", dest="deflang", default="en_US.UTF-8")
return op
def parse(self, args):
(ns, extra) = self.op.parse_known_args(args=args, lineno=self.lineno)
if any(arg for arg in extra if arg.startswith("-")):
mapping = {"command": "langsupport", "options": extra}
raise KickstartParseError(formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping))
self.set_to_self(ns)
self.supported = extra
return self
class FC5_LangSupport(DeprecatedCommand):
def __init__(self):
DeprecatedCommand.__init__(self)
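def _example_parse():
    # A rough usage sketch (normally the kickstart parser performs this dispatch,
    # not user code): feed the FC3 handler the arguments of a line such as
    #   langsupport --default=de_DE.UTF-8 de_DE.UTF-8 en_US.UTF-8
    cmd = FC3_LangSupport()
    cmd.lineno = 1
    cmd.parse(["--default=de_DE.UTF-8", "de_DE.UTF-8", "en_US.UTF-8"])
    return cmd.deflang, cmd.supported  # ('de_DE.UTF-8', ['de_DE.UTF-8', 'en_US.UTF-8'])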
| jikortus/pykickstart | pykickstart/commands/langsupport.py | Python | gpl-2.0 | 2,590 |
from src.platform.jboss.interfaces import JINTERFACES
from src.platform.jboss.authenticate import checkAuth
from src.module.deploy_utils import parse_war_path
from collections import OrderedDict
from os.path import abspath
from log import LOG
import utility
title = JINTERFACES.JMX
versions = ["3.2", "4.0", "4.2", "5.0", "5.1"]
def deploy(fingerengine, fingerprint):
""" Exploits the DeploymentFileRepository bean to deploy
a JSP to the remote server. Note that this requires a JSP,
not a packaged or exploded WAR.
"""
war_file = abspath(fingerengine.options.deploy)
war_name = parse_war_path(war_file)
if '.war' in war_file:
tmp = utility.capture_input("This deployer requires a JSP, default to cmd.jsp? [Y/n]")
if "n" in tmp.lower():
return
war_file = abspath("./src/lib/cmd.jsp")
war_name = "cmd"
utility.Msg("Preparing to deploy {0}...".format(war_file))
url = "http://{0}:{1}/jmx-console/HtmlAdaptor".format(
fingerengine.options.ip, fingerprint.port)
data = OrderedDict([
('action', 'invokeOp'),
('name', 'jboss.admin:service=DeploymentFileRepository'),
('methodIndex', 5),
('arg0', war_file.replace('.jsp', '.war')),
('arg1', war_name),
('arg2', '.jsp'),
('arg3', open(war_file, 'r').read()),
('arg4', True)
])
response = utility.requests_post(url, data=data)
if response.status_code == 401:
utility.Msg("Host %s:%s requires auth for JMX, checking..." %
(fingerengine.options.ip, fingerprint.port), LOG.DEBUG)
cookies = checkAuth(fingerengine.options.ip, fingerprint.port,
fingerprint.title, fingerprint.version)
if cookies:
response = utility.requests_post(url, data=data, cookies=cookies[0],
auth=cookies[1])
else:
utility.Msg("Could not get auth for %s:%s" %
(fingerengine.options.ip, fingerprint.port), LOG.ERROR)
return
if response.status_code == 200:
utility.Msg("Successfully deployed {0}".format(war_file), LOG.SUCCESS)
else:
utility.Msg("Failed to deploy (HTTP %d)" % response.status_code, LOG.ERROR)
| jorik041/clusterd | src/platform/jboss/deployers/dfs_deploy.py | Python | mit | 2,440 |
try:
from django.conf.urls.defaults import patterns, url
except ImportError:
from django.conf.urls import patterns, url
urlpatterns = patterns('libthumbor.django.views',
url("^$", 'generate_url', name="generate_thumbor_url"),
)
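# A typical wiring for these patterns (illustrative only): include the module from
# a project urlconf, e.g.
#     url(r'^thumbor/', include('libthumbor.django.urls')),
# after which the route can be reversed with reverse('generate_thumbor_url').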
| APSL/libthumbor | libthumbor/django/urls.py | Python | mit | 241 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations:
"""DdosProtectionPlansOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> "_models.DdosProtectionPlan":
"""Gets information about the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosProtectionPlan, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_06_01.models.DdosProtectionPlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.DdosProtectionPlan",
**kwargs: Any
) -> "_models.DdosProtectionPlan":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.DdosProtectionPlan",
**kwargs: Any
) -> AsyncLROPoller["_models.DdosProtectionPlan"]:
"""Creates or updates a DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2018_06_01.models.DdosProtectionPlan
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.DdosProtectionPlanListResult"]:
"""Gets all DDoS protection plans in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_06_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DdosProtectionPlanListResult"]:
"""Gets all the DDoS protection plans in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_06_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
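async def _example_create_plan(credential, subscription_id):
    # A minimal usage sketch, not part of the generated client: these operations
    # are normally reached through the service client's ddos_protection_plans
    # attribute rather than by instantiating this class directly. The resource
    # group and plan names below are placeholders.
    from azure.mgmt.network.v2018_06_01.aio import NetworkManagementClient
    async with NetworkManagementClient(credential, subscription_id) as client:
        poller = await client.ddos_protection_plans.begin_create_or_update(
            resource_group_name="example-rg",
            ddos_protection_plan_name="example-plan",
            parameters=_models.DdosProtectionPlan(location="westus"),
        )
        return await poller.result()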
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_06_01/aio/operations/_ddos_protection_plans_operations.py | Python | mit | 23,738 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-09 05:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ptti', '0008_auto_20161108_2355'),
]
operations = [
migrations.AlterField(
model_name='preguntatestti',
name='numero',
field=models.CharField(max_length=200),
),
]
| z3774/ptti-source | ptti/migrations/0009_auto_20161109_0004.py | Python | gpl-3.0 | 457 |
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import uuid
from buildbot.changes.filter import ChangeFilter
from buildbot.scheduler import Dependent
from buildbot.scheduler import Nightly
from buildbot.scheduler import Periodic
from buildbot.schedulers.basic import SingleBranchScheduler as Scheduler
from buildbot.scheduler import Triggerable
from master import slaves_list
from master.url_poller import URLPoller
def GetGSUtilUrl(gs_bucket, root_folder):
return 'gs://%s/%s' % (gs_bucket, root_folder)
class Helper(object):
def __init__(self, defaults):
self._defaults = defaults
self._builders = []
self._factories = {}
self._schedulers = {}
def Builder(self, name, factory, gatekeeper=None, scheduler=None,
builddir=None, auto_reboot=True, notify_on_missing=False,
slavebuilddir=None, category=None):
category = category or self._defaults.get('category')
self._builders.append({'name': name,
'factory': factory,
'gatekeeper': gatekeeper,
'schedulers': scheduler.split('|'),
'builddir': builddir,
'category': category,
'auto_reboot': auto_reboot,
'notify_on_missing': notify_on_missing,
'slavebuilddir': slavebuilddir})
def Hourly(self, name, branch, hour='*'):
"""Helper method for the Nightly scheduler."""
if name in self._schedulers:
raise ValueError('Scheduler %s already exists' % name)
self._schedulers[name] = {'type': 'Nightly',
'builders': [],
'branch': branch,
'hour': hour}
def Periodic(self, name, periodicBuildTimer):
"""Helper method for the Periodic scheduler."""
if name in self._schedulers:
raise ValueError('Scheduler %s already exists' % name)
self._schedulers[name] = {'type': 'Periodic',
'builders': [],
'periodicBuildTimer': periodicBuildTimer}
def Dependent(self, name, parent):
if name in self._schedulers:
raise ValueError('Scheduler %s already exists' % name)
self._schedulers[name] = {'type': 'Dependent',
'parent': parent,
'builders': []}
def Triggerable(self, name):
if name in self._schedulers:
raise ValueError('Scheduler %s already exists' % name)
self._schedulers[name] = {'type': 'Triggerable',
'builders': []}
def Factory(self, name, factory):
if name in self._factories:
raise ValueError('Factory %s already exists' % name)
self._factories[name] = factory
def Scheduler(self, name, branch, treeStableTimer=60, categories=None):
if name in self._schedulers:
raise ValueError('Scheduler %s already exists' % name)
self._schedulers[name] = {'type': 'Scheduler',
'branch': branch,
'treeStableTimer': treeStableTimer,
'builders': [],
'categories': categories}
def URLScheduler(self, name, url, pollInterval=300, include_revision=False):
self._schedulers[name] = {'type': 'URLScheduler',
'url': url,
'builders': [],
'include_revision': include_revision,
'pollInterval': pollInterval}
def Update(self, c):
global_schedulers = dict((s.name, s) for s in c['schedulers']
if s.name.startswith('global_'))
assert not set(global_schedulers) & set(self._schedulers)
for builder in self._builders:
# Update the schedulers with the builder.
schedulers = builder['schedulers']
if schedulers:
for scheduler in schedulers:
if scheduler in global_schedulers:
global_schedulers[scheduler].builderNames.append(builder['name'])
else:
self._schedulers[scheduler]['builders'].append(builder['name'])
# Construct the category.
categories = []
if builder.get('category', None):
categories.append(builder['category'])
if builder.get('gatekeeper', None):
categories.extend(builder['gatekeeper'].split('|'))
category = '|'.join(categories)
# Append the builder to the list.
new_builder = {'name': builder['name'],
'factory': self._factories[builder['factory']],
'category': category,
'auto_reboot': builder['auto_reboot']}
if builder['builddir']:
new_builder['builddir'] = builder['builddir']
if builder['slavebuilddir']:
new_builder['slavebuilddir'] = builder['slavebuilddir']
c['builders'].append(new_builder)
# Process the main schedulers.
for s_name in self._schedulers:
scheduler = self._schedulers[s_name]
if scheduler['type'] == 'Scheduler':
instance = Scheduler(name=s_name,
branch=scheduler['branch'],
treeStableTimer=scheduler['treeStableTimer'],
builderNames=scheduler['builders'],
categories=scheduler['categories'])
scheduler['instance'] = instance
c['schedulers'].append(instance)
elif scheduler['type'] == 'URLScheduler':
ident = str(uuid.uuid4())
c['change_source'].append(
URLPoller(changeurl=scheduler['url'],
pollInterval=scheduler['pollInterval'],
category=ident,
include_revision=scheduler['include_revision']))
instance = Scheduler(name=s_name,
change_filter=ChangeFilter(category=ident),
builderNames=scheduler['builders'])
scheduler['instance'] = instance
c['schedulers'].append(instance)
# Process the dependent schedulers.
for s_name in self._schedulers:
scheduler = self._schedulers[s_name]
if scheduler['type'] == 'Dependent':
c['schedulers'].append(
Dependent(s_name,
self._schedulers[scheduler['parent']]['instance'],
scheduler['builders']))
# Process the triggerable schedulers.
for s_name in self._schedulers:
scheduler = self._schedulers[s_name]
if scheduler['type'] == 'Triggerable':
c['schedulers'].append(Triggerable(s_name,
scheduler['builders']))
# Process the periodic schedulers.
for s_name in self._schedulers:
scheduler = self._schedulers[s_name]
if scheduler['type'] == 'Periodic':
c['schedulers'].append(
Periodic(s_name,
periodicBuildTimer=scheduler['periodicBuildTimer'],
builderNames=scheduler['builders']))
# Process the nightly schedulers.
for s_name in self._schedulers:
scheduler = self._schedulers[s_name]
if scheduler['type'] == 'Nightly':
c['schedulers'].append(Nightly(s_name,
branch=scheduler['branch'],
hour=scheduler['hour'],
builderNames=scheduler['builders']))
| eunchong/build | scripts/master/master_config.py | Python | bsd-3-clause | 7,620 |
#!/usr/bin/env python
# query cpu topology and print all matching cpu numbers
# cputop "query" ["format"]
# query is a python expression, using variables:
# socket, core, thread, type, cpu
# or "offline" to query all offline cpus
# or "atom" or "core" to select core types
# type can be "atom" or "core"
# cpu is the cpu number
# format is a printf format with %d
# %d will be replaced with the cpu number
# format can be offline to offline the cpu or online to online
# Author: Andi Kleen
from __future__ import print_function
import sys
import os
import re
import argparse
import glob
def numfile(fn):
f = open(fn, "r")
v = int(f.read())
f.close()
return v
outstr = ""
def output(p, fmt):
if fmt:
if fmt == "taskset":
global outstr
if outstr:
outstr += ","
else:
outstr += "taskset -c "
outstr += "%d" % p
else:
print(fmt % (p,))
else:
print(p)
ap = argparse.ArgumentParser(description='''
query cpu topology and print all matching cpu numbers
cputop "query" ["format"]
query is a python expression, using variables:
socket, core, thread, type, cpu
type is "core" or "atom" on a hybrid system
cpu is the cpu number
or "offline" to query all offline cpus
format is a printf format with %d
%d will be replaced with the cpu number, or online/offline
to generate online/offline commands, or taskset to generate taskset command line''',
epilog='''
Examples:
print all cores on socket 0
cputop "socket == 0"
print all first threads in each core on socket 0
cputop "thread == 0 and socket == 0"
disable all second threads (disable hyper threading)
cputop "thread == 1" offline
reenable all offlined cpus
cputop offline online
print all online cpus
cputop True ''', formatter_class=argparse.RawTextHelpFormatter)
ap.add_argument('expr', help='python expression with socket/core/thread')
ap.add_argument('fmt', help='Output format string with %%d, or online/offline', nargs='?')
args = ap.parse_args()
if args.expr == "atom":
args.expr = 'type == "atom"'
if args.expr == "core":
args.expr = 'type == "core"'
special = {
"offline": "echo 0 > /sys/devices/system/cpu/cpu%d/online",
"online": "echo 1 > /sys/devices/system/cpu/cpu%d/online",
}
if args.fmt in special:
args.fmt = special[args.fmt]
types = dict()
for fn in glob.glob("/sys/devices/cpu_*/cpus"):
typ = os.path.basename(fn.replace("/cpus", "")).replace("cpu_","")
cpus = open(fn).read()
for j in cpus.split(","):
m = re.match(r'(\d+)(-\d+)?', j)
if m.group(2):
for k in range(int(m.group(1)), int(m.group(2)[1:])+1):
types[k] = typ
else:
types[int(m.group(1))] = typ
base = "/sys/devices/system/cpu/"
p = {}
l = os.listdir(base)
for d in l:
m = re.match(r"cpu([0-9]+)", d)
if not m:
continue
proc = int(m.group(1))
top = base + d + "/topology"
if not os.path.exists(top):
if args.expr == "offline":
output(proc, args.fmt)
continue
socket = numfile(top + "/physical_package_id")
core = numfile(top + "/core_id")
n = 0
while (socket, core, n) in p:
n += 1
p[(socket, core, n)] = proc
if args.expr == "offline":
sys.exit(0)
for j in sorted(p.keys()):
socket, core, thread = j
cpu = p[j]
type = "any"
if cpu in types:
type = types[cpu]
if eval(args.expr):
output(p[j], args.fmt)
if outstr:
print(outstr)
| andikleen/pmu-tools | cputop.py | Python | gpl-2.0 | 3,533 |
import time
import collections
from django.core.exceptions import ImproperlyConfigured
from rest_framework.parsers import JSONParser
from rest_framework.exceptions import ParseError, NotAuthenticated
from framework.auth import signing
from api.base.utils import is_bulk_request
from api.base.renderers import JSONAPIRenderer
from api.base.exceptions import JSONAPIException
NO_RELATIONSHIPS_ERROR = 'Request must include /data/relationships.'
NO_DATA_ERROR = 'Request must include /data.'
NO_TYPE_ERROR = 'Request must include /type.'
NO_ID_ERROR = 'Request must include /data/id.'
class JSONAPIParser(JSONParser):
"""
Parses JSON-serialized data. Overrides media_type.
"""
media_type = 'application/vnd.api+json'
renderer_class = JSONAPIRenderer
@staticmethod
def get_relationship(data, related_resource):
target_type = data.get('type')
if not target_type:
raise JSONAPIException(
source={'pointer': 'data/relationships/{}/data/type'.format(related_resource)},
detail=NO_TYPE_ERROR,
)
id = data.get('id')
return {'id': id, 'target_type': target_type}
# Overrides JSONParser
def flatten_relationships(self, relationships):
"""
Flattens relationships dictionary which has information needed to create related resource objects.
Validates that formatting of relationships dictionary is correct.
"""
if not isinstance(relationships, dict):
raise ParseError()
# Can only create one type of relationship.
related_resource = list(relationships.keys())[0]
if not isinstance(relationships[related_resource], dict) or related_resource == 'data':
raise ParseError()
data = relationships[related_resource].get('data')
if not data:
raise JSONAPIException(source={'pointer': 'data/relationships/{}/data'.format(related_resource)}, detail=NO_DATA_ERROR)
if isinstance(data, list):
return [self.get_relationship(item, related_resource) for item in data]
else:
return self.get_relationship(data, related_resource)
def flatten_data(self, resource_object, parser_context, is_list):
"""
Flattens data objects, making attributes and relationships fields the same level as id and type.
"""
relationships = resource_object.get('relationships')
is_relationship = parser_context.get('is_relationship')
# allow skip type check for legacy api version
legacy_type_allowed = parser_context.get('legacy_type_allowed', False)
request_method = parser_context['request'].method
if is_relationship and request_method == 'POST':
if not relationships:
raise JSONAPIException(source={'pointer': '/data/relationships'}, detail=NO_RELATIONSHIPS_ERROR)
object_id = resource_object.get('id')
object_type = resource_object.get('type')
type_required = not (
legacy_type_allowed and float(parser_context['request'].version) < 2.7 and request_method == 'PATCH'
)
# For validating type and id for bulk delete:
if is_list and request_method == 'DELETE':
if object_id is None:
raise JSONAPIException(source={'pointer': '/data/id'}, detail=NO_ID_ERROR)
if type_required and object_type is None:
raise JSONAPIException(source={'pointer': '/data/type'}, detail=NO_TYPE_ERROR)
attributes = resource_object.get('attributes')
parsed = {'id': object_id, 'type': object_type}
if attributes:
parsed.update(attributes)
if relationships:
relationships = self.flatten_relationships(relationships)
if isinstance(relationships, list):
relationship_values = []
relationship_key = None
for relationship in relationships:
for key, value in relationship.items():
relationship_values.append(value)
relationship_key = key
relationship = {relationship_key: relationship_values}
parsed.update(relationship)
else:
parsed.update(relationships)
return parsed
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data.
"""
result = super(JSONAPIParser, self).parse(stream, media_type=media_type, parser_context=parser_context)
if not isinstance(result, dict):
raise ParseError()
data = result.get('data', {})
if data:
if is_bulk_request(parser_context['request']):
if not isinstance(data, list):
raise ParseError('Expected a list of items but got type "dict".')
data_collection = []
data_collection.extend([self.flatten_data(data_object, parser_context, is_list=True) for data_object in data])
return data_collection
else:
if not isinstance(data, collections.Mapping):
raise ParseError('Expected a dictionary of items.')
return self.flatten_data(data, parser_context, is_list=False)
else:
raise JSONAPIException(source={'pointer': '/data'}, detail=NO_DATA_ERROR)
def flatten_multiple_relationships(self, parser, relationships):
rel = {}
for resource in relationships:
ret = super(parser, self).flatten_relationships({resource: relationships[resource]})
if isinstance(ret, list):
rel[resource] = []
for item in ret:
if item.get('target_type') and item.get('id'):
rel[resource].append(item['id'])
else:
if ret.get('target_type') and ret.get('id'):
rel[resource] = ret['id']
return rel
class JSONAPIParserForRegularJSON(JSONAPIParser):
"""
Allows same processing as JSONAPIParser to occur for requests with application/json media type.
"""
media_type = 'application/json'
class JSONAPIRelationshipParser(JSONParser):
"""
Parses JSON-serialized data for relationship endpoints. Overrides media_type.
"""
media_type = 'application/vnd.api+json'
def parse(self, stream, media_type=None, parser_context=None):
res = super(JSONAPIRelationshipParser, self).parse(stream, media_type, parser_context)
if not isinstance(res, dict):
raise ParseError('Request body must be dictionary')
data = res.get('data')
if data:
if not isinstance(data, list):
raise ParseError('Data must be an array')
for i, datum in enumerate(data):
if datum.get('id') is None:
raise JSONAPIException(source={'pointer': '/data/{}/id'.format(str(i))}, detail=NO_ID_ERROR)
if datum.get('type') is None:
raise JSONAPIException(source={'pointer': '/data/{}/type'.format(str(i))}, detail=NO_TYPE_ERROR)
return {'data': data}
return {'data': []}
class JSONAPIRelationshipParserForRegularJSON(JSONAPIRelationshipParser):
"""
Allows same processing as JSONAPIRelationshipParser to occur for requests with application/json media type.
"""
media_type = 'application/json'
class JSONAPIOnetoOneRelationshipParser(JSONParser):
"""
Parses JSON-serialized data for relationship endpoints. Overrides media_type.
"""
media_type = 'application/vnd.api+json'
def parse(self, stream, media_type=None, parser_context=None):
res = super(JSONAPIOnetoOneRelationshipParser, self).parse(stream, media_type, parser_context)
if not isinstance(res, dict):
raise ParseError('Request body must be dictionary')
data = res.get('data')
# allow skip type check for legacy api version
legacy_type_allowed = parser_context.get('legacy_type_allowed', True)
type_required = not (
legacy_type_allowed and
float(parser_context['request'].version) < 2.7 and
parser_context['request'].method == 'PATCH'
)
if data:
id_ = data.get('id')
type_ = data.get('type')
if id_ is None:
raise JSONAPIException(source={'pointer': '/data/id'}, detail=NO_ID_ERROR)
if type_required and type_ is None:
raise JSONAPIException(source={'pointer': '/data/type'}, detail=NO_TYPE_ERROR)
return data
return {'type': None, 'id': None}
class JSONAPIOnetoOneRelationshipParserForRegularJSON(JSONAPIOnetoOneRelationshipParser):
"""
Allows same processing as JSONAPIRelationshipParser to occur for requests with application/json media type.
"""
media_type = 'application/json'
class JSONAPIMultipleRelationshipsParser(JSONAPIParser):
"""
If edits are made to this class, be sure to check JSONAPIMultipleRelationshipsParserForRegularJSON to see if corresponding
edits should be made there.
"""
def flatten_relationships(self, relationships):
return self.flatten_multiple_relationships(JSONAPIMultipleRelationshipsParser, relationships)
class JSONAPIMultipleRelationshipsParserForRegularJSON(JSONAPIParserForRegularJSON):
"""
Allows same processing as JSONAPIMultipleRelationshipsParser to occur for requests with application/json media type.
"""
def flatten_relationships(self, relationships):
return self.flatten_multiple_relationships(JSONAPIMultipleRelationshipsParserForRegularJSON, relationships)
class HMACSignedParser(JSONParser):
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON. Validates the 'signature' in the payload then returns the resulting data.
"""
data = super(HMACSignedParser, self).parse(stream, media_type=media_type, parser_context=parser_context)
try:
sig = data['signature']
payload = signing.unserialize_payload(data['payload'])
exp_time = payload['time']
except (KeyError, ValueError):
raise JSONAPIException(detail='Invalid Payload')
if not signing.default_signer.verify_payload(sig, payload):
raise NotAuthenticated
if time.time() > exp_time:
raise JSONAPIException(detail='Signature has expired')
return payload
class SearchParser(JSONAPIParser):
def parse(self, stream, media_type=None, parser_context=None):
try:
view = parser_context['view']
except KeyError:
raise ImproperlyConfigured('SearchParser requires "view" context.')
data = super(SearchParser, self).parse(stream, media_type=media_type, parser_context=parser_context)
if not data:
raise JSONAPIException(detail='Invalid Payload')
res = {
'query': {
'bool': {},
},
}
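        # Final shape (sketch): {'query': {'bool': {'must': {'query_string': ...},
        # 'filter': [term/terms clauses]}}, 'sort': [...]}; an Elasticsearch-style
        # bool query assembled from the request body (inferred from the field names).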
sort = parser_context['request'].query_params.get('sort')
if sort:
res['sort'] = [{
sort.lstrip('-'): {
'order': 'desc' if sort.startswith('-') else 'asc',
},
}]
try:
q = data.pop('q')
except KeyError:
pass
else:
res['query']['bool'].update({
'must': {
'query_string': {
'query': q,
'fields': view.search_fields,
},
},
})
if any(data.values()):
res['query']['bool'].update({'filter': []})
for key, val in data.items():
if val is not None:
if isinstance(val, list):
res['query']['bool']['filter'].append({'terms': {key: val}})
else:
res['query']['bool']['filter'].append({'term': {key: val}})
return res
| adlius/osf.io | api/base/parsers.py | Python | apache-2.0 | 12,295 |
from rezgui.qt import QtCore, QtGui
from rezgui.util import create_pane
from rezgui.widgets.IconButton import IconButton
from rezgui.widgets.TimeSelecterPopup import TimeSelecterPopup
from rezgui.dialogs.BrowsePackageDialog import BrowsePackageDialog
import time
class TimestampWidget(QtGui.QFrame):
timeChanged = QtCore.Signal(int) # epoch time
def __init__(self, context_model, parent=None):
super(TimestampWidget, self).__init__(parent)
self.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
self.context_model = context_model
self.popup = None
self.package_btn = IconButton("package", "select package release date")
self.clock_btn = IconButton("clock", "select time in the past")
self.checkbox = QtGui.QCheckBox("ignore packages released after:")
pane = create_pane([None,
self.checkbox,
self.package_btn,
self.clock_btn], True, compact=True)
self.edit = QtGui.QDateTimeEdit()
self.edit.setCalendarPopup(True)
self.edit.setDateTime(QtCore.QDateTime.currentDateTime())
create_pane([pane, self.edit], False, compact=True, parent_widget=self)
self.checkbox.stateChanged.connect(self._stateChanged)
self.package_btn.clicked.connect(self._selectPackage)
self.clock_btn.clicked.connect(self._selectTime)
self.refresh()
def datetime(self):
"""Returns the selected datetime, or None if not set."""
if self.checkbox.isChecked():
return self.edit.dateTime()
else:
return None
def set_time(self, epoch):
dt = QtCore.QDateTime()
dt.setTime_t(epoch)
self.edit.setDateTime(dt)
self.checkbox.setChecked(True)
self.timeChanged.emit(epoch)
def refresh(self):
b = self.checkbox.isChecked()
self.package_btn.setEnabled(b)
self.clock_btn.setEnabled(b)
self.edit.setEnabled(b)
def _stateChanged(self, state):
self.refresh()
def _selectPackage(self):
fn = lambda x: bool(x.timestamp)
dlg = BrowsePackageDialog(context_model=self.context_model,
parent=self.parentWidget(),
package_selectable_callback=fn)
dlg.exec_()
if dlg.package:
self.set_time(dlg.package.timestamp)
def _selectTime(self):
self.popup = TimeSelecterPopup(self.clock_btn, parent=self)
self.popup.secondsClicked.connect(self._secondsClicked)
self.popup.show()
def _secondsClicked(self, seconds):
now = int(time.time())
self.set_time(now - seconds)
| saddingtonbaynes/rez | src/rezgui/widgets/TimestampWidget.py | Python | gpl-3.0 | 2,743 |
from django.utils import timezone
from django.forms import (
ValidationError,
ModelForm,
CharField,
HiddenInput,
Form,
FileField,
)
from .models import (
Article,
ArticleComment,
)
HONEYPOT_STRING = str(347 * 347)
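# Anti-spam honeypot check: the hidden "verify" field must carry this exact value
# (347 * 347 == "120409") and the decoy field named by `missing_name` must stay empty.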
def honeypot_ok(cleaned_data, missing_name):
return (
cleaned_data.get("verify") == HONEYPOT_STRING
and not cleaned_data.get(missing_name)
)
class ArticleForm(ModelForm):
error_css_class = "error"
tags = CharField(required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
article = kwargs.get("instance")
# Set the initial value for the tags field with the space-separated
# tags for the article.
if isinstance(article, Article):
self.fields["tags"].initial = " ".join(
article.tags.all()
.order_by("tag")
.values_list("tag", flat=True)
)
class EditArticleForm (ArticleForm):
class Meta:
model = Article
fields = (
"title",
"slug",
"content",
"creation_date",
"tags",
"active",
)
def save(self):
article = super().save()
# Now use all of the tags set to replace the tags.
article.replace_all_tags(self.cleaned_data["tags"].split())
return article
class ArticleCommentForm (ModelForm):
class Meta:
model = ArticleComment
fields = (
"poster_name",
"content",
)
title = CharField(
required=False,
widget=HiddenInput(attrs={"class": "ningen"}),
)
verify = CharField(widget=HiddenInput())
error_css_class = "error"
def clean(self):
cleaned_data = super().clean()
commenter = self.instance.commenter
if not honeypot_ok(cleaned_data, "title"):
raise ValidationError("You are probably a spammer.")
if commenter.is_banned:
raise ValidationError("You have been banned from posting.")
if commenter.is_comment_too_soon(timezone.now()):
raise ValidationError("You cannot comment again so soon.")
return cleaned_data
class UploadForm(Form):
file = FileField()
error_css_class = "error"
| w0rp/w0rpzone | blog/forms.py | Python | bsd-2-clause | 2,339 |
# Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Installs BeakerX into a Jupyter and Python environment.'''
import argparse
import json
import os
import pkg_resources
import shutil
import subprocess
import sys
import pathlib
import tempfile
from string import Template
from jupyter_client.kernelspecapp import KernelSpecManager
from jupyter_core import paths
from traitlets.config.manager import BaseJSONConfigManager
from distutils import log
def _all_kernels():
kernels = pkg_resources.resource_listdir(
'beakerx', 'kernel')
    return [kernel for kernel in kernels if (kernel != 'base' and kernel != 'sparkex' and kernel != 'runtimetools')]
def _base_classpath_for(kernel):
return pkg_resources.resource_filename(
'beakerx', os.path.join('kernel', kernel))
def _classpath_for(kernel):
return pkg_resources.resource_filename(
'beakerx', os.path.join('kernel', kernel, 'lib', '*'))
def _uninstall_nbextension():
subprocess.check_call(["jupyter", "nbextension", "disable", "beakerx", "--py", "--sys-prefix"])
subprocess.check_call(["jupyter", "nbextension", "uninstall", "beakerx", "--py", "--sys-prefix"])
subprocess.check_call(["jupyter", "serverextension", "disable", "beakerx", "--py", "--sys-prefix"])
def _install_nbextension():
if sys.platform == 'win32':
subprocess.check_call(["jupyter", "nbextension", "install", "beakerx", "--py", "--sys-prefix"])
else:
subprocess.check_call(["jupyter", "nbextension", "install", "beakerx", "--py", "--symlink", "--sys-prefix"])
subprocess.check_call(["jupyter", "nbextension", "enable", "beakerx", "--py", "--sys-prefix"])
subprocess.check_call(["jupyter", "serverextension", "enable", "beakerx", "--py", "--sys-prefix"])
def _install_labextensions(lab):
if lab:
subprocess.check_call(["jupyter", "labextension", "install", "@jupyter-widgets/jupyterlab-manager"])
subprocess.check_call(["jupyter", "labextension", "install", "beakerx-jupyterlab"])
def _uninstall_labextensions(lab):
if lab:
subprocess.check_call(["jupyter", "labextension", "uninstall", "beakerx-jupyterlab"])
subprocess.check_call(["jupyter", "labextension", "uninstall", "@jupyter-widgets/jupyterlab-manager"])
def _install_tabledisplay(lab):
if lab:
subprocess.check_call(["beakerx_tabledisplay", "install", "--lab"])
else:
subprocess.check_call(["beakerx_tabledisplay", "install"])
def _uninstall_tabledisplay():
subprocess.check_call(["beakerx_tabledisplay", "uninstall"])
def _copy_tree(src, dst):
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
def _copy_icons():
log.info("installing icons...")
kernels = KernelSpecManager().find_kernel_specs()
for kernel in _all_kernels():
dst_base = kernels.get(kernel)
src_base = _base_classpath_for(kernel)
shutil.copyfile(os.path.join(src_base, 'logo-32x32.png'), os.path.join(dst_base, 'logo-32x32.png'))
shutil.copyfile(os.path.join(src_base, 'logo-64x64.png'), os.path.join(dst_base, 'logo-64x64.png'))
def _install_css():
log.info("installing custom CSS...")
resource = os.path.join('static', 'custom')
src_base = pkg_resources.resource_filename('beakerx', resource)
dst_base = pkg_resources.resource_filename('notebook', resource)
_copy_tree(os.path.join(src_base, 'fonts'), os.path.join(dst_base, 'fonts'))
shutil.copyfile(os.path.join(src_base, 'custom.css'), os.path.join(dst_base, 'custom.css'))
def _install_kernels():
base_classpath = _classpath_for('base')
for kernel in _all_kernels():
kernel_classpath = _classpath_for(kernel)
classpath = json.dumps(os.pathsep.join([base_classpath, kernel_classpath]))
template = pkg_resources.resource_string(
'beakerx', os.path.join('kernel', kernel, 'kernel.json'))
contents = Template(template.decode()).substitute(PATH=classpath)
with tempfile.TemporaryDirectory() as tmpdir:
kernel_dir = os.path.join(tmpdir, kernel)
os.mkdir(kernel_dir)
with open(os.path.join(kernel_dir, 'kernel.json'), 'w') as f:
f.write(contents)
install_cmd = [
'jupyter', 'kernelspec', 'install',
'--sys-prefix', '--replace',
'--name', kernel, kernel_dir
]
subprocess.check_call(install_cmd)
def _uninstall_kernels():
for kernel in _all_kernels():
uninstall_cmd = [
'jupyter', 'kernelspec', 'remove', kernel, '-y', '-f'
]
try:
subprocess.check_call(uninstall_cmd)
except subprocess.CalledProcessError:
            pass  # uninstall_cmd prints the appropriate message
def _install_magics():
log.info("installing groovy magic for python...")
dir_path = os.path.join(sys.prefix, 'etc', 'ipython')
os.makedirs(dir_path, exist_ok=True)
with open(os.path.join(dir_path, 'ipython_config.py'), 'w+') as ipython_config:
ipython_config.write("c = get_config()\n")
ipython_config.write("c.InteractiveShellApp.extensions = ["
"'beakerx.autotranslation',\n"
"'beakerx_magics.kernel_magic',\n"
"'beakerx_magics.groovy_magic',\n"
"'beakerx_magics.clojure_magic',\n"
"'beakerx_magics.sparkex_magic',\n"
"'beakerx_magics.kotlin_magic',\n"
"'beakerx_magics.scala_magic',\n"
"'beakerx_magics.sql_magic',\n"
"'beakerx_magics.java_magic',\n"
"'beakerx_magics.kernel_runner_magic'\n"
"]\n")
def _set_conf_privileges():
config_path = os.path.join(paths.jupyter_config_dir(), 'beakerx.json')
if pathlib.Path(config_path).exists():
os.chmod(config_path, 0o600)
def _pretty(it):
return json.dumps(it, indent=2)
def _install_kernelspec_manager(prefix, disable=False):
CKSM = "beakerx.kernel_spec.BeakerXKernelSpec"
KSMC = "kernel_spec_class"
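    # Effect (sketch): enabling writes {"KernelSpecManager": {"kernel_spec_class":
    # "beakerx.kernel_spec.BeakerXKernelSpec"}} into jupyter_notebook_config.json under
    # <prefix>/etc/jupyter; disabling removes that key again.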
action_prefix = "Dis" if disable else "En"
log.info("{}abling BeakerX server config...".format(action_prefix))
path = os.path.join(prefix, "etc", "jupyter")
if not os.path.exists(path):
log.debug("Making directory {}...".format(path))
os.makedirs(path)
cm = BaseJSONConfigManager(config_dir=path)
cfg = cm.get("jupyter_notebook_config")
log.debug("Existing config in {}...\n{}".format(path, _pretty(cfg)))
nb_app = cfg.setdefault("KernelSpecManager", {})
if disable and nb_app.get(KSMC, None) == CKSM:
nb_app.pop(KSMC)
elif not disable:
nb_app.update({KSMC: CKSM})
log.debug("Writing config in {}...".format(path))
cm.set("jupyter_notebook_config", cfg)
cfg = cm.get("jupyter_notebook_config")
log.debug("Verifying config in {}...\n{}".format(path, _pretty(cfg)))
if disable:
assert KSMC not in cfg["KernelSpecManager"]
else:
assert cfg["KernelSpecManager"][KSMC] == CKSM
log.info("{}abled BeakerX server config".format(action_prefix))
def make_parser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--prefix",
help="location of the environment to install into",
default=sys.prefix)
parser.add_argument("--disable",
help="Remove Beakerx extension",
action='store_true')
    # --lab controls whether the JupyterLab extensions are (un)installed (read via args.lab below).
    parser.add_argument("--lab",
                        help="also manage the JupyterLab extensions",
                        action='store_true')
    return parser
def _disable_beakerx(args):
_uninstall_nbextension()
_uninstall_labextensions(args.lab)
_uninstall_kernels()
_install_kernelspec_manager(args.prefix, disable=True)
    _uninstall_tabledisplay()
def _install_beakerx(args):
_install_nbextension()
_install_labextensions(args.lab)
_install_kernels()
_install_css()
_copy_icons()
_install_kernelspec_manager(args.prefix)
_install_magics()
_set_conf_privileges()
_install_tabledisplay(args.lab)
def install(args):
_install_beakerx(args)
def uninstall(args):
_disable_beakerx(args)
if __name__ == "__main__":
    args = make_parser().parse_args()
    if args.disable:
        uninstall(args)
    else:
        install(args)
| twosigma/beaker-notebook | beakerx/beakerx/install.py | Python | apache-2.0 | 8,866 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *  # Spack package API of this era; provides RubyPackage, version(), etc.
class RubyErubis(RubyPackage):
"""Erubis is a fast, secure, and very extensible implementation of eRuby.
"""
homepage = "http://www.kuwata-lab.com/erubis/"
git = "https://github.com/kwatch/erubis.git"
version('master', branch='master')
version('2.7.0', commit='14d3eab57fbc361312c8f3af350cbf9a5bafce17')
def patch(self):
filter_file('$Release$', str(self.version),
'erubis.gemspec', string=True)
| LLNL/spack | var/spack/repos/builtin/packages/ruby-erubis/package.py | Python | lgpl-2.1 | 659 |
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit test suite that collects all test cases for GRIT.'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..'))
import unittest
# TODO(joi) Use unittest.defaultTestLoader to automatically load tests
# from modules. Iterating over the directory and importing could then
# automate this all the way, if desired.
class TestSuiteAll(unittest.TestSuite):
def __init__(self):
super(type(self), self).__init__()
# Imports placed here to prevent circular imports.
from grit import grd_reader_unittest
from grit import grit_runner_unittest
from grit.node import base_unittest
from grit.node import io_unittest
from grit import clique_unittest
from grit.node import misc_unittest
from grit.gather import rc_unittest
from grit.gather import tr_html_unittest
from grit.node import message_unittest
from grit import tclib_unittest
import grit.format.rc_unittest
import grit.format.data_pack_unittest
from grit.tool import rc2grd_unittest
from grit.tool import transl2tc_unittest
from grit.gather import txt_unittest
from grit.gather import admin_template_unittest
from grit import xtb_reader_unittest
from grit import util_unittest
from grit.tool import preprocess_unittest
from grit.tool import postprocess_unittest
from grit import shortcuts_unittests
from grit.gather import muppet_strings_unittest
from grit.gather import policy_json_unittest
from grit.node.custom import filename_unittest
import grit.format.js_map_format_unittest
test_classes = [
base_unittest.NodeUnittest,
io_unittest.FileNodeUnittest,
grit_runner_unittest.OptionArgsUnittest,
grd_reader_unittest.GrdReaderUnittest,
clique_unittest.MessageCliqueUnittest,
misc_unittest.GritNodeUnittest,
rc_unittest.RcUnittest,
tr_html_unittest.ParserUnittest,
tr_html_unittest.TrHtmlUnittest,
message_unittest.MessageUnittest,
tclib_unittest.TclibUnittest,
grit.format.rc_unittest.FormatRcUnittest,
grit.format.data_pack_unittest.FormatDataPackUnittest,
rc2grd_unittest.Rc2GrdUnittest,
transl2tc_unittest.TranslationToTcUnittest,
txt_unittest.TxtUnittest,
admin_template_unittest.AdmGathererUnittest,
xtb_reader_unittest.XtbReaderUnittest,
misc_unittest.IfNodeUnittest,
util_unittest.UtilUnittest,
preprocess_unittest.PreProcessingUnittest,
postprocess_unittest.PostProcessingUnittest,
misc_unittest.ReleaseNodeUnittest,
shortcuts_unittests.ShortcutsUnittest,
muppet_strings_unittest.MuppetStringsUnittest,
filename_unittest.WindowsFilenameUnittest,
grit.format.js_map_format_unittest.JsMapFormatUnittest,
policy_json_unittest.PolicyJsonUnittest,
# add test classes here...
]
for test_class in test_classes:
self.addTest(unittest.makeSuite(test_class))
if __name__ == '__main__':
test_result = unittest.TextTestRunner(verbosity=2).run(TestSuiteAll())
sys.exit(len(test_result.errors) + len(test_result.failures))
| paul99/clank | tools/grit/grit/test_suite_all.py | Python | bsd-3-clause | 3,329 |
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import re, unicodedata
from itertools import chain
from math import ceil
from functools import partial
from collections import namedtuple, OrderedDict
from difflib import SequenceMatcher
from future_builtins import zip
import regex
from PyQt5.Qt import (
QSplitter, QApplication, QTimer,
QTextCursor, QTextCharFormat, Qt, QRect, QPainter, QPalette, QPen, QBrush,
QColor, QTextLayout, QCursor, QFont, QSplitterHandle, QPainterPath,
QHBoxLayout, QWidget, QScrollBar, QEventLoop, pyqtSignal, QImage, QPixmap,
QMenu, QIcon, QKeySequence)
from calibre import human_readable, fit_image
from calibre.gui2 import info_dialog
from calibre.gui2.tweak_book import tprefs
from calibre.gui2.tweak_book.editor.text import PlainTextEdit, default_font_family, LineNumbers
from calibre.gui2.tweak_book.editor.themes import theme_color, get_theme
from calibre.gui2.tweak_book.diff import get_sequence_matcher
from calibre.gui2.tweak_book.diff.highlight import get_highlighter
Change = namedtuple('Change', 'ltop lbot rtop rbot kind')
class BusyCursor(object):
def __enter__(self):
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
def __exit__(self, *args):
QApplication.restoreOverrideCursor()
def beautify_text(raw, syntax):
from lxml import etree
from calibre.ebooks.oeb.polish.parsing import parse
from calibre.ebooks.oeb.polish.pretty import pretty_xml_tree, pretty_html_tree
from calibre.ebooks.chardet import strip_encoding_declarations
if syntax == 'xml':
root = etree.fromstring(strip_encoding_declarations(raw))
pretty_xml_tree(root)
elif syntax == 'css':
import logging
from calibre.ebooks.oeb.base import serialize, _css_logger
from calibre.ebooks.oeb.polish.utils import setup_cssutils_serialization
from cssutils import CSSParser, log
setup_cssutils_serialization(tprefs['editor_tab_stop_width'])
log.setLevel(logging.WARN)
log.raiseExceptions = False
parser = CSSParser(loglevel=logging.WARNING,
                           # We don't care about @import rules
fetcher=lambda x: (None, None), log=_css_logger)
data = parser.parseString(raw, href='<string>', validate=False)
return serialize(data, 'text/css')
else:
root = parse(raw, line_numbers=False)
pretty_html_tree(None, root)
return etree.tostring(root, encoding=unicode)
class LineNumberMap(dict): # {{{
'Map line numbers and keep track of the maximum width of the line numbers'
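    # Usage sketch (illustrative): m = LineNumberMap(); m[0] = 123  =>  m.max_width == 3.
    # max_width later sizes the line-number gutter via line_number_area_width().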
def __new__(cls):
self = dict.__new__(cls)
self.max_width = 1
return self
def __setitem__(self, k, v):
v = unicode(v)
dict.__setitem__(self, k, v)
self.max_width = max(self.max_width, len(v))
def clear(self):
dict.clear(self)
self.max_width = 1
# }}}
class TextBrowser(PlainTextEdit): # {{{
resized = pyqtSignal()
wheel_event = pyqtSignal(object)
next_change = pyqtSignal(object)
scrolled = pyqtSignal()
line_activated = pyqtSignal(object, object, object)
def __init__(self, right=False, parent=None, show_open_in_editor=False):
PlainTextEdit.__init__(self, parent)
self.setFrameStyle(0)
self.show_open_in_editor = show_open_in_editor
self.side_margin = 0
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.show_context_menu)
self.setFocusPolicy(Qt.NoFocus)
self.right = right
self.setReadOnly(True)
w = self.fontMetrics()
self.number_width = max(map(lambda x:w.width(str(x)), xrange(10)))
self.space_width = w.width(' ')
self.setLineWrapMode(self.WidgetWidth)
self.setTabStopWidth(tprefs['editor_tab_stop_width'] * self.space_width)
font = self.font()
ff = tprefs['editor_font_family']
if ff is None:
ff = default_font_family()
font.setFamily(ff)
font.setPointSize(tprefs['editor_font_size'])
self.setFont(font)
font = self.heading_font = QFont(self.font())
font.setPointSize(int(tprefs['editor_font_size'] * 1.5))
font.setBold(True)
theme = get_theme(tprefs['editor_theme'])
pal = self.palette()
pal.setColor(pal.Base, theme_color(theme, 'Normal', 'bg'))
pal.setColor(pal.AlternateBase, theme_color(theme, 'CursorLine', 'bg'))
pal.setColor(pal.Text, theme_color(theme, 'Normal', 'fg'))
pal.setColor(pal.Highlight, theme_color(theme, 'Visual', 'bg'))
pal.setColor(pal.HighlightedText, theme_color(theme, 'Visual', 'fg'))
self.setPalette(pal)
self.viewport().setCursor(Qt.ArrowCursor)
self.line_number_area = LineNumbers(self)
self.blockCountChanged[int].connect(self.update_line_number_area_width)
self.updateRequest.connect(self.update_line_number_area)
self.line_number_palette = pal = QPalette()
pal.setColor(pal.Base, theme_color(theme, 'LineNr', 'bg'))
pal.setColor(pal.Text, theme_color(theme, 'LineNr', 'fg'))
pal.setColor(pal.BrightText, theme_color(theme, 'LineNrC', 'fg'))
self.line_number_map = LineNumberMap()
self.search_header_pos = 0
self.changes, self.headers, self.images = [], [], OrderedDict()
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff), self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.diff_backgrounds = {
'replace' : theme_color(theme, 'DiffReplace', 'bg'),
'insert' : theme_color(theme, 'DiffInsert', 'bg'),
'delete' : theme_color(theme, 'DiffDelete', 'bg'),
'replacereplace': theme_color(theme, 'DiffReplaceReplace', 'bg'),
'boundary': QBrush(theme_color(theme, 'Normal', 'fg'), Qt.Dense7Pattern),
}
self.diff_foregrounds = {
'replace' : theme_color(theme, 'DiffReplace', 'fg'),
'insert' : theme_color(theme, 'DiffInsert', 'fg'),
'delete' : theme_color(theme, 'DiffDelete', 'fg'),
'boundary': QColor(0, 0, 0, 0),
}
for x in ('replacereplace', 'insert', 'delete'):
f = QTextCharFormat()
f.setBackground(self.diff_backgrounds[x])
setattr(self, '%s_format' % x, f)
def show_context_menu(self, pos):
m = QMenu(self)
a = m.addAction
i = unicode(self.textCursor().selectedText()).rstrip('\0')
if i:
a(QIcon(I('edit-copy.png')), _('Copy to clipboard'), self.copy).setShortcut(QKeySequence.Copy)
if len(self.changes) > 0:
a(QIcon(I('arrow-up.png')), _('Previous change'), partial(self.next_change.emit, -1))
a(QIcon(I('arrow-down.png')), _('Next change'), partial(self.next_change.emit, 1))
if self.show_open_in_editor:
b = self.cursorForPosition(pos).block()
if b.isValid():
a(QIcon(I('tweak.png')), _('Open file in the editor'), partial(self.generate_sync_request, b.blockNumber()))
if len(m.actions()) > 0:
m.exec_(self.mapToGlobal(pos))
def mouseDoubleClickEvent(self, ev):
if ev.button() == 1:
b = self.cursorForPosition(ev.pos()).block()
if b.isValid():
self.generate_sync_request(b.blockNumber())
return PlainTextEdit.mouseDoubleClickEvent(self, ev)
def generate_sync_request(self, block_number):
if not self.headers:
return
try:
lnum = int(self.line_number_map.get(block_number, ''))
except:
lnum = 1
for i, (num, text) in enumerate(self.headers):
if num > block_number:
name = text if i == 0 else self.headers[i - 1][1]
break
else:
name = self.headers[-1][1]
self.line_activated.emit(name, lnum, bool(self.right))
def search(self, query, reverse=False):
''' Search for query, also searching the headers. Matches in headers
are not highlighted as managing the highlight is too much of a pain.'''
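        # Search order (as built below): the rest of the current line first, then the
        # lines after it, then it wraps around to the lines before it, and finally the
        # part of the current line before the cursor; mirrored when reverse=True.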
if not query.strip():
return
c = self.textCursor()
lnum = c.block().blockNumber()
cpos = c.positionInBlock()
headers = dict(self.headers)
if lnum in headers:
cpos = self.search_header_pos
lines = unicode(self.toPlainText()).splitlines()
for hn, text in self.headers:
lines[hn] = text
prefix, postfix = lines[lnum][:cpos], lines[lnum][cpos:]
before, after = enumerate(lines[0:lnum]), ((lnum+1+i, x) for i, x in enumerate(lines[lnum+1:]))
if reverse:
sl = chain([(lnum, prefix)], reversed(tuple(before)), reversed(tuple(after)), [(lnum, postfix)])
else:
sl = chain([(lnum, postfix)], after, before, [(lnum, prefix)])
flags = regex.REVERSE if reverse else 0
pat = regex.compile(regex.escape(query, special_only=True), flags=regex.UNICODE|regex.IGNORECASE|flags)
for num, text in sl:
try:
m = next(pat.finditer(text))
except StopIteration:
continue
start, end = m.span()
length = end - start
if text is postfix:
start += cpos
c = QTextCursor(self.document().findBlockByNumber(num))
c.setPosition(c.position() + start)
if num in headers:
self.search_header_pos = start + length
else:
c.setPosition(c.position() + length, c.KeepAnchor)
self.search_header_pos = 0
if reverse:
pos, anchor = c.position(), c.anchor()
c.setPosition(pos), c.setPosition(anchor, c.KeepAnchor)
self.setTextCursor(c)
self.centerCursor()
self.scrolled.emit()
break
else:
info_dialog(self, _('No matches found'), _(
'No matches found for query: %s' % query), show=True)
def clear(self):
PlainTextEdit.clear(self)
self.line_number_map.clear()
del self.changes[:]
del self.headers[:]
self.images.clear()
self.search_header_pos = 0
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
def update_line_number_area_width(self, block_count=0):
self.side_margin = self.line_number_area_width()
if self.right:
self.setViewportMargins(0, 0, self.side_margin, 0)
else:
self.setViewportMargins(self.side_margin, 0, 0, 0)
def available_width(self):
return self.width() - self.side_margin
def line_number_area_width(self):
return 9 + (self.line_number_map.max_width * self.number_width)
def update_line_number_area(self, rect, dy):
if dy:
self.line_number_area.scroll(0, dy)
else:
self.line_number_area.update(0, rect.y(), self.line_number_area.width(), rect.height())
if rect.contains(self.viewport().rect()):
self.update_line_number_area_width()
def resizeEvent(self, ev):
PlainTextEdit.resizeEvent(self, ev)
cr = self.contentsRect()
if self.right:
self.line_number_area.setGeometry(QRect(cr.right() - self.line_number_area_width(), cr.top(), cr.right(), cr.height()))
else:
self.line_number_area.setGeometry(QRect(cr.left(), cr.top(), self.line_number_area_width(), cr.height()))
self.resized.emit()
def paint_line_numbers(self, ev):
painter = QPainter(self.line_number_area)
painter.fillRect(ev.rect(), self.line_number_palette.color(QPalette.Base))
block = self.firstVisibleBlock()
num = block.blockNumber()
top = int(self.blockBoundingGeometry(block).translated(self.contentOffset()).top())
bottom = top + int(self.blockBoundingRect(block).height())
painter.setPen(self.line_number_palette.color(QPalette.Text))
change_starts = {x[0] for x in self.changes}
while block.isValid() and top <= ev.rect().bottom():
r = ev.rect()
if block.isVisible() and bottom >= r.top():
text = unicode(self.line_number_map.get(num, ''))
is_start = text != '-' and num in change_starts
if is_start:
painter.save()
f = QFont(self.font())
f.setBold(True)
painter.setFont(f)
painter.setPen(self.line_number_palette.color(QPalette.BrightText))
if text == '-':
painter.drawLine(r.left() + 2, (top + bottom)//2, r.right() - 2, (top + bottom)//2)
else:
if self.right:
painter.drawText(r.left() + 3, top, r.right(), self.fontMetrics().height(),
Qt.AlignLeft, text)
else:
painter.drawText(r.left() + 2, top, r.right() - 5, self.fontMetrics().height(),
Qt.AlignRight, text)
if is_start:
painter.restore()
block = block.next()
top = bottom
bottom = top + int(self.blockBoundingRect(block).height())
num += 1
def paintEvent(self, event):
w = self.viewport().rect().width()
painter = QPainter(self.viewport())
painter.setClipRect(event.rect())
floor = event.rect().bottom()
ceiling = event.rect().top()
fv = self.firstVisibleBlock().blockNumber()
origin = self.contentOffset()
doc = self.document()
lines = []
for num, text in self.headers:
top, bot = num, num + 3
if bot < fv:
continue
y_top = self.blockBoundingGeometry(doc.findBlockByNumber(top)).translated(origin).y()
y_bot = self.blockBoundingGeometry(doc.findBlockByNumber(bot)).translated(origin).y()
if max(y_top, y_bot) < ceiling:
continue
if min(y_top, y_bot) > floor:
break
painter.setFont(self.heading_font)
br = painter.drawText(3, y_top, w, y_bot - y_top - 5, Qt.TextSingleLine, text)
painter.setPen(QPen(self.palette().text(), 2))
painter.drawLine(0, br.bottom()+3, w, br.bottom()+3)
for top, bot, kind in self.changes:
if bot < fv:
continue
y_top = self.blockBoundingGeometry(doc.findBlockByNumber(top)).translated(origin).y()
y_bot = self.blockBoundingGeometry(doc.findBlockByNumber(bot)).translated(origin).y()
if max(y_top, y_bot) < ceiling:
continue
if min(y_top, y_bot) > floor:
break
if y_top != y_bot:
painter.fillRect(0, y_top, w, y_bot - y_top, self.diff_backgrounds[kind])
lines.append((y_top, y_bot, kind))
if top in self.images:
img, maxw = self.images[top][:2]
if bot > top + 1 and not img.isNull():
y_top = self.blockBoundingGeometry(doc.findBlockByNumber(top+1)).translated(origin).y() + 3
y_bot -= 3
scaled, imgw, imgh = fit_image(img.width(), img.height(), w - 3, y_bot - y_top)
painter.setRenderHint(QPainter.SmoothPixmapTransform, True)
painter.drawPixmap(QRect(3, y_top, imgw, imgh), img)
painter.end()
PlainTextEdit.paintEvent(self, event)
painter = QPainter(self.viewport())
painter.setClipRect(event.rect())
for top, bottom, kind in sorted(lines, key=lambda (t, b, k):{'replace':0}.get(k, 1)):
painter.setPen(QPen(self.diff_foregrounds[kind], 1))
painter.drawLine(0, top, w, top)
painter.drawLine(0, bottom - 1, w, bottom - 1)
def wheelEvent(self, ev):
if ev.angleDelta().x() == 0:
self.wheel_event.emit(ev)
else:
return PlainTextEdit.wheelEvent(self, ev)
# }}}
class DiffSplitHandle(QSplitterHandle): # {{{
WIDTH = 30 # px
wheel_event = pyqtSignal(object)
def event(self, ev):
if ev.type() in (ev.HoverEnter, ev.HoverLeave):
self.hover = ev.type() == ev.HoverEnter
return QSplitterHandle.event(self, ev)
def paintEvent(self, event):
QSplitterHandle.paintEvent(self, event)
left, right = self.parent().left, self.parent().right
painter = QPainter(self)
painter.setClipRect(event.rect())
w = self.width()
h = self.height()
painter.setRenderHints(QPainter.Antialiasing, True)
C = 16 # Curve factor.
def create_line(ly, ry, right_to_left=False):
' Create path that represents upper or lower line of change marker '
line = QPainterPath()
if not right_to_left:
line.moveTo(0, ly)
line.cubicTo(C, ly, w - C, ry, w, ry)
else:
line.moveTo(w, ry)
line.cubicTo(w - C, ry, C, ly, 0, ly)
return line
ldoc, rdoc = left.document(), right.document()
lorigin, rorigin = left.contentOffset(), right.contentOffset()
lfv, rfv = left.firstVisibleBlock().blockNumber(), right.firstVisibleBlock().blockNumber()
lines = []
for (ltop, lbot, kind), (rtop, rbot, kind) in zip(left.changes, right.changes):
if lbot < lfv and rbot < rfv:
continue
ly_top = left.blockBoundingGeometry(ldoc.findBlockByNumber(ltop)).translated(lorigin).y()
ly_bot = left.blockBoundingGeometry(ldoc.findBlockByNumber(lbot)).translated(lorigin).y()
ry_top = right.blockBoundingGeometry(rdoc.findBlockByNumber(rtop)).translated(rorigin).y()
ry_bot = right.blockBoundingGeometry(rdoc.findBlockByNumber(rbot)).translated(rorigin).y()
if max(ly_top, ly_bot, ry_top, ry_bot) < 0:
continue
if min(ly_top, ly_bot, ry_top, ry_bot) > h:
break
upper_line = create_line(ly_top, ry_top)
lower_line = create_line(ly_bot, ry_bot, True)
region = QPainterPath()
region.moveTo(0, ly_top)
region.connectPath(upper_line)
region.lineTo(w, ry_bot)
region.connectPath(lower_line)
region.closeSubpath()
painter.fillPath(region, left.diff_backgrounds[kind])
for path, aa in zip((upper_line, lower_line), (ly_top != ry_top, ly_bot != ry_bot)):
lines.append((kind, path, aa))
for kind, path, aa in sorted(lines, key=lambda x:{'replace':0}.get(x[0], 1)):
painter.setPen(left.diff_foregrounds[kind])
painter.setRenderHints(QPainter.Antialiasing, aa)
painter.drawPath(path)
painter.setFont(left.heading_font)
for (lnum, text), (rnum, text) in zip(left.headers, right.headers):
ltop, lbot, rtop, rbot = lnum, lnum + 3, rnum, rnum + 3
if lbot < lfv and rbot < rfv:
continue
ly_top = left.blockBoundingGeometry(ldoc.findBlockByNumber(ltop)).translated(lorigin).y()
ly_bot = left.blockBoundingGeometry(ldoc.findBlockByNumber(lbot)).translated(lorigin).y()
ry_top = right.blockBoundingGeometry(rdoc.findBlockByNumber(rtop)).translated(rorigin).y()
ry_bot = right.blockBoundingGeometry(rdoc.findBlockByNumber(rbot)).translated(rorigin).y()
if max(ly_top, ly_bot, ry_top, ry_bot) < 0:
continue
if min(ly_top, ly_bot, ry_top, ry_bot) > h:
break
ly = painter.boundingRect(3, ly_top, left.width(), ly_bot - ly_top - 5, Qt.TextSingleLine, text).bottom() + 3
ry = painter.boundingRect(3, ry_top, right.width(), ry_bot - ry_top - 5, Qt.TextSingleLine, text).bottom() + 3
line = create_line(ly, ry)
painter.setPen(QPen(left.palette().text(), 2))
painter.setRenderHints(QPainter.Antialiasing, ly != ry)
painter.drawPath(line)
painter.end()
# Paint the splitter without the change lines if the mouse is over the
# splitter
if getattr(self, 'hover', False):
QSplitterHandle.paintEvent(self, event)
def sizeHint(self):
ans = QSplitterHandle.sizeHint(self)
ans.setWidth(self.WIDTH)
return ans
def wheelEvent(self, ev):
if ev.angleDelta().x() == 0:
self.wheel_event.emit(ev)
else:
return QSplitterHandle.wheelEvent(self, ev)
# }}}
class DiffSplit(QSplitter): # {{{
def __init__(self, parent=None, show_open_in_editor=False):
QSplitter.__init__(self, parent)
self._failed_img = None
self.left, self.right = TextBrowser(parent=self), TextBrowser(right=True, parent=self, show_open_in_editor=show_open_in_editor)
self.addWidget(self.left), self.addWidget(self.right)
self.split_words = re.compile(r"\w+|\W", re.UNICODE)
self.clear()
def createHandle(self):
return DiffSplitHandle(self.orientation(), self)
def clear(self):
self.left.clear(), self.right.clear()
def finalize(self):
for v in (self.left, self.right):
c = v.textCursor()
c.movePosition(c.Start)
v.setTextCursor(c)
self.update()
def add_diff(self, left_name, right_name, left_text, right_text, context=None, syntax=None, beautify=False):
left_text, right_text = left_text or '', right_text or ''
is_identical = len(left_text) == len(right_text) and left_text == right_text and left_name == right_name
is_text = isinstance(left_text, type('')) and isinstance(right_text, type(''))
left_name = left_name or '[%s]'%_('This file was added')
right_name = right_name or '[%s]'%_('This file was removed')
self.left.headers.append((self.left.blockCount() - 1, left_name))
self.right.headers.append((self.right.blockCount() - 1, right_name))
for v in (self.left, self.right):
c = v.textCursor()
c.movePosition(c.End)
(c.insertBlock(), c.insertBlock(), c.insertBlock())
with BusyCursor():
if is_identical:
for v in (self.left, self.right):
c = v.textCursor()
c.movePosition(c.End)
c.insertText('[%s]\n\n' % _('The files are identical'))
elif left_name != right_name and not left_text and not right_text:
self.add_text_diff(_('[This file was renamed to %s]') % right_name, _('[This file was renamed from %s]') % left_name, context, None)
for v in (self.left, self.right):
v.appendPlainText('\n')
elif is_text:
self.add_text_diff(left_text, right_text, context, syntax, beautify=beautify)
elif syntax == 'raster_image':
self.add_image_diff(left_text, right_text)
else:
text = '[%s]' % _('Binary file of size: %s')
left_text, right_text = text % human_readable(len(left_text)), text % human_readable(len(right_text))
self.add_text_diff(left_text, right_text, None, None)
for v in (self.left, self.right):
v.appendPlainText('\n')
# image diffs {{{
@property
def failed_img(self):
if self._failed_img is None:
i = QImage(200, 150, QImage.Format_ARGB32)
i.fill(Qt.white)
p = QPainter(i)
r = i.rect().adjusted(10, 10, -10, -10)
n = QPen(Qt.DashLine)
n.setColor(Qt.black)
p.setPen(n)
p.drawRect(r)
p.setPen(Qt.black)
f = self.font()
f.setPixelSize(20)
p.setFont(f)
p.drawText(r.adjusted(10, 0, -10, 0), Qt.AlignCenter | Qt.TextWordWrap, _('Image could not be rendered'))
p.end()
self._failed_img = QPixmap.fromImage(i)
return self._failed_img
def add_image_diff(self, left_data, right_data):
def load(data):
p = QPixmap()
p.loadFromData(bytes(data))
if data and p.isNull():
p = self.failed_img
return p
left_img, right_img = load(left_data), load(right_data)
change = []
# Let any initial resizing of the window finish in case this is the
# first diff, to avoid the expensive resize calculation later
QApplication.processEvents(QEventLoop.ExcludeUserInputEvents | QEventLoop.ExcludeSocketNotifiers)
for v, img, size in ((self.left, left_img, len(left_data)), (self.right, right_img, len(right_data))):
c = v.textCursor()
c.movePosition(c.End)
start = c.block().blockNumber()
lines, w = self.get_lines_for_image(img, v)
c.movePosition(c.StartOfBlock)
if size > 0:
c.beginEditBlock()
c.insertText(_('Size: {0} Resolution: {1}x{2}').format(human_readable(size), img.width(), img.height()))
for i in xrange(lines + 1):
c.insertBlock()
change.extend((start, c.block().blockNumber()))
c.insertBlock()
c.endEditBlock()
v.images[start] = (img, w, lines)
change.append('replace' if left_data and right_data else 'delete' if left_data else 'insert')
self.left.changes.append((change[0], change[1], change[-1]))
self.right.changes.append((change[2], change[3], change[-1]))
QApplication.processEvents(QEventLoop.ExcludeUserInputEvents | QEventLoop.ExcludeSocketNotifiers)
def resized(self):
' Resize images to fit in new view size and adjust all line number references accordingly '
for v in (self.left, self.right):
changes = []
for i, (top, bot, kind) in enumerate(v.changes):
if top in v.images:
img, oldw, oldlines = v.images[top]
lines, w = self.get_lines_for_image(img, v)
if lines != oldlines:
changes.append((i, lines, lines - oldlines, img, w))
for i, lines, delta, img, w in changes:
top, bot, kind = v.changes[i]
c = QTextCursor(v.document().findBlockByNumber(top+1))
c.beginEditBlock()
c.movePosition(c.StartOfBlock)
if delta > 0:
for _ in xrange(delta):
c.insertBlock()
else:
c.movePosition(c.NextBlock, c.KeepAnchor, -delta)
c.removeSelectedText()
c.endEditBlock()
v.images[top] = (img, w, lines)
def mapnum(x):
return x if x <= top else x + delta
lnm = LineNumberMap()
lnm.max_width = v.line_number_map.max_width
for x, val in v.line_number_map.iteritems():
dict.__setitem__(lnm, mapnum(x), val)
v.line_number_map = lnm
v.changes = [(mapnum(t), mapnum(b), k) for t, b, k in v.changes]
v.headers = [(mapnum(x), name) for x, name in v.headers]
v.images = OrderedDict((mapnum(x), v) for x, v in v.images.iteritems())
v.viewport().update()
def get_lines_for_image(self, img, view):
if img.isNull():
return 0, 0
w, h = img.width(), img.height()
scaled, w, h = fit_image(w, h, view.available_width() - 3, int(0.9 * view.height()))
line_height = view.blockBoundingRect(view.document().begin()).height()
return int(ceil(h / line_height)) + 1, w
# }}}
# text diffs {{{
def add_text_diff(self, left_text, right_text, context, syntax, beautify=False):
left_text = unicodedata.normalize('NFC', left_text)
right_text = unicodedata.normalize('NFC', right_text)
if beautify and syntax in {'xml', 'html', 'css'}:
left_text, right_text = beautify_text(left_text, syntax), beautify_text(right_text, syntax)
if len(left_text) == len(right_text) and left_text == right_text:
for v in (self.left, self.right):
c = v.textCursor()
c.movePosition(c.End)
c.insertText('[%s]\n\n' % _('The files are identical after beautifying'))
return
left_lines = self.left_lines = left_text.splitlines()
right_lines = self.right_lines = right_text.splitlines()
cruncher = get_sequence_matcher()(None, left_lines, right_lines)
left_highlight, right_highlight = get_highlighter(self.left, left_text, syntax), get_highlighter(self.right, right_text, syntax)
cl, cr = self.left_cursor, self.right_cursor = self.left.textCursor(), self.right.textCursor()
cl.beginEditBlock(), cr.beginEditBlock()
cl.movePosition(cl.End), cr.movePosition(cr.End)
self.left_insert = partial(self.do_insert, cl, left_highlight, self.left.line_number_map)
self.right_insert = partial(self.do_insert, cr, right_highlight, self.right.line_number_map)
self.changes = []
if context is None:
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
getattr(self, tag)(alo, ahi, blo, bhi)
QApplication.processEvents(QEventLoop.ExcludeUserInputEvents | QEventLoop.ExcludeSocketNotifiers)
else:
def insert_boundary():
self.changes.append(Change(
ltop=cl.block().blockNumber()-1, lbot=cl.block().blockNumber(),
rtop=cr.block().blockNumber()-1, rbot=cr.block().blockNumber(), kind='boundary'))
self.left.line_number_map[self.changes[-1].ltop] = '-'
self.right.line_number_map[self.changes[-1].rtop] = '-'
ahi = bhi = 0
for i, group in enumerate(cruncher.get_grouped_opcodes(context)):
for j, (tag, alo, ahi, blo, bhi) in enumerate(group):
if j == 0 and (i > 0 or min(alo, blo) > 0):
insert_boundary()
getattr(self, tag)(alo, ahi, blo, bhi)
QApplication.processEvents(QEventLoop.ExcludeUserInputEvents | QEventLoop.ExcludeSocketNotifiers)
cl.insertBlock(), cr.insertBlock()
if ahi < len(left_lines) - 1 or bhi < len(right_lines) - 1:
insert_boundary()
cl.endEditBlock(), cr.endEditBlock()
del self.left_lines
del self.right_lines
del self.left_insert
del self.right_insert
self.coalesce_changes()
for ltop, lbot, rtop, rbot, kind in self.changes:
if kind != 'equal':
self.left.changes.append((ltop, lbot, kind))
self.right.changes.append((rtop, rbot, kind))
del self.changes
def coalesce_changes(self):
'Merge neighboring changes of the same kind, if any'
changes = []
for x in self.changes:
if changes and changes[-1].kind == x.kind:
changes[-1] = changes[-1]._replace(lbot=x.lbot, rbot=x.rbot)
else:
changes.append(x)
self.changes = changes
def do_insert(self, cursor, highlighter, line_number_map, lo, hi):
start_block = cursor.block()
highlighter.copy_lines(lo, hi, cursor)
for num, i in enumerate(xrange(start_block.blockNumber(), cursor.blockNumber())):
line_number_map[i] = lo + num + 1
return start_block.blockNumber(), cursor.block().blockNumber()
def equal(self, alo, ahi, blo, bhi):
lsb, lcb = self.left_insert(alo, ahi)
rsb, rcb = self.right_insert(blo, bhi)
self.changes.append(Change(
rtop=rsb, rbot=rcb, ltop=lsb, lbot=lcb, kind='equal'))
def delete(self, alo, ahi, blo, bhi):
start_block, current_block = self.left_insert(alo, ahi)
r = self.right_cursor.block().blockNumber()
self.changes.append(Change(
ltop=start_block, lbot=current_block, rtop=r, rbot=r, kind='delete'))
def insert(self, alo, ahi, blo, bhi):
start_block, current_block = self.right_insert(blo, bhi)
l = self.left_cursor.block().blockNumber()
self.changes.append(Change(
rtop=start_block, rbot=current_block, ltop=l, lbot=l, kind='insert'))
def trim_identical_leading_lines(self, alo, ahi, blo, bhi):
''' The patience diff algorithm sometimes results in a block of replace
lines with identical leading lines. Remove these. This can cause extra
lines of context, but that is better than having extra lines of diff
with no actual changes. '''
a, b = self.left_lines, self.right_lines
leading = 0
while alo < ahi and blo < bhi and a[alo] == b[blo]:
leading += 1
alo += 1
blo += 1
if leading > 0:
self.equal(alo - leading, alo, blo - leading, blo)
return alo, ahi, blo, bhi
def replace(self, alo, ahi, blo, bhi):
''' When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a synch
point, and intraline difference marking is done on the similar pair.
Lots of work, but often worth it. '''
alo, ahi, blo, bhi = self.trim_identical_leading_lines(alo, ahi, blo, bhi)
if alo == ahi and blo == bhi:
return
if ahi + bhi - alo - blo > 100:
# Too many lines, this will be too slow
# http://bugs.python.org/issue6931
return self.do_replace(alo, ahi, blo, bhi)
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher()
eqi, eqj = None, None # 1st indices of equal lines (if any)
a, b = self.left_lines, self.right_lines
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in xrange(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in xrange(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if (cruncher.real_quick_ratio() > best_ratio and
cruncher.quick_ratio() > best_ratio and
cruncher.ratio() > best_ratio):
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
self.do_replace(alo, ahi, blo, bhi)
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
self.replace_helper(alo, best_i, blo, best_j)
# do intraline marking on the synch pair
if eqi is None:
self.do_replace(best_i, best_i+1, best_j, best_j+1)
else:
# the synch pair is identical
self.equal(best_i, best_i+1, best_j, best_j+1)
# pump out diffs from after the synch point
self.replace_helper(best_i+1, ahi, best_j+1, bhi)
def replace_helper(self, alo, ahi, blo, bhi):
if alo < ahi:
if blo < bhi:
self.replace(alo, ahi, blo, bhi)
else:
self.delete(alo, ahi, blo, blo)
elif blo < bhi:
self.insert(alo, alo, blo, bhi)
def do_replace(self, alo, ahi, blo, bhi):
lsb, lcb = self.left_insert(alo, ahi)
rsb, rcb = self.right_insert(blo, bhi)
self.changes.append(Change(
rtop=rsb, rbot=rcb, ltop=lsb, lbot=lcb, kind='replace'))
l, r = '\n'.join(self.left_lines[alo:ahi]), '\n'.join(self.right_lines[blo:bhi])
ll, rl = self.split_words.findall(l), self.split_words.findall(r)
cruncher = get_sequence_matcher()(None, ll, rl)
lsb, rsb = self.left.document().findBlockByNumber(lsb), self.right.document().findBlockByNumber(rsb)
def do_tag(block, words, lo, hi, pos, fmts):
for word in words[lo:hi]:
if word == '\n':
if fmts:
block.layout().setAdditionalFormats(fmts)
pos, block, fmts = 0, block.next(), []
continue
if tag in {'replace', 'insert', 'delete'}:
fmt = getattr(self.left, '%s_format' % ('replacereplace' if tag == 'replace' else tag))
f = QTextLayout.FormatRange()
f.start, f.length, f.format = pos, len(word), fmt
fmts.append(f)
pos += len(word)
return block, pos, fmts
lfmts, rfmts, lpos, rpos = [], [], 0, 0
for tag, llo, lhi, rlo, rhi in cruncher.get_opcodes():
lsb, lpos, lfmts = do_tag(lsb, ll, llo, lhi, lpos, lfmts)
rsb, rpos, rfmts = do_tag(rsb, rl, rlo, rhi, rpos, rfmts)
for block, fmts in ((lsb, lfmts), (rsb, rfmts)):
if fmts:
block.layout().setAdditionalFormats(fmts)
# }}}
# }}}
class DiffView(QWidget): # {{{
SYNC_POSITION = 0.4
line_activated = pyqtSignal(object, object, object)
def __init__(self, parent=None, show_open_in_editor=False):
QWidget.__init__(self, parent)
self.changes = [[], [], []]
self.delta = 0
self.l = l = QHBoxLayout(self)
self.setLayout(l)
self.syncpos = 0
l.setContentsMargins(0, 0, 0, 0), l.setSpacing(0)
self.view = DiffSplit(self, show_open_in_editor=show_open_in_editor)
l.addWidget(self.view)
self.add_diff = self.view.add_diff
self.scrollbar = QScrollBar(self)
l.addWidget(self.scrollbar)
self.syncing = False
self.bars = []
self.resize_timer = QTimer(self)
self.resize_timer.setSingleShot(True)
self.resize_timer.timeout.connect(self.resize_debounced)
for i, bar in enumerate((self.scrollbar, self.view.left.verticalScrollBar(), self.view.right.verticalScrollBar())):
self.bars.append(bar)
bar.valueChanged[int].connect(partial(self.scrolled, i))
self.view.left.resized.connect(self.resized)
for i, v in enumerate((self.view.left, self.view.right, self.view.handle(1))):
v.wheel_event.connect(self.scrollbar.wheelEvent)
if i < 2:
v.next_change.connect(self.next_change)
v.line_activated.connect(self.line_activated)
v.scrolled.connect(partial(self.scrolled, i + 1))
def next_change(self, delta):
assert delta in (1, -1)
position = self.get_position_from_scrollbar(0)
if position[0] == 'in':
p = n = position[1]
else:
p, n = position[1], position[1] + 1
if p < 0:
p = None
if n >= len(self.changes[0]):
n = None
if p == n:
nc = p + delta
if nc < 0 or nc >= len(self.changes[0]):
nc = None
else:
nc = {1:n, -1:p}[delta]
if nc is None:
self.scrollbar.setValue(0 if delta == -1 else self.scrollbar.maximum())
else:
val = self.scrollbar.value()
self.scroll_to(0, ('in', nc, 0))
nval = self.scrollbar.value()
if nval == val:
nval += 5 * delta
if 0 <= nval <= self.scrollbar.maximum():
self.scrollbar.setValue(nval)
def resized(self):
self.resize_timer.start(300)
def resize_debounced(self):
self.view.resized()
self.calculate_length()
self.adjust_range()
self.view.handle(1).update()
def get_position_from_scrollbar(self, which):
changes = self.changes[which]
bar = self.bars[which]
syncpos = self.syncpos + bar.value()
prev = 0
for i, (top, bot, kind) in enumerate(changes):
if syncpos <= bot:
if top <= syncpos:
# syncpos is inside a change
try:
ratio = float(syncpos - top) / (bot - top)
except ZeroDivisionError:
ratio = 0
return 'in', i, ratio
else:
# syncpos is after the previous change
offset = syncpos - prev
return 'after', i - 1, offset
else:
# syncpos is after the current change
prev = bot
offset = syncpos - prev
return 'after', len(changes) - 1, offset
def scroll_to(self, which, position):
changes = self.changes[which]
bar = self.bars[which]
val = None
if position[0] == 'in':
change_idx, ratio = position[1:]
start, end = changes[change_idx][:2]
val = start + int((end - start) * ratio)
else:
change_idx, offset = position[1:]
start = 0 if change_idx < 0 else changes[change_idx][1]
val = start + offset
bar.setValue(val - self.syncpos)
def scrolled(self, which, *args):
if self.syncing:
return
position = self.get_position_from_scrollbar(which)
with self:
for x in {0, 1, 2} - {which}:
self.scroll_to(x, position)
self.view.handle(1).update()
def __enter__(self):
self.syncing = True
def __exit__(self, *args):
self.syncing = False
def clear(self):
with self:
self.view.clear()
self.changes = [[], [], []]
self.delta = 0
self.scrollbar.setRange(0, 0)
def adjust_range(self):
ls, rs = self.view.left.verticalScrollBar(), self.view.right.verticalScrollBar()
self.scrollbar.setPageStep(min(ls.pageStep(), rs.pageStep()))
self.scrollbar.setSingleStep(min(ls.singleStep(), rs.singleStep()))
self.scrollbar.setRange(0, ls.maximum() + self.delta)
self.scrollbar.setVisible(self.view.left.blockCount() > ls.pageStep() or self.view.right.blockCount() > rs.pageStep())
self.syncpos = int(ceil(self.scrollbar.pageStep() * self.SYNC_POSITION))
def finalize(self):
self.view.finalize()
self.changes = [[], [], []]
self.calculate_length()
self.adjust_range()
def calculate_length(self):
delta = 0
line_number_changes = ([], [])
for v, lmap, changes in zip((self.view.left, self.view.right), ({}, {}), line_number_changes):
b = v.document().firstBlock()
ebl = v.document().documentLayout().ensureBlockLayout
last_line_count = 0
while b.isValid():
ebl(b)
lmap[b.blockNumber()] = last_line_count
last_line_count += b.layout().lineCount()
b = b.next()
for top, bot, kind in v.changes:
changes.append((lmap[top], lmap[bot], kind))
changes = []
for (l_top, l_bot, kind), (r_top, r_bot, kind) in zip(*line_number_changes):
height = max(l_bot - l_top, r_bot - r_top)
top = delta + l_top
changes.append((top, top + height, kind))
delta = top + height - l_bot
self.changes, self.delta = (changes,) + line_number_changes, delta
def handle_key(self, ev):
amount, d = None, 1
key = ev.key()
if key in (Qt.Key_Up, Qt.Key_Down, Qt.Key_J, Qt.Key_K):
amount = self.scrollbar.singleStep()
if key in (Qt.Key_Up, Qt.Key_K):
d = -1
elif key in (Qt.Key_PageUp, Qt.Key_PageDown):
amount = self.scrollbar.pageStep()
if key in (Qt.Key_PageUp,):
d = -1
elif key in (Qt.Key_Home, Qt.Key_End):
self.scrollbar.setValue(0 if key == Qt.Key_Home else self.scrollbar.maximum())
return True
elif key in (Qt.Key_N, Qt.Key_P):
self.next_change(1 if key == Qt.Key_N else -1)
return True
if amount is not None:
self.scrollbar.setValue(self.scrollbar.value() + d * amount)
return True
return False
# }}}
| sharad/calibre | src/calibre/gui2/tweak_book/diff/view.py | Python | gpl-3.0 | 46,245 |
from pycp2k.inputsection import InputSection
class _screening3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Rc_taper = None
self.Rc_range = None
self._name = "SCREENING"
self._keywords = {'Rc_range': 'RC_RANGE', 'Rc_taper': 'RC_TAPER'}
| SINGROUP/pycp2k | pycp2k/classes/_screening3.py | Python | lgpl-3.0 | 306 |
import requests
import json
def update_position_data(position_data):
with open("local_data/positions.json", "w") as positions_file:
json.dump(position_data, positions_file, indent=2)
def update_player_data(player_data):
for player in player_data:
        # The original URL had no format placeholder; the per-player
        # "element-summary" endpoint is assumed here and may need adjusting.
        player_response = requests.get(
            "https://fantasy.premierleague.com/drf/element-summary/{id}".format(**player))
# history - summary of performance for current season
# explain - explaining where points have come from(?)
player_full_data = player_response.json()
player['history'] = player_full_data['history']
player['history_past'] = player_full_data['history_past']
with open("local_data/players/{second_name}, {first_name}".format(**player), "w") as player_file:
json.dump(player, player_file, indent=2)
def update_team_data(team_data):
for team in team_data:
with open("local_data/teams/{name}.json".format(**team), "w") as team_file:
json.dump(team, team_file, indent=2)
response = requests.get("https://fantasy.premierleague.com/drf/bootstrap-static")
print(response)
fpl_raw_data = response.json()
print(fpl_raw_data.keys())
print(fpl_raw_data['teams'][0])
# update_player_data(fpl_raw_data['elements'])
# update_position_data(fpl_raw_data['element_types'])
# update_team_data(fpl_raw_data['teams'])
| rachel-sharp/fantasy-scout | archived_scripts/load_fpl_data.py | Python | mit | 1,350 |
from datetime import datetime
import pytz
from django.contrib.postgres.fields import ArrayField
from django.db import models
from osf.models import Node
from osf.models import OSFUser
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.validators import validate_subscription_type
from website.notifications.constants import NOTIFICATION_TYPES
class NotificationSubscription(BaseModel):
primary_identifier_name = '_id'
# TODO DELETE ME POST MIGRATION
modm_model_path = 'website.notifications.model.NotificationSubscription'
modm_query = None
migration_page_size = 120000
# /TODO DELETE ME POST MIGRATION
_id = models.CharField(max_length=50, db_index=True) # pxyz_wiki_updated, uabc_comment_replies
event_name = models.CharField(max_length=50) # wiki_updated, comment_replies
user = models.ForeignKey('OSFUser', null=True, related_name='notification_subscriptions', blank=True)
node = models.ForeignKey('Node', null=True, blank=True, related_name='notification_subscriptions')
# Notification types
none = models.ManyToManyField('OSFUser', related_name='+') # reverse relationships
email_digest = models.ManyToManyField('OSFUser', related_name='+') # for these
email_transactional = models.ManyToManyField('OSFUser', related_name='+') # are pointless
@classmethod
def load(cls, q):
# modm doesn't throw exceptions when loading things that don't exist
try:
return cls.objects.get(_id=q)
except cls.DoesNotExist:
return None
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm object, make a django object with the same local fields.
This is a base method that may work for simple things. It should be customized for complex ones.
:param modm_obj:
:return:
"""
django_obj = cls()
django_obj._id = modm_obj._id
local_django_fields = set([x.name for x in django_obj._meta.get_fields() if not x.is_relation])
intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
set(local_django_fields))
for field in intersecting_fields:
modm_value = getattr(modm_obj, field)
if modm_value is None:
continue
if isinstance(modm_value, datetime):
modm_value = pytz.utc.localize(modm_value)
setattr(django_obj, field, modm_value)
return django_obj
@property
def owner(self):
# ~100k have owner==user
if self.user is not None:
return self.user
# ~8k have owner=Node
elif self.node is not None:
return self.node
@owner.setter
def owner(self, value):
if isinstance(value, OSFUser):
self.user = value
elif isinstance(value, Node):
self.node = value
def add_user_to_subscription(self, user, notification_type, save=True):
for nt in NOTIFICATION_TYPES:
if getattr(self, nt).filter(id=user.id).exists():
if nt != notification_type:
getattr(self, nt).remove(user)
else:
if nt == notification_type:
getattr(self, nt).add(user)
if notification_type != 'none' and isinstance(self.owner, Node) and self.owner.parent_node:
user_subs = self.owner.parent_node.child_node_subscriptions
if self.owner._id not in user_subs.setdefault(user._id, []):
user_subs[user._id].append(self.owner._id)
self.owner.parent_node.save()
if save:
self.save()
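    # Usage sketch (added for illustration; the subscription id and user are
    # hypothetical and must already exist in storage):
    #   sub = NotificationSubscription.load('xyz12_comments')
    #   sub.add_user_to_subscription(user, 'email_transactional')
    #   sub.remove_user_from_subscription(user)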
def remove_user_from_subscription(self, user, save=True):
for notification_type in NOTIFICATION_TYPES:
try:
getattr(self, notification_type, []).remove(user)
except ValueError:
pass
if isinstance(self.owner, Node) and self.owner.parent_node:
try:
self.owner.parent_node.child_node_subscriptions.get(user._id, []).remove(self.owner._id)
self.owner.parent_node.save()
except ValueError:
pass
if save:
self.save()
class NotificationDigest(ObjectIDMixin, BaseModel):
# TODO DELETE ME POST MIGRATION
modm_model_path = 'website.notifications.model.NotificationDigest'
modm_query = None
# /TODO DELETE ME POST MIGRATION
user = models.ForeignKey('OSFUser', null=True, blank=True)
timestamp = models.DateTimeField()
send_type = models.CharField(max_length=50, db_index=True, validators=[validate_subscription_type, ])
event = models.CharField(max_length=50)
message = models.CharField(max_length=2048)
# TODO: Could this be a m2m with or without an order field?
node_lineage = ArrayField(models.CharField(max_length=5))
| mluo613/osf.io | osf/models/notifications.py | Python | apache-2.0 | 4,912 |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
"""
from micropsi_core import runtime as micropsi
import logging
def test_set_logging_level():
assert logging.getLogger('system').getEffectiveLevel() == logging.WARNING
micropsi.set_logging_levels(system='DEBUG', world='DEBUG', nodenet='DEBUG')
assert logging.getLogger('system').getEffectiveLevel() == logging.DEBUG
assert logging.getLogger('world').getEffectiveLevel() == logging.DEBUG
assert logging.getLogger('nodenet').getEffectiveLevel() == logging.DEBUG
def test_get_logging_levels():
logging.getLogger('system').setLevel(logging.INFO)
logging.getLogger('world').setLevel(logging.WARNING)
logging.getLogger('nodenet').setLevel(logging.DEBUG)
res = micropsi.get_logging_levels()
assert res['system'] == 'INFO'
assert res['world'] == 'WARNING'
assert res['nodenet'] == 'DEBUG'
def test_get_logger_messages():
msg = "Attention passengers. The next redline train to braintree is now arriving!"
micropsi.set_logging_levels(system='INFO')
logging.getLogger('system').info(msg)
res = micropsi.get_logger_messages('system')
item = res['logs'][-1]
assert item['msg']
assert item['logger'] == 'system'
assert item['level'] == 'INFO'
assert 'time' in item
def test_get_multiple_logger_messages_are_sorted():
logging.getLogger('nodenet').warning('First.')
logging.getLogger('system').warning('Second')
logging.getLogger('world').warning('Wat?')
res = micropsi.get_logger_messages(['system', 'world', 'nodenet'])
assert len(res['logs']) == 3
assert res['logs'][0]['logger'] == 'nodenet'
assert res['logs'][1]['logger'] == 'system'
assert res['logs'][2]['logger'] == 'world'
def test_register_runner_condition_step(test_nodenet):
import time
micropsi.set_runner_properties(1, 1)
success, data = micropsi.set_runner_condition(test_nodenet, steps=7)
assert data['step'] == 7
assert data['step_amount'] == 7
micropsi.start_nodenetrunner(test_nodenet)
assert micropsi.nodenets[test_nodenet].is_active
time.sleep(1)
assert micropsi.nodenets[test_nodenet].current_step == 7
assert not micropsi.nodenets[test_nodenet].is_active
# test that the condition stays active.
micropsi.start_nodenetrunner(test_nodenet)
assert micropsi.nodenets[test_nodenet].is_active
time.sleep(1)
assert micropsi.nodenets[test_nodenet].current_step == 14
assert not micropsi.nodenets[test_nodenet].is_active
def test_register_runner_condition_monitor(test_nodenet):
import time
micropsi.set_runner_properties(1, 1)
nn = micropsi.nodenets[test_nodenet]
node = nn.netapi.create_node('Register', None)
nn.netapi.link(node, 'gen', node, 'gen', weight=2)
node.activation = 0.1
uid = micropsi.add_gate_monitor(test_nodenet, node.uid, 'gen')
micropsi.set_runner_condition(test_nodenet, monitor={
'uid': uid,
'value': 0.8
})
micropsi.start_nodenetrunner(test_nodenet)
assert micropsi.nodenets[test_nodenet].is_active
time.sleep(1)
assert not micropsi.nodenets[test_nodenet].is_active
assert micropsi.nodenets[test_nodenet].current_step == 3
assert round(nn.get_node(node.uid).get_gate('gen').activation, 4) == 0.8
def test_get_links_for_nodes(test_nodenet, node):
api = micropsi.nodenets[test_nodenet].netapi
ns = api.create_nodespace(None)
node = api.get_node(node)
pipe1 = api.create_node("Pipe", ns.uid, "pipe1")
pipe2 = api.create_node("Pipe", ns.uid, "pipe2")
pipe3 = api.create_node("Pipe", ns.uid, "pipe3")
api.link(node, 'gen', pipe1, 'gen')
api.link(pipe2, 'sub', node, 'sub')
data = micropsi.get_links_for_nodes(test_nodenet, [node.uid])
assert len(data['links'].values()) == 3 # node has a genloop
assert len(data['nodes'].values()) == 2
assert pipe1.uid in data['nodes']
assert pipe2.uid in data['nodes']
assert pipe3.uid not in data['nodes']
| printedheart/micropsi2 | micropsi_core/tests/test_runtime.py | Python | mit | 3,993 |
from datetime import datetime, timedelta
import functools
import numpy as np
import pandas as pd
from .options import OPTIONS
from .pycompat import iteritems, unicode_type, bytes_type, dask_array_type
def pretty_print(x, numchars):
"""Given an object `x`, call `str(x)` and format the returned string so
that it is numchars long, padding with trailing spaces or truncating with
ellipses as necessary
"""
s = str(x)
if len(s) > numchars:
return s[:(numchars - 3)] + '...'
else:
return s + ' ' * (numchars - len(s))
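# Illustrative examples (added for clarity, not part of the original module):
#   pretty_print('abc', 6)       -> 'abc   '   (padded with trailing spaces)
#   pretty_print('abcdefgh', 6)  -> 'abc...'   (truncated with an ellipsis)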
def wrap_indent(text, start='', length=None):
if length is None:
length = len(start)
indent = '\n' + ' ' * length
return start + indent.join(x for x in text.splitlines())
def _get_indexer_at_least_n_items(shape, n_desired):
assert 0 < n_desired <= np.prod(shape)
cum_items = np.cumprod(shape[::-1])
n_steps = np.argmax(cum_items >= n_desired)
stop = int(np.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]))
indexer = ((0, ) * (len(shape) - 1 - n_steps) + (slice(stop), ) +
(slice(None), ) * n_steps)
return indexer
def first_n_items(x, n_desired):
"""Returns the first n_desired items of an array"""
# Unfortunately, we can't just do x.flat[:n_desired] here because x might
# not be a numpy.ndarray. Moreover, access to elements of x could be very
# expensive (e.g. if it's only available over DAP), so go out of our way to
# get them in a single call to __getitem__ using only slices.
if n_desired < 1:
raise ValueError('must request at least one item')
if x.size == 0:
# work around for https://github.com/numpy/numpy/issues/5195
return []
if n_desired < x.size:
indexer = _get_indexer_at_least_n_items(x.shape, n_desired)
x = x[indexer]
return np.asarray(x).flat[:n_desired]
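# Example sketch (added for clarity): for a 10x10 array only the first row is
# sliced before flattening, so
#   first_n_items(np.arange(100).reshape(10, 10), 3) -> array([0, 1, 2])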
def format_timestamp(t):
"""Cast given object to a Timestamp and return a nicely formatted string"""
datetime_str = str(pd.Timestamp(t))
try:
date_str, time_str = datetime_str.split()
except ValueError:
# catch NaT and others that don't split nicely
return datetime_str
else:
if time_str == '00:00:00':
return date_str
else:
return '%sT%s' % (date_str, time_str)
def format_timedelta(t, timedelta_format=None):
"""Cast given object to a Timestamp and return a nicely formatted string"""
timedelta_str = str(pd.Timedelta(t))
try:
days_str, time_str = timedelta_str.split(' days ')
except ValueError:
# catch NaT and others that don't split nicely
return timedelta_str
else:
if timedelta_format == 'date':
return days_str + ' days'
elif timedelta_format == 'time':
return time_str
else:
return timedelta_str
def format_item(x, timedelta_format=None, quote_strings=True):
"""Returns a succinct summary of an object as a string"""
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (unicode_type, bytes_type)):
return repr(x) if quote_strings else x
elif isinstance(x, (float, np.float)):
return '{0:.4}'.format(x)
else:
return str(x)
def format_items(x):
"""Returns a succinct summaries of all items in a sequence as strings"""
x = np.asarray(x)
timedelta_format = 'datetime'
if np.issubdtype(x.dtype, np.timedelta64):
x = np.asarray(x, dtype='timedelta64[ns]')
day_part = (x[~pd.isnull(x)]
.astype('timedelta64[D]')
.astype('timedelta64[ns]'))
time_needed = x != day_part
day_needed = day_part != np.timedelta64(0, 'ns')
if np.logical_not(day_needed).all():
timedelta_format = 'time'
elif np.logical_not(time_needed).all():
timedelta_format = 'date'
formatted = [format_item(xi, timedelta_format) for xi in x]
return formatted
def format_array_flat(items_ndarray, max_width):
"""Return a formatted string for as many items in the flattened version of
items_ndarray that will fit within max_width characters
"""
# every item will take up at least two characters, but we always want to
# print at least one item
max_possibly_relevant = max(int(np.ceil(max_width / 2.0)), 1)
relevant_items = first_n_items(items_ndarray, max_possibly_relevant)
pprint_items = format_items(relevant_items)
cum_len = np.cumsum([len(s) + 1 for s in pprint_items]) - 1
if (max_possibly_relevant < items_ndarray.size or
(cum_len > max_width).any()):
end_padding = ' ...'
count = max(np.argmax((cum_len + len(end_padding)) > max_width), 1)
pprint_items = pprint_items[:count]
else:
end_padding = ''
pprint_str = ' '.join(pprint_items) + end_padding
return pprint_str
def _summarize_var_or_coord(name, var, col_width, show_values=True,
marker=' ', max_width=None):
if max_width is None:
max_width = OPTIONS['display_width']
first_col = pretty_print(' %s %s ' % (marker, name), col_width)
dims_str = '(%s) ' % ', '.join(map(str, var.dims)) if var.dims else ''
front_str = first_col + dims_str + ('%s ' % var.dtype)
if show_values:
values_str = format_array_flat(var, max_width - len(front_str))
else:
values_str = '...'
return front_str + values_str
def _not_remote(var):
"""Helper function to identify if array is positively identifiable as
coming from a remote source.
"""
source = var.encoding.get('source')
if source and source.startswith('http') and not var._in_memory:
return False
return True
def summarize_var(name, var, col_width):
show_values = _not_remote(var)
return _summarize_var_or_coord(name, var, col_width, show_values)
def summarize_coord(name, var, col_width):
is_index = name in var.dims
show_values = is_index or _not_remote(var)
marker = '*' if is_index else ' '
return _summarize_var_or_coord(name, var, col_width, show_values, marker)
def _maybe_truncate(obj, maxlen=500):
s = str(obj)
if len(s) > maxlen:
s = s[:(maxlen - 3)] + '...'
return s
def summarize_attr(key, value, col_width=None):
# ignore col_width for now to more clearly distinguish attributes
return ' %s: %s' % (key, _maybe_truncate(value))
EMPTY_REPR = ' *empty*'
def _calculate_col_width(mapping):
max_name_length = max(len(str(k)) for k in mapping) if mapping else 0
col_width = max(max_name_length, 7) + 6
return col_width
def _mapping_repr(mapping, title, summarizer, col_width=None):
if col_width is None:
col_width = _calculate_col_width(mapping)
summary = ['%s:' % title]
if mapping:
summary += [summarizer(k, v, col_width) for k, v in mapping.items()]
else:
summary += [EMPTY_REPR]
return '\n'.join(summary)
coords_repr = functools.partial(_mapping_repr, title='Coordinates',
summarizer=summarize_coord)
vars_repr = functools.partial(_mapping_repr, title='Data variables',
summarizer=summarize_var)
attrs_repr = functools.partial(_mapping_repr, title='Attributes',
summarizer=summarize_attr)
def indexes_repr(indexes):
summary = []
for k, v in indexes.items():
summary.append(wrap_indent(repr(v), '%s: ' % k))
return '\n'.join(summary)
def array_repr(arr):
# used for DataArray, Variable and Coordinate
if hasattr(arr, 'name') and arr.name is not None:
name_str = '%r ' % arr.name
else:
name_str = ''
dim_summary = ', '.join('%s: %s' % (k, v) for k, v
in zip(arr.dims, arr.shape))
summary = ['<xarray.%s %s(%s)>'
% (type(arr).__name__, name_str, dim_summary)]
if isinstance(getattr(arr, 'variable', arr)._data, dask_array_type):
summary.append(repr(arr.data))
elif arr._in_memory or arr.size < 1e5:
summary.append(repr(arr.values))
else:
summary.append('[%s values with dtype=%s]' % (arr.size, arr.dtype))
if hasattr(arr, 'coords'):
if arr.coords:
summary.append(repr(arr.coords))
if arr.attrs:
summary.append(attrs_repr(arr.attrs))
return '\n'.join(summary)
def dataset_repr(ds):
summary = ['<xarray.%s>' % type(ds).__name__]
col_width = _calculate_col_width(ds)
dims_start = pretty_print('Dimensions:', col_width)
all_dim_strings = ['%s: %s' % (k, v) for k, v in iteritems(ds.dims)]
summary.append('%s(%s)' % (dims_start, ', '.join(all_dim_strings)))
summary.append(coords_repr(ds.coords, col_width=col_width))
summary.append(vars_repr(ds.data_vars, col_width=col_width))
if ds.attrs:
summary.append(attrs_repr(ds.attrs))
return '\n'.join(summary)
| drewokane/xray | xarray/core/formatting.py | Python | apache-2.0 | 9,132 |
from __future__ import print_function
import sys
from . import actions
from . import axioms
from . import conditions
from . import predicates
from . import pddl_types
from . import functions
from . import f_expression
class Task(object):
def __init__(self, domain_name, task_name, requirements,
types, objects, predicates, functions, init, goal, actions, axioms, use_metric):
self.domain_name = domain_name
self.task_name = task_name
self.requirements = requirements
self.types = types
self.objects = objects
self.predicates = predicates
self.functions = functions
self.init = init
self.goal = goal
self.actions = actions
self.axioms = axioms
self.axiom_counter = 0
self.use_min_cost_metric = use_metric
def add_axiom(self, parameters, condition):
name = "new-axiom@%d" % self.axiom_counter
self.axiom_counter += 1
axiom = axioms.Axiom(name, parameters, len(parameters), condition)
self.predicates.append(predicates.Predicate(name, parameters))
self.axioms.append(axiom)
return axiom
@staticmethod
def parse(domain_pddl, task_pddl):
domain_name, domain_requirements, types, constants, predicates, functions, actions, axioms \
= parse_domain(domain_pddl)
task_name, task_domain_name, task_requirements, objects, init, goal, use_metric = parse_task(task_pddl)
assert domain_name == task_domain_name
requirements = Requirements(sorted(set(
domain_requirements.requirements +
task_requirements.requirements)))
objects = constants + objects
check_for_duplicates(
[o.name for o in objects],
errmsg="error: duplicate object %r",
finalmsg="please check :constants and :objects definitions")
init += [conditions.Atom("=", (obj.name, obj.name)) for obj in objects]
return Task(domain_name, task_name, requirements, types, objects,
predicates, functions, init, goal, actions, axioms, use_metric)
def dump(self):
print("Problem %s: %s [%s]" % (
self.domain_name, self.task_name, self.requirements))
print("Types:")
for type in self.types:
print(" %s" % type)
print("Objects:")
for obj in self.objects:
print(" %s" % obj)
print("Predicates:")
for pred in self.predicates:
print(" %s" % pred)
print("Functions:")
for func in self.functions:
print(" %s" % func)
print("Init:")
for fact in self.init:
print(" %s" % fact)
print("Goal:")
self.goal.dump()
print("Actions:")
for action in self.actions:
action.dump()
if self.axioms:
print("Axioms:")
for axiom in self.axioms:
axiom.dump()
class Requirements(object):
def __init__(self, requirements):
self.requirements = requirements
for req in requirements:
assert req in (
":strips", ":adl", ":typing", ":negation", ":equality",
":negative-preconditions", ":disjunctive-preconditions",
":existential-preconditions", ":universal-preconditions",
":quantified-preconditions", ":conditional-effects",
":derived-predicates", ":action-costs"), req
def __str__(self):
return ", ".join(self.requirements)
def parse_domain(domain_pddl):
iterator = iter(domain_pddl)
define_tag = next(iterator)
assert define_tag == "define"
domain_line = next(iterator)
assert domain_line[0] == "domain" and len(domain_line) == 2
yield domain_line[1]
    ## We allow an arbitrary order of the requirements, types, constants,
## predicates and functions specification. The PDDL BNF is more strict on
## this, so we print a warning if it is violated.
requirements = Requirements([":strips"])
the_types = [pddl_types.Type("object")]
constants, the_predicates, the_functions = [], [], []
correct_order = [":requirements", ":types", ":constants", ":predicates",
":functions"]
seen_fields = []
for opt in iterator:
field = opt[0]
if field not in correct_order:
first_action = opt
break
if field in seen_fields:
raise SystemExit("Error in domain specification\n" +
"Reason: two '%s' specifications." % field)
if (seen_fields and
correct_order.index(seen_fields[-1]) > correct_order.index(field)):
msg = "\nWarning: %s specification not allowed here (cf. PDDL BNF)" % field
print(msg, file=sys.stderr)
seen_fields.append(field)
if field == ":requirements":
requirements = Requirements(opt[1:])
elif field == ":types":
the_types.extend(pddl_types.parse_typed_list(opt[1:],
constructor=pddl_types.Type))
elif field == ":constants":
constants = pddl_types.parse_typed_list(opt[1:])
elif field == ":predicates":
the_predicates = [predicates.Predicate.parse(entry)
for entry in opt[1:]]
the_predicates += [predicates.Predicate("=",
[pddl_types.TypedObject("?x", "object"),
pddl_types.TypedObject("?y", "object")])]
elif field == ":functions":
the_functions = pddl_types.parse_typed_list(
opt[1:],
constructor=functions.Function.parse,
default_type="number")
pddl_types.set_supertypes(the_types)
# for type in the_types:
# print repr(type), type.supertype_names
yield requirements
yield the_types
yield constants
yield the_predicates
yield the_functions
entries = [first_action] + [entry for entry in iterator]
the_axioms = []
the_actions = []
for entry in entries:
if entry[0] == ":derived":
axiom = axioms.Axiom.parse(entry)
the_axioms.append(axiom)
else:
action = actions.Action.parse(entry)
the_actions.append(action)
yield the_actions
yield the_axioms
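# Note (added for clarity): parse_domain and parse_task expect the tokenised
# s-expression form of a PDDL file, i.e. nested lists of strings such as
#   ["define", ["domain", "gripper"],
#    [":requirements", ":strips"],
#    [":predicates", ["at-robby", "?r"]], ...]
# The generators yield the parsed pieces in the order Task.parse unpacks them.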
def parse_task(task_pddl):
iterator = iter(task_pddl)
define_tag = next(iterator)
assert define_tag == "define"
problem_line = next(iterator)
assert problem_line[0] == "problem" and len(problem_line) == 2
yield problem_line[1]
domain_line = next(iterator)
assert domain_line[0] == ":domain" and len(domain_line) == 2
yield domain_line[1]
requirements_opt = next(iterator)
if requirements_opt[0] == ":requirements":
requirements = requirements_opt[1:]
objects_opt = next(iterator)
else:
requirements = []
objects_opt = requirements_opt
yield Requirements(requirements)
if objects_opt[0] == ":objects":
yield pddl_types.parse_typed_list(objects_opt[1:])
init = next(iterator)
else:
yield []
init = objects_opt
assert init[0] == ":init"
initial = []
initial_true = set()
initial_false = set()
initial_assignments = dict()
for fact in init[1:]:
if fact[0] == "=":
try:
assignment = f_expression.parse_assignment(fact)
except ValueError as e:
raise SystemExit("Error in initial state specification\n" +
"Reason: %s." % e)
if not isinstance(assignment.expression,
f_expression.NumericConstant):
raise SystemExit("Illegal assignment in initial state " +
"specification:\n%s" % assignment)
if assignment.fluent in initial_assignments:
prev = initial_assignments[assignment.fluent]
if assignment.expression == prev.expression:
print("Warning: %s is specified twice" % assignment,
"in initial state specification")
else:
raise SystemExit("Error in initial state specification\n" +
"Reason: conflicting assignment for " +
"%s." % assignment.fluent)
else:
initial_assignments[assignment.fluent] = assignment
initial.append(assignment)
elif fact[0] == "not":
atom = conditions.Atom(fact[1][0], fact[1][1:])
check_atom_consistency(atom, initial_false, initial_true, False)
initial_false.add(atom)
else:
atom = conditions.Atom(fact[0], fact[1:])
check_atom_consistency(atom, initial_true, initial_false)
initial_true.add(atom)
initial.extend(initial_true)
yield initial
goal = next(iterator)
assert goal[0] == ":goal" and len(goal) == 2
yield conditions.parse_condition(goal[1])
use_metric = False
for entry in iterator:
if entry[0] == ":metric":
if entry[1]=="minimize" and entry[2][0] == "total-cost":
use_metric = True
else:
assert False, "Unknown metric."
yield use_metric
for entry in iterator:
assert False, entry
def check_atom_consistency(atom, same_truth_value, other_truth_value, atom_is_true=True):
if atom in other_truth_value:
raise SystemExit("Error in initial state specification\n" +
"Reason: %s is true and false." % atom)
if atom in same_truth_value:
if not atom_is_true:
atom = atom.negate()
print("Warning: %s is specified twice in initial state specification" % atom)
def check_for_duplicates(elements, errmsg, finalmsg):
seen = set()
errors = []
for element in elements:
if element in seen:
errors.append(errmsg % element)
else:
seen.add(element)
if errors:
raise SystemExit("\n".join(errors) + "\n" + finalmsg)
| rock-planning/planning-fd_uniform | src/translate/pddl/tasks.py | Python | gpl-3.0 | 10,257 |
import datetime, json, logging, uuid
from . import LANGUAGES, RE_CHALLENGE_ID, RE_USER_ID
from .StorageHelper import StorageKeys, get_redis, wait_for_redis
# -------------------------------------------------------------------
@wait_for_redis
def CreateSubmission(lang, user_id, challenge_id, code, simulation=None):
# Language
lang = lang.strip()
if not lang in LANGUAGES:
logging.error('Language is invalid')
return None
# User
user_id = user_id.strip()
if not RE_USER_ID.match(user_id):
logging.error('User is invalid')
return None
user = get_redis().hget(StorageKeys.Users, user_id)
if not user:
logging.error('User is Unknown')
return None
# Challenge
challenge_id = challenge_id.strip()
if not RE_CHALLENGE_ID.match(challenge_id):
logging.error('Challenge is invalid')
return None
from .ChallengeHelper import LoadChallenge
if not LoadChallenge(challenge_id):
logging.error('Challenge is unknown')
return None
# Code
code = code.replace('\r', '')
if not code:
logging.error('Code is invalid')
return None
# Execute
submission_id = str(uuid.uuid4())
submission = {
'challenge_id': challenge_id,
'code': code,
'id': submission_id,
'lang': lang,
'stamp': datetime.datetime.utcnow().timestamp(),
'user_id': user_id
}
if simulation != None:
if not isinstance(simulation, int):
logging.error('Simulation is invalid: %s', simulation)
return None
submission['simulation'] = simulation
pipe = get_redis().pipeline()
pipe.hset(StorageKeys.Submissions, submission_id, json.dumps(submission))
pipe.lpush(StorageKeys.SubmissionsQueue, submission_id)
pipe.execute()
return submission
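# Usage sketch (illustrative only; the user and challenge ids are hypothetical
# and must match RE_USER_ID / RE_CHALLENGE_ID and exist in storage):
#   submission = CreateSubmission("python", "user1", "challenge1",
#                                 "print('hello')", simulation=None)
#   stored = LoadSubmission(submission["id"]) if submission else None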
# -------------------------------------------------------------------
@wait_for_redis
def LoadSubmissions():
return [ json.loads(submission) for submission_id, submission in get_redis().hgetall(StorageKeys.Submissions).items() ]
# -------------------------------------------------------------------
@wait_for_redis
def LoadSubmission(submission_id):
submission = get_redis().hget(StorageKeys.Submissions, submission_id)
return json.loads(submission) if submission else None
# -------------------------------------------------------------------
@wait_for_redis
def WaitSubmission():
logging.info('Wait submission')
submission_id = get_redis().brpoplpush(StorageKeys.SubmissionsQueue, StorageKeys.SubmissionsQueueWIP, 0)
logging.info('Load submission %s', submission_id)
return json.loads(get_redis().hget(StorageKeys.Submissions, submission_id))
# -------------------------------------------------------------------
| rlods/CodingChallenge | app/helpers/SubmissionHelper.py | Python | mit | 2,584 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"quickly.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| wearespindle/quickly.press | manage.py | Python | mit | 276 |
"""
Algorithmic Thinking Part 2
Project 4: Computing alignment of Sequences
Author: Weikang Sun
Date: 11/2/15
CodeSkulptor source:
http://www.codeskulptor.org/#user40_tbt1hSyQm6_25.py
"""
def build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score):
"""
Function to build a scoring matrix given the alphabet, diagonal score,
off-diagonal score, and dash score.
Returns dictionary of dictionaries.
"""
alphabet_dash = list(alphabet) + ["-"]
score_matrix = {}
for entry_row in alphabet_dash:
matrix_row = {}
for entry_column in alphabet_dash:
            if entry_row == "-" or entry_column == "-":
                matrix_row[entry_column] = dash_score
            elif entry_column == entry_row:
matrix_row[entry_column] = diag_score
else:
matrix_row[entry_column] = off_diag_score
score_matrix[entry_row] = matrix_row
return score_matrix
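# Example (added for illustration): build_scoring_matrix("AC", 2, 1, -1)
# returns the dictionary of dictionaries
#   {'A': {'A': 2, 'C': 1, '-': -1},
#    'C': {'A': 1, 'C': 2, '-': -1},
#    '-': {'A': -1, 'C': -1, '-': -1}}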
def print_scoring_matrix(scoring_matrix):
""" Helper function to print scoring matrix nicely """
for row in scoring_matrix.keys():
print str(row) + ": " + str(scoring_matrix[row])
def compute_alignment_matrix(seq_x, seq_y, scoring_matrix, global_flag = True):
"""
Function to compute the alignment matrix for two sequences given
those sequences and the scoring matrix. Global flag dictates whether
a global or local alignment should be computed.
Returns a matrix.
"""
len_x = len(seq_x) + 1
len_y = len(seq_y) + 1
# first create an empty grid of the right dimensions
align_matrix = [[0 for dummy_col in range(len_y)] for dummy_row in range(len_x)]
# global flag allows negative scores
if global_flag:
# fill out leftmost column with incrementing dash score
for row in range(1, len_x ):
align_matrix[row][0] = align_matrix[row - 1][0] + \
scoring_matrix[seq_x[row - 1]]["-"]
# fill out uppermost row with increment dash score
for col in range(1, len_y):
align_matrix[0][col] = align_matrix[0][col - 1] + \
scoring_matrix["-"][seq_y[col - 1]]
# iteratively fill out the rest of the matrix
for row in range(1, len_x):
for col in range(1, len_y):
align_matrix[row][col] = max(align_matrix[row - 1][col - 1] +
scoring_matrix[seq_x[row - 1]][seq_y[col - 1]],
align_matrix[row - 1][col] +
scoring_matrix[seq_x[row - 1]]["-"],
align_matrix[row][col - 1] +
scoring_matrix["-"][seq_y[col - 1]])
if not global_flag:
# must be all positive or 0
align_matrix[row][col] = max(align_matrix[row][col], 0)
return align_matrix
def print_alignment_matrix(align_matrix):
""" Helper function to print alignment matrix nicely"""
for row in range(len(align_matrix)):
print align_matrix[row]
return
def compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):
"""
Function to compute the global alignment of two sequences given the
scoring matrix and their global alignment matrix.
Returns a tuple of the form (score, align_x, align_y)
"""
row = len(seq_x)
col = len(seq_y)
align_x = ""
align_y = ""
while row != 0 and col != 0:
# checks along diagonal
if alignment_matrix[row][col] == alignment_matrix[row - 1][col - 1] + \
scoring_matrix[seq_x[row - 1]][seq_y[col - 1]]:
align_x = seq_x[row - 1] + align_x
align_y = seq_y[col - 1] + align_y
row -= 1
col -= 1
else:
# checks along row
if alignment_matrix[row][col] == alignment_matrix[row - 1][col] + \
scoring_matrix[seq_x[row - 1]]["-"]:
align_x = seq_x[row - 1] + align_x
align_y = "-" + align_y
row -= 1
else:
align_x = "-" + align_x
align_y = seq_y[col - 1] + align_y
col -= 1
while row != 0:
align_x = seq_x[row - 1] + align_x
align_y = "-" + align_y
row -= 1
while col != 0:
align_x = "-" + align_x
align_y = seq_y[col - 1] + align_y
col -= 1
return (alignment_matrix[-1][-1], align_x, align_y)
def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):
"""
Function to compute the local alignment of two sequences given the
scoring matrix and their local alignment matrix.
Returns a tuple of the form (score, align_x, align_y)
"""
row = 0
col = 0
max_value = 0
# find the maximum value and grid coordinates in the alignment matrix
for row_i in range(len(seq_x) + 1):
for col_j in range(len(seq_y) + 1):
value = alignment_matrix[row_i][col_j]
if value > max_value:
max_value = value
row = row_i
col = col_j
align_x = ""
align_y = ""
while row != 0 and col != 0:
# checks along diagonal
if alignment_matrix[row][col] == alignment_matrix[row - 1][col - 1] + \
scoring_matrix[seq_x[row - 1]][seq_y[col - 1]]:
align_x = seq_x[row - 1] + align_x
align_y = seq_y[col - 1] + align_y
row -= 1
col -= 1
else:
# checks along row
if alignment_matrix[row][col] == alignment_matrix[row - 1][col] + \
scoring_matrix[seq_x[row - 1]]["-"]:
align_x = seq_x[row - 1] + align_x
align_y = "-" + align_y
row -= 1
else:
align_x = "-" + align_x
align_y = seq_y[col - 1] + align_y
col -= 1
if alignment_matrix[row][col] == 0:
break
return (max_value, align_x, align_y)
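# Usage sketch (added for illustration, with arbitrary scores):
#   scoring = build_scoring_matrix("ACGT", 10, 4, -6)
#   align = compute_alignment_matrix("AC", "C", scoring, True)
#   compute_global_alignment("AC", "C", scoring, align)
# should give (4, 'AC', '-C'): the two 'C's are matched and 'A' pairs with a dash.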
| wezil/algorithmic-thinking | 4p-sequence-align.py | Python | mit | 6,354 |
# coding:utf-8
from importlib import import_module
from django.http import HttpResponse
from . import settings as USettings
import os
import json
from django.views.decorators.csrf import csrf_exempt
import datetime
import random
import urllib
from django.utils import six
if six.PY3:
long = int
def get_path_format_vars():
return {
"year": datetime.datetime.now().strftime("%Y"),
"month": datetime.datetime.now().strftime("%m"),
"day": datetime.datetime.now().strftime("%d"),
"date": datetime.datetime.now().strftime("%Y%m%d"),
"time": datetime.datetime.now().strftime("%H%M%S"),
"datetime": datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
"rnd": random.randrange(100, 999)
}
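# Example of the returned mapping (values are illustrative only):
#   {"year": "2016", "month": "03", "day": "08", "date": "20160308",
#    "time": "153045", "datetime": "20160308153045", "rnd": 123}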
# Save an uploaded file
def save_upload_file(PostFile, FilePath):
try:
f = open(FilePath, 'wb')
for chunk in PostFile.chunks():
f.write(chunk)
except Exception as E:
f.close()
return u"写入文件错误:" + E.message
f.close()
return u"SUCCESS"
@csrf_exempt
def get_ueditor_settings(request):
return HttpResponse(json.dumps(USettings.UEditorUploadSettings, ensure_ascii=False), content_type="application/javascript")
@csrf_exempt
def get_ueditor_controller(request):
"""获取ueditor的后端URL地址 """
action = request.GET.get("action", "")
reponseAction = {
"config": get_ueditor_settings,
"uploadimage": UploadFile,
"uploadscrawl": UploadFile,
"uploadvideo": UploadFile,
"uploadfile": UploadFile,
"catchimage": catcher_remote_image,
"listimage": list_files,
"listfile": list_files
}
return reponseAction[action](request)
@csrf_exempt
def list_files(request):
"""列出文件"""
if request.method != "GET":
return HttpResponse(json.dumps(u"{'state:'ERROR'}"), content_type="application/javascript")
    # Get the action
action = request.GET.get("action", "listimage")
allowFiles = {
"listfile": USettings.UEditorUploadSettings.get("fileManagerAllowFiles", []),
"listimage": USettings.UEditorUploadSettings.get("imageManagerAllowFiles", [])
}
listSize = {
"listfile": USettings.UEditorUploadSettings.get("fileManagerListSize", ""),
"listimage": USettings.UEditorUploadSettings.get("imageManagerListSize", "")
}
listpath = {
"listfile": USettings.UEditorUploadSettings.get("fileManagerListPath", ""),
"listimage": USettings.UEditorUploadSettings.get("imageManagerListPath", "")
}
    # Get the parameters
list_size = long(request.GET.get("size", listSize[action]))
list_start = long(request.GET.get("start", 0))
files = []
root_path = os.path.join(
USettings.gSettings.MEDIA_ROOT, listpath[action]).replace("\\", "/")
files = get_files(root_path, root_path, allowFiles[action])
if (len(files) == 0):
return_info = {
"state": u"未找到匹配文件!",
"list": [],
"start": list_start,
"total": 0
}
else:
return_info = {
"state": "SUCCESS",
"list": files[list_start:list_start + list_size],
"start": list_start,
"total": len(files)
}
return HttpResponse(json.dumps(return_info), content_type="application/javascript")
def get_files(root_path, cur_path, allow_types=[]):
files = []
items = os.listdir(cur_path)
for item in items:
item = unicode(item)
item_fullname = os.path.join(
root_path, cur_path, item).replace("\\", "/")
if os.path.isdir(item_fullname):
files.extend(get_files(root_path, item_fullname, allow_types))
else:
ext = os.path.splitext(item_fullname)[1]
is_allow_list = (len(allow_types) == 0) or (ext in allow_types)
if is_allow_list:
files.append({
"url": urllib.basejoin(USettings.gSettings.MEDIA_URL, os.path.join(os.path.relpath(cur_path, root_path), item).replace("\\", "/")),
"mtime": os.path.getmtime(item_fullname)
})
return files
@csrf_exempt
def UploadFile(request):
"""上传文件"""
if not request.method == "POST":
return HttpResponse(json.dumps(u"{'state:'ERROR'}"), content_type="application/javascript")
state = "SUCCESS"
action = request.GET.get("action")
    # Upload the file
upload_field_name = {
"uploadfile": "fileFieldName", "uploadimage": "imageFieldName",
"uploadscrawl": "scrawlFieldName", "catchimage": "catcherFieldName",
"uploadvideo": "videoFieldName",
}
UploadFieldName = request.GET.get(
upload_field_name[action], USettings.UEditorUploadSettings.get(action, "upfile"))
    # Upload scrawl (doodle); scrawls are uploaded base64-encoded and need separate handling
if action == "uploadscrawl":
upload_file_name = "scrawl.png"
upload_file_size = 0
else:
        # Get the uploaded file
file = request.FILES.get(UploadFieldName, None)
if file is None:
return HttpResponse(json.dumps(u"{'state:'ERROR'}"), content_type="application/javascript")
upload_file_name = file.name
upload_file_size = file.size
    # Get the original name of the uploaded file
upload_original_name, upload_original_ext = os.path.splitext(
upload_file_name)
    # File type check
upload_allow_type = {
"uploadfile": "fileAllowFiles",
"uploadimage": "imageAllowFiles",
"uploadvideo": "videoAllowFiles"
}
if action in upload_allow_type:
allow_type = list(request.GET.get(upload_allow_type[
action], USettings.UEditorUploadSettings.get(upload_allow_type[action], "")))
if not upload_original_ext in allow_type:
state = u"服务器不允许上传%s类型的文件。" % upload_original_ext
    # Size check
upload_max_size = {
"uploadfile": "filwMaxSize",
"uploadimage": "imageMaxSize",
"uploadscrawl": "scrawlMaxSize",
"uploadvideo": "videoMaxSize"
}
max_size = long(request.GET.get(upload_max_size[
action], USettings.UEditorUploadSettings.get(upload_max_size[action], 0)))
if max_size != 0:
from .utils import FileSize
MF = FileSize(max_size)
if upload_file_size > MF.size:
state = u"上传文件大小不允许超过%s。" % MF.FriendValue
    # Check whether the save path exists and create it if necessary
upload_path_format = {
"uploadfile": "filePathFormat",
"uploadimage": "imagePathFormat",
"uploadscrawl": "scrawlPathFormat",
"uploadvideo": "videoPathFormat"
}
path_format_var = get_path_format_vars()
path_format_var.update({
"basename": upload_original_name,
"extname": upload_original_ext[1:],
"filename": upload_file_name,
})
    # Get the output file path
OutputPathFormat, OutputPath, OutputFile = get_output_path(
request, upload_path_format[action], path_format_var)
    # Write the file once all checks have passed
if state == "SUCCESS":
if action == "uploadscrawl":
state = save_scrawl_file(
request, os.path.join(OutputPath, OutputFile))
else:
            # Save to file; return ERROR if saving fails
upload_module_name = USettings.UEditorUploadSettings.get(
"upload_module", None)
if upload_module_name:
mod = import_module(upload_module_name)
state = mod.upload(file, OutputPathFormat)
else:
state = save_upload_file(
file, os.path.join(OutputPath, OutputFile))
    # Return data
    return_info = {
        # Name of the saved file
        'url': urllib.basejoin(USettings.gSettings.MEDIA_URL, OutputPathFormat),
        'original': upload_file_name, # original file name
        'type': upload_original_ext,
        'state': state, # upload state; SUCCESS on success, any other value is shown verbatim in the upload widget
'size': upload_file_size
}
return HttpResponse(json.dumps(return_info, ensure_ascii=False), content_type="application/javascript")
@csrf_exempt
def catcher_remote_image(request):
"""远程抓图,当catchRemoteImageEnable:true时,
如果前端插入图片地址与当前web不在同一个域,则由本函数从远程下载图片到本地
"""
if not request.method == "POST":
return HttpResponse(json.dumps(u"{'state:'ERROR'}"), content_type="application/javascript")
state = "SUCCESS"
allow_type = list(request.GET.get(
"catcherAllowFiles", USettings.UEditorUploadSettings.get("catcherAllowFiles", "")))
max_size = long(request.GET.get(
"catcherMaxSize", USettings.UEditorUploadSettings.get("catcherMaxSize", 0)))
remote_urls = request.POST.getlist("source[]", [])
catcher_infos = []
path_format_var = get_path_format_vars()
for remote_url in remote_urls:
        # Get the original name of the uploaded file
remote_file_name = os.path.basename(remote_url)
remote_original_name, remote_original_ext = os.path.splitext(
remote_file_name)
        # File type check
if remote_original_ext in allow_type:
path_format_var.update({
"basename": remote_original_name,
"extname": remote_original_ext[1:],
"filename": remote_original_name
})
            # Compute the file name to save as
o_path_format, o_path, o_file = get_output_path(
request, "catcherPathFormat", path_format_var)
o_filename = os.path.join(o_path, o_file).replace("\\", "/")
            # Read the remote image file
try:
remote_image = urllib.urlopen(remote_url)
                # Write the fetched content to a local file
try:
f = open(o_filename, 'wb')
f.write(remote_image.read())
f.close()
state = "SUCCESS"
except Exception as E:
state = u"写入抓取图片文件错误:%s" % E.message
except Exception as E:
state = u"抓取图片错误:%s" % E.message
catcher_infos.append({
"state": state,
"url": urllib.basejoin(USettings.gSettings.MEDIA_URL, o_path_format),
"size": os.path.getsize(o_filename),
"title": os.path.basename(o_file),
"original": remote_file_name,
"source": remote_url
})
return_info = {
"state": "SUCCESS" if len(catcher_infos) > 0 else "ERROR",
"list": catcher_infos
}
return HttpResponse(json.dumps(return_info, ensure_ascii=False), content_type="application/javascript")
def get_output_path(request, path_format, path_format_var):
    # Get the output file path
    OutputPathFormat = (request.GET.get(path_format, USettings.UEditorSettings[
        "defaultPathFormat"]) % path_format_var).replace("\\", "/")
    # Split OutputPathFormat into directory and file name
    OutputPath, OutputFile = os.path.split(OutputPathFormat)
    OutputPath = os.path.join(USettings.gSettings.MEDIA_ROOT, OutputPath)
    # If OutputFile is empty the given OutputPathFormat contained no file name, so use the default file name
if not OutputFile:
OutputFile = USettings.UEditorSettings[
"defaultPathFormat"] % path_format_var
OutputPathFormat = os.path.join(OutputPathFormat, OutputFile)
if not os.path.exists(OutputPath):
os.makedirs(OutputPath)
return (OutputPathFormat, OutputPath, OutputFile)
# Handle scrawl (doodle) uploads
@csrf_exempt
def save_scrawl_file(request, filename):
import base64
try:
content = request.POST.get(
USettings.UEditorUploadSettings.get("scrawlFieldName", "upfile"))
f = open(filename, 'wb')
f.write(base64.decodestring(content))
f.close()
state = "SUCCESS"
except Exception as E:
state = "写入图片文件错误:%s" % E.message
return state
| yephper/django | django/bin/minicms/DjangoUeditor/views.py | Python | bsd-3-clause | 12,693 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.forms import CreateKeypair
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.forms import KEYPAIR_ERROR_MESSAGES
from openstack_dashboard.test import helpers as test
INDEX_VIEW_URL = reverse('horizon:project:access_and_security:index')
class KeyPairViewTests(test.TestCase):
def test_delete_keypair(self):
keypair = self.keypairs.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.nova, 'keypair_list')
self.mox.StubOutWithMock(api.nova, 'keypair_delete')
# floating_ip_supported is called in Floating IP tab allowed().
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.nova.keypair_list(IsA(http.HttpRequest)) \
.AndReturn(self.keypairs.list())
api.nova.keypair_delete(IsA(http.HttpRequest), keypair.name)
self.mox.ReplayAll()
formData = {'action': 'keypairs__delete__%s' % keypair.name}
res = self.client.post(INDEX_VIEW_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_VIEW_URL)
def test_delete_keypair_exception(self):
keypair = self.keypairs.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.nova, 'keypair_list')
self.mox.StubOutWithMock(api.nova, 'keypair_delete')
# floating_ip_supported is called in Floating IP tab allowed().
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.nova.keypair_list(IsA(http.HttpRequest)) \
.AndReturn(self.keypairs.list())
api.nova.keypair_delete(IsA(http.HttpRequest), keypair.name) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
formData = {'action': 'keypairs__delete__%s' % keypair.name}
res = self.client.post(INDEX_VIEW_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_VIEW_URL)
def test_create_keypair_get(self):
res = self.client.get(
reverse('horizon:project:access_and_security:keypairs:create'))
self.assertTemplateUsed(
res, 'project/access_and_security/keypairs/create.html')
def test_download_keypair_get(self):
keypair_name = "keypair"
context = {'keypair_name': keypair_name}
url = reverse('horizon:project:access_and_security:keypairs:download',
kwargs={'keypair_name': keypair_name})
res = self.client.get(url, context)
self.assertTemplateUsed(
res, 'project/access_and_security/keypairs/download.html')
def test_generate_keypair_get(self):
keypair = self.keypairs.first()
keypair.private_key = "secret"
self.mox.StubOutWithMock(api.nova, 'keypair_create')
api.nova.keypair_create(IsA(http.HttpRequest),
keypair.name).AndReturn(keypair)
self.mox.ReplayAll()
context = {'keypair_name': keypair.name}
url = reverse('horizon:project:access_and_security:keypairs:generate',
kwargs={'keypair_name': keypair.name})
res = self.client.get(url, context)
self.assertTrue(res.has_header('content-disposition'))
def test_keypair_detail_get(self):
keypair = self.keypairs.first()
keypair.private_key = "secrete"
self.mox.StubOutWithMock(api.nova, 'keypair_get')
api.nova.keypair_get(IsA(http.HttpRequest),
keypair.name).AndReturn(keypair)
self.mox.ReplayAll()
context = {'keypair_name': keypair.name}
url = reverse('horizon:project:access_and_security:keypairs:detail',
kwargs={'keypair_name': keypair.name})
res = self.client.get(url, context)
# Note(Itxaka): With breadcrumbs, the title is in a list as active
self.assertContains(res, '<li class="active">Key Pair Details</li>',
1, 200)
self.assertContains(res, "<dd>%s</dd>" % keypair.name, 1, 200)
@test.create_stubs({api.nova: ("keypair_create", "keypair_delete")})
def test_regenerate_keypair_get(self):
keypair = self.keypairs.first()
keypair.private_key = "secret"
optional_param = "regenerate"
api.nova.keypair_delete(IsA(http.HttpRequest), keypair.name)
api.nova.keypair_create(IsA(http.HttpRequest),
keypair.name).AndReturn(keypair)
self.mox.ReplayAll()
url = reverse('horizon:project:access_and_security:keypairs:generate',
kwargs={'keypair_name': keypair.name,
'optional': optional_param})
res = self.client.get(url)
self.assertTrue(res.has_header('content-disposition'))
@test.create_stubs({api.nova: ("keypair_import",)})
def test_import_keypair(self):
key1_name = "new_key_pair"
public_key = "ssh-rsa ABCDEFGHIJKLMNOPQR\r\n" \
"STUVWXYZ1234567890\r" \
"XXYYZZ user@computer\n\n"
api.nova.keypair_import(IsA(http.HttpRequest), key1_name,
public_key.replace("\r", "").replace("\n", ""))
self.mox.ReplayAll()
formData = {'method': 'ImportKeypair',
'name': key1_name,
'public_key': public_key}
url = reverse('horizon:project:access_and_security:keypairs:import')
res = self.client.post(url, formData)
self.assertMessageCount(res, success=1)
@test.create_stubs({api.nova: ("keypair_import",)})
def test_import_keypair_invalid_key(self):
key_name = "new_key_pair"
public_key = "ABCDEF"
api.nova.keypair_import(IsA(http.HttpRequest), key_name, public_key) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
formData = {'method': 'ImportKeypair',
'name': key_name,
'public_key': public_key}
url = reverse('horizon:project:access_and_security:keypairs:import')
res = self.client.post(url, formData, follow=True)
self.assertEqual(res.redirect_chain, [])
msg = 'Unable to import key pair.'
self.assertFormErrors(res, count=1, message=msg)
def test_import_keypair_invalid_key_name(self):
key_name = "invalid#key?name=!"
public_key = "ABCDEF"
formData = {'method': 'ImportKeypair',
'name': key_name,
'public_key': public_key}
url = reverse('horizon:project:access_and_security:keypairs:import')
res = self.client.post(url, formData, follow=True)
self.assertEqual(res.redirect_chain, [])
msg = six.text_type(KEYPAIR_ERROR_MESSAGES['invalid'])
self.assertFormErrors(res, count=1, message=msg)
@test.create_stubs({api.nova: ("keypair_create",)})
def test_generate_keypair_exception(self):
keypair = self.keypairs.first()
api.nova.keypair_create(IsA(http.HttpRequest), keypair.name) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
context = {'keypair_name': keypair.name}
url = reverse('horizon:project:access_and_security:keypairs:generate',
kwargs={'keypair_name': keypair.name})
res = self.client.get(url, context)
self.assertRedirectsNoFollow(
res, reverse('horizon:project:access_and_security:index'))
@test.create_stubs({api.nova: ("keypair_import",)})
def test_import_keypair_with_regex_defined_name(self):
key1_name = "new-key-pair with_regex"
public_key = "ssh-rsa ABCDEFGHIJKLMNOPQR\r\n" \
"STUVWXYZ1234567890\r" \
"XXYYZZ user@computer\n\n"
api.nova.keypair_import(IsA(http.HttpRequest), key1_name,
public_key.replace("\r", "").replace("\n", ""))
self.mox.ReplayAll()
formData = {'method': 'ImportKeypair',
'name': key1_name,
'public_key': public_key}
url = reverse('horizon:project:access_and_security:keypairs:import')
res = self.client.post(url, formData)
self.assertMessageCount(res, success=1)
@test.create_stubs({api.nova: ("keypair_create",)})
def test_create_keypair_with_regex_name_get(self):
keypair = self.keypairs.first()
keypair.name = "key-space pair-regex_name-0123456789"
keypair.private_key = "secret"
api.nova.keypair_create(IsA(http.HttpRequest),
keypair.name).AndReturn(keypair)
self.mox.ReplayAll()
context = {'keypair_name': keypair.name}
url = reverse('horizon:project:access_and_security:keypairs:generate',
kwargs={'keypair_name': keypair.name})
res = self.client.get(url, context)
self.assertTrue(res.has_header('content-disposition'))
def test_download_with_regex_name_get(self):
keypair_name = "key pair-regex_name-0123456789"
context = {'keypair_name': keypair_name}
url = reverse('horizon:project:access_and_security:keypairs:download',
kwargs={'keypair_name': keypair_name})
res = self.client.get(url, context)
self.assertTemplateUsed(
res, 'project/access_and_security/keypairs/download.html')
@test.create_stubs({api.nova: ('keypair_list',)})
def test_create_duplicate_keypair(self):
keypair_name = self.keypairs.first().name
api.nova.keypair_list(IsA(http.HttpRequest)) \
.AndReturn(self.keypairs.list())
self.mox.ReplayAll()
form = CreateKeypair(self.request, data={'name': keypair_name})
self.assertFalse(form.is_valid())
self.assertIn('The name is already in use.',
form.errors['name'][0])
| ankur-gupta91/horizon-net-ip | openstack_dashboard/dashboards/project/access_and_security/keypairs/tests.py | Python | apache-2.0 | 10,966 |
from django import forms
from .validators import IBANValidator, swift_bic_validator, IBAN_COUNTRY_CODE_LENGTH
IBAN_MIN_LENGTH = min(IBAN_COUNTRY_CODE_LENGTH.values())
class IBANFormField(forms.CharField):
"""
An IBAN consists of up to 34 alphanumeric characters.
To limit validation to specific countries, set the 'include_countries' argument with a tuple or list of ISO 3166-1
    alpha-2 codes. For example, `include_countries=('NL', 'BE', 'LU')`.
A list of countries that use IBANs as part of SEPA is included for convenience. To use this feature, set
`include_countries=IBAN_SEPA_COUNTRIES` as an argument to the field.
Example:
.. code-block:: python
from django import forms
from localflavor.generic.forms import IBANFormField
from localflavor.generic.sepa_countries import IBAN_SEPA_COUNTRIES
class MyForm(forms.Form):
iban = IBANFormField(include_countries=IBAN_SEPA_COUNTRIES)
In addition to validating official IBANs, this field can optionally validate unofficial IBANs that have been
catalogued by Nordea by setting the `use_nordea_extensions` argument to True.
https://en.wikipedia.org/wiki/International_Bank_Account_Number
"""
def __init__(self, use_nordea_extensions=False, include_countries=None, *args, **kwargs):
kwargs.setdefault('min_length', IBAN_MIN_LENGTH)
kwargs.setdefault('max_length', 34)
self.default_validators = [IBANValidator(use_nordea_extensions, include_countries)]
super(IBANFormField, self).__init__(*args, **kwargs)
def to_python(self, value):
value = super(IBANFormField, self).to_python(value)
if value is not None:
return value.upper().replace(' ', '').replace('-', '')
return value
def prepare_value(self, value):
""" The display format for IBAN has a space every 4 characters. """
if value is None:
return value
grouping = 4
value = value.upper().replace(' ', '').replace('-', '')
return ' '.join(value[i:i + grouping] for i in range(0, len(value), grouping))
class SWIFTBICFormField(forms.CharField):
"""
A SWIFT-BIC consists of up to 11 alphanumeric characters.
https://en.wikipedia.org/wiki/ISO_9362
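    Example usage (an illustrative sketch; the form name is hypothetical and the
    import path simply mirrors the IBANFormField example above):
    .. code-block:: python
        from django import forms
        from localflavor.generic.forms import SWIFTBICFormField
        class PaymentForm(forms.Form):
            bic = SWIFTBICFormField()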
"""
default_validators = [swift_bic_validator]
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 11)
super(SWIFTBICFormField, self).__init__(*args, **kwargs)
def to_python(self, value):
# BIC is always written in upper case
# https://www2.swift.com/uhbonline/books/public/en_uk/bic_policy/bic_policy.pdf
value = super(SWIFTBICFormField, self).to_python(value)
if value is not None:
return value.upper()
return value
def prepare_value(self, value):
# BIC is always written in upper case
if value is None:
return value
return value.upper()
| benkonrath/django-iban | django_iban/forms.py | Python | bsd-3-clause | 2,977 |
import sys
import socket
import os
import select
from threading import Thread
from queue import Queue
from helpers import encrypt, decrypt, get_client_key, get_nonce, socket_buffer_size
# Globals
router = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
router.settimeout(5)
name = ""
keys = {}
nonces = {}
# Used as temp storage for messages which a client tried to send before they had a secure connection
msg_store = []
file_store = []
# States. A full state machine could be used, but the protocol is simple enough to go without.
# These are named according to what we are waiting for.
# For example, key_ack means that we are waiting for the other
# client to acknowledge that they have the shared key
active = [] # Clients which are completely set up
# Client A side
init_nonce = [] # Requested a nonce from receiver
auth_ack = [] # Waiting for the auth server to send the share key etc
key_ack = [] # Sent the shared key, encrypted with the receiver's master key
final_ack = [] # Sent back nonce-1, waiting for confirmation that B's connection is open
# Client B side
init_key = [] # Someone has requested a nonce. We are waiting for the shared key
nonce_ack = [] # Sent my nonce, encrypted with the shared key
states = [active, init_nonce, auth_ack, key_ack, final_ack, init_key, nonce_ack]
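# A rough sketch of the key-exchange flow implemented below (illustrative only;
# "A" initiates a chat with "B", "Auth" is the authentication server reached via
# the router, K_X is X's master key and K_AB the resulting shared key):
#
#   A -> B    : A                       (announce; B generates nonce N_B)
#   B -> A    : {A, N_B}K_B
#   A -> Auth : A, B, N_A, {A, N_B}K_B
#   Auth -> A : {N_A, K_AB, B, {K_AB, A, N_B}K_B}K_A
#   A -> B    : {K_AB, A, N_B}K_B
#   B -> A    : {N_B2}K_AB              (fresh nonce proves both sides hold K_AB)
#   A -> B    : {N_B2 - 1}K_AB
#   B -> A    : "open"                  (plaintext 3-way handshake confirmation)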
def client(chat_queue, name):
# Main logic
while True:
# queue.get() default is block=True, timeout=None
# so if queue is empty this will block until not empty (just like select)
msg = chat_queue.get()
##############################
# We are receiving a message #
##############################
if msg[0] == "socket":
sep = msg[1].find(")")
sender, content = msg[1][1:sep], msg[1][sep+1:].strip()
# Control messages
#-----------------
if sender == "Router":
if content.startswith("You are now known as"):
# Confirm up my own name
name = content.rsplit(" ", 1)[1]
keys["Auth"] = get_client_key(name)
print(msg[1])
elif content == "/cancel":
print("(Router) %s cancelled their session with you" % sender)
cancel_connection(sender)
# Client A
#---------
elif sender in init_nonce:
# We have gotten a nonce encrypted with the other client's master key
init_nonce.remove(sender)
auth_ack.append(sender)
nonces[sender] = get_nonce()
text = "Auth: %s,%s,%s,%s" % (name, sender, nonces[sender], content)
router.send(text.encode())
elif sender == "Auth":
# We now have a shared key from the Auth server
plaintext = decrypt(keys["Auth"], content)
nonce, sharedkey, receiver, receiver_block = plaintext.split(",")
if not check_nonce(receiver, int(nonce)):
continue
auth_ack.remove(receiver)
key_ack.append(receiver)
keys[receiver] = sharedkey
text = "%s: %s" % (receiver, receiver_block)
router.send(text.encode())
elif sender in key_ack:
# We have gotten an encrypted nonce from the other client
key_ack.remove(sender)
final_ack.append(sender)
plaintext = decrypt(keys[sender], content)
ciphertext = "%s: %s" % (sender, encrypt(keys[sender], eval(plaintext)-1))
router.send(ciphertext.encode())
elif sender in final_ack:
# Final 3 way handshake confirmation
if content == "open" and sender in final_ack:
final_ack.remove(sender)
active.append(sender)
# Send any stored messages
for msg in msg_store:
process_message(msg, name)
msg_store.clear()
# Send any stored files
for file in file_store:
send_file(file[0], file[1])
file_store.clear()
# Client B
#---------
elif sender not in [x for state in states for x in state]:
# Someone wants to set up a secure connection
init_key.append(sender)
nonces[sender] = get_nonce()
plaintext = "%s,%s" % (sender,nonces[sender])
send_encrypted(sender, keys["Auth"], plaintext)
elif sender in init_key:
# Someone has sent us a shared key
init_key.remove(sender)
nonce_ack.append(sender)
plaintext = decrypt(keys["Auth"], content)
shared_key, sender_name, nonce = plaintext.split(",")
check_nonce(sender_name, int(nonce))
keys[sender_name] = shared_key
# make a new nonce to authenticate that both parties have the key
nonces[sender] = get_nonce()
ciphertext = "%s: %s" % (sender_name, encrypt(keys[sender_name], nonces[sender]))
router.send(ciphertext.encode())
elif sender in nonce_ack:
# We have confirmed the connection
nonce = int(decrypt(keys[sender], content))
if not check_nonce(sender, nonce+1):
continue
nonce_ack.remove(sender)
active.append(sender)
# Do the final 3-way handshake
text = "%s: open" % sender
router.send(text.encode())
elif sender in active:
# We have a secure message
if content.startswith("file:"):
receive_file(sender, content[5:])
else:
plaintext = decrypt(keys[sender], content)
print("(%s) %s" % (sender, plaintext))
############################
# We are sending a message #
############################
elif msg[0] == "stdin":
if msg[1] == "quit":
break
process_message(msg[1], name)
def check_nonce(name, nonce):
if not nonces[name] == nonce:
print("%s responded with wrong nonce" % name)
cancel_connection(name)
print("Cancelling connection with %s" % name)
text = "%s: /cancel" % name
router.send(text.encode())
return False
return True
def cancel_connection(name):
for state in states:
if name in state:
state.remove(name)
if name in keys:
del keys[name]
if name in nonces:
del nonces[name]
def process_message(msg, name):
if msg.startswith("/name"):
if name == "":
router.send(msg.encode())
else:
print("You already have a registered name, to change your name restart the client")
elif msg.startswith("/list"):
router.send(msg.encode())
elif msg.startswith("/file"):
fileargs = msg.split(" ")
if fileargs[1] in active:
send_file(fileargs[1], fileargs[2])
else:
file_store.append((fileargs[1], fileargs[2]))
init_nonce.append(fileargs[1])
text = "%s: %s" % (fileargs[1], name)
router.send(text.encode())
else:
sep = msg.find(":")
receiver, content = msg[:sep], msg[sep+1:].strip()
if receiver in active:
send_encrypted(receiver, keys[receiver], content)
else:
# Init protocol with the other client
msg_store.append(msg) # Store the message to send it once we have a connection
init_nonce.append(receiver)
text = "%s: %s" % (receiver, name)
router.send(text.encode())
def send_encrypted(receiver, key, msg):
ciphertext = "%s: %s" % (receiver, encrypt(key, msg))
router.send(ciphertext.encode())
def send_file(receiver, filepath):
try:
filebytes = b''
with open(filepath, "rb") as readfile:
filebytes = readfile.read()
head, tail = os.path.split(filepath)
filename = tail
encrypted_filename = encrypt(keys[receiver], filename)
ciphertext = encrypt(keys[receiver], filebytes, bytes=True)
message = "%s:file:%s:%s" % (receiver, encrypted_filename, ciphertext)
message = message.encode()
if len(message) > socket_buffer_size:
print("File size too large to send. File transfer cancelled")
else:
router.send(message)
print("file sent")
except IOError as e:
print("File not found, %s" % e)
except MemoryError as m:
print("Not enough memory, file too big, %s" % m)
def receive_file(sender, message):
encrypted_filename, encrypted_filebytes = message.split(":")
filename = decrypt(keys[sender], encrypted_filename)
filebytes = decrypt(keys[sender], encrypted_filebytes, bytes=True)
download_dir = "downloads"
if not os.path.exists(download_dir):
os.makedirs(download_dir)
filepath = os.path.join(download_dir, filename)
with open(filepath,"wb") as writefile:
writefile.write(filebytes)
print("file received")
def queue_stdin(q):
for line in sys.stdin:
q.put(("stdin", line.strip()))
def queue_sock_stream(q, s):
while True:
# still using select for sockets because
# it is merely an interface to a system call so better to use for sockets
ready_to_read, ready_to_write, in_error = select.select([s], [], [])
for sock in ready_to_read:
data = sock.recv(socket_buffer_size).decode()
if not data:
sock.close()
q.put(("socket", "Server has closed the connection"))
sys.exit()
else:
q.put(("socket", data))
if __name__ == "__main__":
try:
# Initial setup
if len(sys.argv) < 3:
print('Usage : python chat_client.py hostname port')
sys.exit()
host = sys.argv[1]
port = int(sys.argv[2])
# Connect to router
try:
router.connect((host, port))
except socket.error:
print("Unable to connect")
sys.exit()
# Queue is synchronized and completely thread safe
# These handle stdin and the socket connection to the server
chat_queue = Queue()
thread_stdin = Thread(target=queue_stdin, args=(chat_queue,), daemon=True)
thread_stdin.start()
thread_sock = Thread(target=queue_sock_stream, args=(chat_queue, router), daemon=True)
thread_sock.start()
print("Type 'quit' to quit")
print("Connected to the router. You can start sending msgs")
client(chat_queue, name)
except KeyboardInterrupt:
pass
| wraithy/crypto-chat | client.py | Python | mit | 9,242 |
# Code modified from the Natural Language Toolkit
# Original author: Long Duong <longdt219@gmail.com>
import tempfile
import pickle
import os
import copy
import operator
import scipy.sparse as sparse
import numpy as np
from sklearn.datasets import load_svmlight_file
from sklearn import svm
class Configuration(object):
"""
Class for holding configuration which is the partial analysis of the input sentence.
The transition based parser aims at finding set of operators that transfer the initial
configuration to the terminal configuration.
The configuration includes:
- Stack: for storing partially proceeded words
- Buffer: for storing remaining input words
- Set of arcs: for storing partially built dependency tree
This class also provides a method to represent a configuration as list of features.
"""
def __init__(self, dep_graph, feature_extractor):
"""
:param dep_graph: the representation of an input in the form of dependency graph.
:type dep_graph: DependencyGraph where the dependencies are not specified.
:param feature_extractor: a function which operates on tokens, the
stack, the buffer and returns a list of string features
"""
# dep_graph.nodes contain list of token for a sentence
self.stack = [0] # The root element
# The rest is in the buffer
self.buffer = range(1, len(dep_graph.nodes))
self.arcs = [] # empty set of arc
self._tokens = dep_graph.nodes
self._max_address = len(self.buffer)
self._user_feature_extractor = feature_extractor
def __str__(self):
return 'Stack : ' + \
str(self.stack) + ' Buffer : ' + \
str(self.buffer) + ' Arcs : ' + str(self.arcs)
def extract_features(self):
"""
Extracts features from the configuration
:return: list(str)
"""
return self._user_feature_extractor(self._tokens, self.buffer, self.stack, self.arcs)
class TransitionParser(object):
"""
An arc-eager transition parser
"""
def __init__(self, transition, feature_extractor):
self._dictionary = {}
self._transition = {}
self._match_transition = {}
self._model = None
self._user_feature_extractor = feature_extractor
self.transitions = transition
def _get_dep_relation(self, idx_parent, idx_child, depgraph):
p_node = depgraph.nodes[idx_parent]
c_node = depgraph.nodes[idx_child]
if c_node['word'] is None:
return None # Root word
if c_node['head'] == p_node['address']:
return c_node['rel']
else:
return None
def _convert_to_binary_features(self, features):
"""
This function converts a feature into libsvm format, and adds it to the
feature dictionary
:param features: list of feature string which is needed to convert to
binary features
:type features: list(str)
:return : string of binary features in libsvm format which is
'featureID:value' pairs
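        For example (feature names and ids are purely illustrative): the input
        ['STK_0_POS_NN', 'BUF_0_WORD_dog'] with dictionary ids 3 and 7 would be
        converted to '3:1.0 7:1.0'.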
"""
unsorted_result = []
for feature in features:
self._dictionary.setdefault(feature, len(self._dictionary))
unsorted_result.append(self._dictionary[feature])
# Default value of each feature is 1.0
return ' '.join(str(featureID) + ':1.0' for featureID in sorted(unsorted_result))
@staticmethod
def _is_projective(depgraph):
"""
Checks if a dependency graph is projective
"""
arc_list = set()
for key in depgraph.nodes:
node = depgraph.nodes[key]
if 'head' in node:
childIdx = node['address']
parentIdx = node['head']
arc_list.add((parentIdx, childIdx))
for (parentIdx, childIdx) in arc_list:
# Ensure that childIdx < parentIdx
if childIdx > parentIdx:
temp = childIdx
childIdx = parentIdx
parentIdx = temp
for k in range(childIdx + 1, parentIdx):
for m in range(len(depgraph.nodes)):
if (m < childIdx) or (m > parentIdx):
if (k, m) in arc_list:
return False
if (m, k) in arc_list:
return False
return True
def _write_to_file(self, key, binary_features, input_file):
"""
write the binary features to input file and update the transition dictionary
"""
self._transition.setdefault(key, len(self._transition) + 1)
self._match_transition[self._transition[key]] = key
input_str = str(self._transition[key]) + ' ' + binary_features + '\n'
input_file.write(input_str.encode('utf-8'))
def _create_training_examples_arc_eager(self, depgraphs, input_file):
"""
Create the training example in the libsvm format and write it to the input_file.
        Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Yoav Goldberg and Joakim Nivre
"""
training_seq = []
projective_dependency_graphs = [
dg for dg in depgraphs if TransitionParser._is_projective(dg)]
countProj = len(projective_dependency_graphs)
for depgraph in projective_dependency_graphs:
conf = Configuration(
depgraph, self._user_feature_extractor.extract_features)
while conf.buffer:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if conf.stack:
s0 = conf.stack[-1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = self.transitions.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
self.transitions.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
key = self.transitions.RIGHT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
self.transitions.right_arc(conf, rel)
training_seq.append(key)
continue
# reduce operation
flag = False
for k in range(s0):
if self._get_dep_relation(k, b0, depgraph) is not None:
flag = True
if self._get_dep_relation(b0, k, depgraph) is not None:
flag = True
if flag:
key = self.transitions.REDUCE
self._write_to_file(key, binary_features, input_file)
self.transitions.reduce(conf)
training_seq.append(key)
continue
# Shift operation as the default
key = self.transitions.SHIFT
self._write_to_file(key, binary_features, input_file)
self.transitions.shift(conf)
training_seq.append(key)
print(" Number of training examples : {}".format(len(depgraphs)))
print(" Number of valid (projective) examples : {}".format(countProj))
return training_seq
def train(self, depgraphs):
"""
:param depgraphs : list of DependencyGraph as the training data
:type depgraphs : DependencyGraph
"""
try:
input_file = tempfile.NamedTemporaryFile(
prefix='transition_parse.train',
dir=tempfile.gettempdir(),
delete=False)
self._create_training_examples_arc_eager(depgraphs, input_file)
input_file.close()
# Using the temporary file to train the libsvm classifier
x_train, y_train = load_svmlight_file(input_file.name)
# The parameter is set according to the paper:
# Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre
# this is very slow.
self._model = svm.SVC(
kernel='poly',
degree=2,
coef0=0,
gamma=0.2,
C=0.5,
verbose=False,
probability=True)
            print('Training support vector machine from input data file {}...'.format(
input_file.name))
self._model.fit(x_train, y_train)
print('done!')
finally:
# os.remove(input_file.name)
pass
def parse(self, depgraphs):
"""
:param depgraphs: the list of test sentence, each sentence is represented as a dependency graph where the 'head' information is dummy
:type depgraphs: list(DependencyGraph)
:return: list (DependencyGraph) with the 'head' and 'rel' information
"""
result = []
if not self._model:
raise ValueError('No model trained!')
for depgraph in depgraphs:
conf = Configuration(
depgraph, self._user_feature_extractor.extract_features)
while conf.buffer:
features = conf.extract_features()
col = []
row = []
data = []
for feature in features:
if feature in self._dictionary:
col.append(self._dictionary[feature])
row.append(0)
data.append(1.0)
np_col = np.array(sorted(col)) # NB : index must be sorted
np_row = np.array(row)
np_data = np.array(data)
x_test = sparse.csr_matrix(
(np_data, (np_row, np_col)), shape=(1, len(self._dictionary)))
pred_prob = self._model.predict_proba(x_test)[0]
sorted_predicted_values = [
self._model.classes_[x[0]]
for x in sorted(enumerate(pred_prob), key=operator.itemgetter(1), reverse=True)]
# Note that SHIFT is always a valid operation
for y_pred in sorted_predicted_values:
if y_pred in self._match_transition:
strTransition = self._match_transition[y_pred]
try:
baseTransition, relation = strTransition.split(":")
except ValueError:
baseTransition = strTransition
if baseTransition == self.transitions.LEFT_ARC:
if self.transitions.left_arc(conf, relation) != -1:
break
elif baseTransition == self.transitions.RIGHT_ARC:
if self.transitions.right_arc(conf, relation) != -1:
break
elif baseTransition == self.transitions.REDUCE:
if self.transitions.reduce(conf) != -1:
break
elif baseTransition == self.transitions.SHIFT:
if self.transitions.shift(conf) != -1:
break
else:
raise ValueError(
"The predicted transition is not recognized, expected errors")
# Finish with operations build the dependency graph from Conf.arcs
new_depgraph = copy.deepcopy(depgraph)
for key in new_depgraph.nodes:
node = new_depgraph.nodes[key]
node['rel'] = ''
# With the default, all the token depend on the Root
node['head'] = 0
for (head, rel, child) in conf.arcs:
c_node = new_depgraph.nodes[child]
c_node['head'] = head
c_node['rel'] = rel
result.append(new_depgraph)
return result
def save(self, filepath):
"""
Save the parameters with pickle
"""
with open(filepath, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def load(filepath):
        with open(filepath, 'rb') as f:
return pickle.load(f)
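# A minimal usage sketch (illustrative only; ArcEagerTransitions and
# FeatureExtractor are hypothetical implementations matching the constructor
# arguments above, and the depgraphs are NLTK-style DependencyGraph objects):
#
#   parser = TransitionParser(ArcEagerTransitions(), FeatureExtractor())
#   parser.train(train_depgraphs)
#   parsed_graphs = parser.parse(test_depgraphs)
#   parser.save('arc_eager.model')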
| Alexoner/mooc | coursera/nlpintro-001/Assignment1/code/providedcode/transitionparser.py | Python | apache-2.0 | 12,825 |
from .base import * # noqa
from .base import env
SECRET_KEY = env("SECRET_KEY", default="only dev replace me")
ALLOWED_HOSTS = ["*"]
#
# django session configure
#
SESSION_ENGINE = "django.contrib.sessions.backends.file"
SESSION_FILE_PATH = env("SESSION_PATH", default="/tmp")
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
# INSTALLED_APPS += [
# "debug_toolbar",
# ] # noqa F405
# DEBUG_TOOLBAR_PANELS = [
# 'debug_toolbar.panels.history.HistoryPanel',
# 'debug_toolbar.panels.versions.VersionsPanel',
# 'debug_toolbar.panels.timer.TimerPanel',
# 'debug_toolbar.panels.settings.SettingsPanel',
# 'debug_toolbar.panels.headers.HeadersPanel',
# 'debug_toolbar.panels.request.RequestPanel',
# 'debug_toolbar.panels.sql.SQLPanel',
# 'debug_toolbar.panels.staticfiles.StaticFilesPanel',
# 'debug_toolbar.panels.templates.TemplatesPanel',
# 'debug_toolbar.panels.cache.CachePanel',
# 'debug_toolbar.panels.signals.SignalsPanel',
# 'debug_toolbar.panels.logging.LoggingPanel',
# 'debug_toolbar.panels.redirects.RedirectsPanel',
# 'debug_toolbar.panels.profiling.ProfilingPanel',
# ]
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
# MIDDLEWARE += [
# "debug_toolbar.middleware.DebugToolbarMiddleware",
# ] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
# DEBUG_TOOLBAR_CONFIG = {
# "DISABLE_PANELS": [
# "debug_toolbar.panels.profiling.ProfilingPanel",
# "debug_toolbar.panels.redirects.RedirectsPanel",
# ],
# 'RESULTS_CACHE_SIZE': 3,
# "SHOW_TEMPLATE_CONTEXT": True,
# # "JQUERY_URL": "//cdn.bootcss.com/jquery/2.2.4/jquery.min.js",
# }
# # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
# INTERNAL_IPS = ["127.0.0.1"]
#
# django storage configure
# --------------------------------------------------------------------
DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_STORAGE_BUCKET_NAME = env("AWS_STORAGE_BUCKET_NAME")
AWS_S3_REGION_NAME = env("AWS_S3_REGION_NAME")
AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL")
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY")
AWS_S3_CUSTOM_DOMAIN = env("AWS_S3_CUSTOM_DOMAIN")
AWS_DEFAULT_ACL = env("AWS_DEFAULT_ACL", default="public-read")
AWS_QUERYSTRING_AUTH = False
# STATIC
# ----------------------------------------------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
# STATICFILES_STORAGE = "apps.ext.storages.backends.s3.StaticStorage"
# STATIC_AWS_IS_GZIPPED = True
# STATIC_AWS_S3_CUSTOM_DOMAIN = env("STATIC_AWS_S3_CUSTOM_DOMAIN", default=None)
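# Illustrative .env values for the storage settings above (placeholders only,
# not real endpoints or credentials):
#
#   AWS_STORAGE_BUCKET_NAME=my-bucket
#   AWS_S3_REGION_NAME=us-east-1
#   AWS_S3_ENDPOINT_URL=https://s3.example.com
#   AWS_ACCESS_KEY_ID=xxxxxxxx
#   AWS_SECRET_ACCESS_KEY=xxxxxxxx
#   AWS_S3_CUSTOM_DOMAIN=cdn.example.com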
| edison7500/dugong | dugong/settings/dev.py | Python | gpl-3.0 | 3,002 |
import sys
import warnings
from collections import defaultdict, OrderedDict
from django.db.models.query import RawQuerySet
from django.core.exceptions import FieldError
from itertools import groupby, chain, islice
from operator import itemgetter
from .utils import _getattr
from .validation import clean_dps, clean_pdps, clean_sortf_mapf_mts
# in Python 3 the standard str type is unicode and the
# unicode type has been removed so define the keyword here
if sys.version_info.major >= 3:
unicode = str
class DataPool(object):
"""DataPool holds the data retrieved from various models (tables)."""
def __init__(self, series):
"""Create a DataPool object as specified by the ``series``.
:Arguments:
- **series** *(list of dict)* - specifies the what data to retrieve
and where to retrieve it from. It is of the form ::
[{'options': {
'source': a django model, Manager or QuerySet,
},
'terms': [
'a_valid_field_name', ... ,
{'any_name': 'a_valid_field_name', ... },
]
},
...
]
Where
- **options** (**required**) - a ``dict``. Any of the `series
options <http://www.highcharts.com/ref/#series>`_ for the
Highcharts ``options`` object are valid.
- **terms** - is a list. Each element in ``terms`` is either
1. a ``str`` - needs to be a valid model field for the
corresponding ``source``, or
2. a ``dict`` - need to be of the form
``{'any_name': 'a_valid_field_name', ...}``.
To retrieve data from multiple models or QuerySets, just add more
dictionaries with the corresponding ``options`` and terms.
:Raises:
            - **APIInputError** - if the ``series`` argument has any invalid
parameters.
.. warning:: All elements in ``terms`` **must be unique** across all
the dictionaries in the ``series`` list. If there are two terms
with same ``name``, the latter one is going to overwrite the one
before it.
For example, the following is **wrong**: ::
[{'options': {
'source': SomeModel},
'terms':[
'foo',
'bar']},
{'options': {
'source': OtherModel},
'terms':[
'foo']}]
In this case, the term ``foo`` from ``OtherModel`` is going to
**overwrite** ``foo`` from ``SomeModel``.
Here is the **right** way of retrieving data from two different models
both of which have the same field name. ::
[{'options': {
'source': SomeModel},
'terms':[
'foo',
'bar']},
{'options': {
'source': OtherModel},
'terms':[
{'foo_2': 'foo'}]}]
"""
self.series = clean_dps(series)
self.query_groups = self._group_terms_by_query()
# Now get data
self._get_data()
def _group_terms_by_query(self, sort_by_term=None, *addl_grp_terms):
"""Groups all the terms that can be extracted in a single query. This
reduces the number of database calls.
:returns:
- a list of sub-lists where each sub-list has items that can
all be retrieved with the same query (i.e. terms from the same source
and any additional criteria as specified in addl_grp_terms).
"""
# TODO: using str(source.query) was the only way that I could think of
# to compare whether two sources are exactly same. Need to figure out
# if there is a better way. - PG
def sort_grp_fn(td_tk):
return tuple(chain(str(td_tk[1]['source'].query),
[list(td_tk[1][t]) for t in addl_grp_terms]))
def sort_by_term_fn(td_tk):
return -1 * (abs(td_tk[1][sort_by_term]))
s = sorted(self.series.items(), key=sort_grp_fn)
# The following groupby will create an iterator which returns
# <(grp-1, <(tk, td), ...>), (grp-2, <(tk, td), ...>), ...>
        # where each group key is built from the source query plus any additional grouping terms
qg = groupby(s, sort_grp_fn)
if sort_by_term is not None:
sort_by_fn = sort_by_term_fn
else:
sort_by_fn = None
qg = [sorted(itr, key=sort_by_fn) for (grp, itr) in qg]
return qg
def _generate_vqs(self):
# query_groups is a list of lists.
for tk_td_tuples in self.query_groups:
src = tk_td_tuples[0][1]['source']
try:
# RawQuerySet doesn't support values
if isinstance(src, RawQuerySet):
vqs = src
else:
vqs = src.values(*(td['field']
for (tk, td) in tk_td_tuples))
except FieldError:
# model attributes can't be resolved into fields
vqs = src
vqs2 = []
for v in vqs:
for (_, td) in tk_td_tuples:
f = td.get('fn')
if f:
v[td['field']] = f(_getattr(v, td['field']))
vqs2.append(v)
yield tk_td_tuples, vqs2
def _get_data(self):
for tk_td_tuples, vqs in self._generate_vqs():
vqs_list = list(vqs)
for tk, _ in tk_td_tuples:
# everything has a reference to the same list
self.series[tk]['_data'] = vqs_list
class PivotDataPool(DataPool):
"""PivotDataPool holds the data retrieved from various tables (models) and
then *pivoted* against the category fields."""
def __init__(self, series, top_n_term=None, top_n=None, pareto_term=None,
sortf_mapf_mts=None):
""" Creates a PivotDataPool object.
:Arguments:
- **series** (**required**) - a list of dicts that specifies the what
data to retrieve, where to retrieve it from and how to pivot the
data. It is of the form ::
[{'options': {
'source': django Model, Manager or QuerySet ,
'categories': ['a_valid_field', ...],
'legend_by': ['a_valid_field', ...] (optional),
'top_n_per_cat': a number (optional),
},
'terms': {
'any_name_here': django Aggregate,
'some_other_name':{
'func': django Aggregate,
#any options to override
...
},
...
}
},
... #repeat dicts with 'options' & 'terms'
]
Where
- **options** - is a dict that specifies the common options for all
the terms.
+ **source** (**required**) - is either a ``Model``, ``Manager``
or a ``QuerySet``.
+ **categories** (**required**) - is a list of model fields by
which the data needs to be pivoted by. If there is only a single
item, ``categories`` can just be a string instead of a list with
single element.
For example if you have a model with ``country``, ``state``,
``county``, ``city``, ``date``, ``rainfall``, ``temperature``
and you want to pivot the data by ``country`` and ``state``,
then ``categories = ['country', 'state']`` .
.. note:: Order of elements in the ``categories`` list matters!
``categories = ['country', 'state']`` groups your data first by
``country`` and then by ``state`` when running the SQL query.
This obviously is not the same as grouping by ``state`` first
and then by ``country``.
+ **legend_by** (*optional*) - is a list of model fields by which
the data needs to be legended by. For example, in the above case,
if you want to legend by ``county`` and ``city``, then
``legend_by = ['county', 'city']``
.. note:: Order of elements in the ``legend_by`` list matters!
See the note in ``categories`` above.
+ **top_n_per_cat** (*optional*) - The number of top items that
the legended entries need to be limited to in each category. For
example, in the above case, if you wanted only the top 3
``county/cities`` with highest rainfall for each of the
``country/state``, then ``top_n_per_cat = 3``.
- **terms** - is a ``dict``. The keys can be any strings (but helps
if they are meaningful aliases for the field). The values can
either be
+ a django ``Aggregate`` : of a valid field in corresponding model.
For example, ``Avg('temperature')``, ``Sum('price')``, etc. or
+ a ``dict``: In this case the ``func`` must specify relevant
django aggregate to retrieve. For example
``'func': Avg('price')``. The dict can also have any additional
entries from the options dict. Any entries here will override
the entries in the ``options`` dict.
- **top_n_term** (*optional*) - a string. Must be one of the keys in
the corresponding ``terms`` in the ``series`` argument.
- **top_n** (*optional*) - an integer. The number of items for the
corresponding ``top_n_term`` that need to be retained.
If ``top_n_term`` and ``top_n`` are present, only the ``top_n``
numberof items are going to displayed in the pivot chart. For
example, if you want to plot only the top 5 states with highest
average rainfall, you can do something like this. ::
PivotDataPool(
series = [
{'options': {
'source': RainfallData.objects.all(),
'categories': 'state'},
'terms': {
'avg_rain': Avg('rainfall')}}],
top_n_term = 'avg_rain',
top_n = 5)
Note that the ``top_n_term`` is ``'avg_rain'`` and **not** ``state``;
because we want to limit by the average rainfall.
- **pareto_term** (*optional*) - the term with respect to which the
pivot chart needs to be paretoed by.
For example, if you want to plot the average rainfall on the y-axis
w.r.t the state on the x-axis and want to pareto by the average
rainfall, you can do something like this. ::
PivotDataPool(
series = [
{'options': {
'source': RainfallData.objects.all(),
'categories': 'state'},
'terms': {
'avg_rain': Avg('rainfall')}}],
pareto_term = 'avg_rain')
- **sortf_mapf_mts** (*optional*) - a ``tuple`` with three elements of
the form ``(sortf, mapf, mts)`` where
+ **sortf** - is a function (or a callable) that is used as a `key`
when sorting the category values.
For example, if ``categories = 'month_num'`` and if the months
need to be sorted in reverse order, then ``sortf`` can be ::
sortf = lambda *x: (-1*x[0],)
.. note:: ``sortf`` is passed the category values as tuples and
must return tuples!
If ``categories`` is ``['city', 'state']`` and if the category
values returned need to be sorted with state first and then city,
then ``sortf`` can be ::
sortf = lambda *x: (x[1], x[0])
The above ``sortf`` is passed tuples like
``('San Francisco', 'CA')``, ``('New York', 'NY')``, ``...`` and
it returns tuples like ``('CA', 'San Francisco')``,
``('NY', 'New York')``, ``...`` which when used as keys to sort the
category values will obviously first sort by state and then by
city.
+ **mapf** - is a function (or a callable) that defines how the
category values need to be mapped.
For example, let's say ``categories`` is ``'month_num'`` and that
the category values that are retrieved from your database are
``1``, ``2``, ``3``, etc. If you want month *names* as the
category values instead of month numbers, you can define a
``mapf`` to transform the month numbers to month names like so ::
def month_name(*t):
names ={1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr',
5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug',
9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
month_num = t[0]
return (names[month_num], )
mapf = month_name
.. note:: ``mapf`` like ``sortf`` is passed the category values
as tuples and must return tuples.
+ **mts** - *map then sort* ; a ``bool``. If ``True``, the
category values are mapped first and then sorted, and if
``False`` category values are sorted first and then mapped.
In the above example of month names, we ``mts`` must be ``False``
because the months must first be sorted based on their number
and then mapped to their names. If ``mts`` is ``True``, the
month numbers would be transformed to the month names, and then
sorted, which would yield an order like ``Apr``, ``Aug``,
``Dec``, etc. (not what we want).
:Raises:
- **APIInputError** - if the ``series`` argument has any invalid
parameters.
Here is a full example of a ``series`` term that retrieves the
average temperature of the top 3 cities in each country/state and
the average rainfall of the top 2 cities in each country/state. ::
[{'options': {
'source': Weather.objects.all(),
'categories': ['country', 'state'],
'legend_by': 'city',
'top_n_per_cat': 3},
'terms': {
'avg_temp': Avg('temperature'),
'avg_rain': {
'func': Avg('rainfall'),
'top_n_per_cat': 2}}}]
The ``'top_n_per_cat': 2`` term in ``avg_rain`` dict overrides
        ``'top_n_per_cat': 3`` from the common options dict. Effectively,
the above ``series`` retrieves the *top 2* ``cities`` with
highest ``avg_rain`` in each ``country/state`` and *top 3* ``cities``
with highest ``avg_temp`` in each ``country/state``.
A single ``PivotDataPool`` can hold data from multiple Models.
If there are more models or QuerySets to retrieve the data from,
just add more dicts to the series list with different ``source``
values.
.. warning:: The ``keys`` for the ``terms`` must be **unique across
all the dictionaries** in the ``series`` list! If there are
multiple terms with same ``key``, the latter ones will just
overwrite the previous ones.
For instance, the following example is **wrong**. ::
[{'options': {
'source': EuropeWeather.objects.all(),
'categories': ['country', 'state']},
'terms': {
'avg_temp': Avg('temperature')}},
{'options': {
'source': AsiaWeather.objects.all(),
'categories': ['country', 'state']},
'terms': {
'avg_temp': Avg('temperature')}}]
The second ``avg_temp`` will overwrite the first one. Instead just
use different names for each of the keys in all the dictionaries.
Here is the **right** format. ::
[{'options': {
'source': EuropeWeather.objects.all(),
'categories': ['country', 'state']},
'terms': {
'europe_avg_temp': Avg('temperature')}},
{'options': {
'source': AsiaWeather.objects.all(),
'categories': ['country', 'state']},
'terms': {
'asia_avg_temp': Avg('temperature')}}]
"""
warnings.warn('PivotDataPool will be deprecated soon.'
' Use DataPool instead!', DeprecationWarning)
self.series = clean_pdps(series)
self.top_n_term = (top_n_term if top_n_term
in self.series.keys() else None)
self.top_n = (top_n if (self.top_n_term is not None and
isinstance(top_n, int)) else 0)
self.pareto_term = (pareto_term if pareto_term in
self.series.keys() else None)
self.sortf, self.mapf, self.mts = clean_sortf_mapf_mts(sortf_mapf_mts)
# query groups and data
self.query_groups = self._group_terms_by_query(
'top_n_per_cat', 'categories', 'legend_by'
)
self._get_data()
def _generate_vqs(self):
"""Generates and yields the value query set for each query in the
query group."""
# query_groups is a list of lists.
for tk_td_tuples in self.query_groups:
# tk: term key, td: term dict
# All (tk, td) tuples within the list tk_td_tuples, share the same
# source, categories and legend_by. So we can extract these three
# from the first tuple in the list.
tk, td = tk_td_tuples[0]
qs = td['source']
categories = td['categories']
legend_by = td['legend_by']
# vqs = values queryset
values_terms = chain(categories, legend_by)
vqs = qs.values(*values_terms)
# NOTE: Order of annotation is important!!!
# So need an OrderedDict. Can't use a regular dict.
ann_terms = OrderedDict((k, d['func']) for k, d in tk_td_tuples)
vqs = vqs.annotate(**ann_terms)
# Now order by
top_n_per_cat = td['top_n_per_cat']
if top_n_per_cat > 0:
order_by = ('-' + tk,)
elif top_n_per_cat < 0:
order_by = (tk,)
else:
order_by = ()
order_by_terms = chain(categories, order_by)
vqs = vqs.order_by(*order_by_terms)
yield tk_td_tuples, vqs
def _get_data(self):
# These are some of the attributes that will used to store some
# temporarily generated data.
self.cv_raw = set([])
_pareto_by_cv = defaultdict(int)
_cum_dfv_by_cv = defaultdict(int)
for tk_td_tuples, vqs in self._generate_vqs():
# tk: term key, td: term dict
# All (tk, td) tuples within the list tk_td_tuples, share the same
# source, categories and legend_by. So we can extract these three
# from the first tuple in the list.
tk, td = tk_td_tuples[0]
categories = td['categories']
legend_by = td['legend_by']
for i, (tk, td) in enumerate(tk_td_tuples):
# cv_lv_dfv: dict with category value, legend value as keys
# and datafunc-values as values.
# For example, if
# category = ['continent'], legend_by = ['country'] and
# func = Sum('population_millions')
# cv_lv_dfv = {'Asia': {'India': 1001, 'China': 1300},
# 'Europe': {'UK': 61.8, 'France': 62.6},
# ... }
cv_lv_dfv = defaultdict(dict)
# lv_set is the set of legend_values
# For instance, lv_set for the above example is
# set(['India', 'China', 'UK', 'France'])
lv_set = set()
# cv: category value. For example,
# if categories = ('continent', 'country'), then
# cv = ('NA', 'USA'), ('Asia', 'India'), etc.
# g_vqs_by_cv = grouped ValueQuerySet (grouped by cv)
# i.e. grouped by ('NA', 'USA'), ('Asia', 'India'), etc.
#
# vqs is a list of dicts. For example
# [{'continent': 'NA', 'country': 'USA', 'pop__sum': 300}]
for cv, g_vqs_by_cv in groupby(vqs, itemgetter(*categories)):
if not isinstance(cv, tuple):
cv = (cv,)
cv = tuple(map(unicode, cv))
self.cv_raw |= set([cv])
# For the first loop (i==0), the queryset is already
# pre-sorted by value of the data func alias (for example
# pop__sum) when retrieved from the DB. So don't
# sort it again. If we need to retrieve all the
# elements (not just top n) per category
                    # (td['top_n_per_cat'] == 0), we don't care about the
# sort order. Don't sort in this case.
if i != 0 and td['top_n_per_cat'] != 0:
g_vqs_by_cv.sort(key=itemgetter(tk),
reverse=(td['top_n_per_cat'] > 0))
# g_vqs_by_cv_dfv: Grouped Value QuerySet (grouped by
# category and then by datafunc value.
# alias = 'population__sum'
# itemgetter('pop__sum') = 10 etc.
# So grouped by pop__sum = 10, 9, etc.
# NOTE: Need this step to make sure we retain duplicates
# in the top n if there are multiple entries. For example
# if pop__sum is 10, 10, 9, 9, 7, 3, 2, 1 and we want
# top 3, then the result should we 10, 10, 9, 9, 7 and
# not just 10, 10, 9. A simple list slice will only retain
# 10, 10, 9. So it is not useful. An alternative is to
# group_by and then slice.
g_vqs_by_cv_dfv = groupby(g_vqs_by_cv, itemgetter(tk))
# Now that this is grouped by datafunc value, slice off
# if we only need the top few per each category
if td['top_n_per_cat'] != 0:
g_vqs_by_cv_dfv = islice(g_vqs_by_cv_dfv, 0,
abs(td['top_n_per_cat']))
# Now build the result dictionary
# dfv = datafunc value
# vqs_by_c_dfv = ValuesQuerySet by cat. and datafunc value
for dfv, vqs_by_cv_dfv in g_vqs_by_cv_dfv:
if tk == self.top_n_term:
_cum_dfv_by_cv[cv] += dfv
if tk == self.pareto_term:
_pareto_by_cv[cv] += dfv
for vd in vqs_by_cv_dfv:
# vd: values dict
# vd: {'continent': 'NA', 'country': 'USA',
# 'year': 2010, 'quarter': 2,
# 'population__avg': 301,
# 'gdp__avg': 14.12}
# category = ('continent', 'country',)
# legend = ('year', 'quarter')
# lv = (2010, 2)
# dfa = 'price__max'
# cv_lv_dfv[('NA', 'USA')][(2010, 2)] = 301
try:
lv = itemgetter(*legend_by)(vd)
if not isinstance(lv, tuple):
lv = (lv,)
lv = tuple(map(unicode, lv))
# If there is nothing to legend by i.e.
# legend_by=() then itemgetter raises a TypeError.
# Handle it.
except TypeError:
lv = ()
cv_lv_dfv[cv][lv] = vd[tk]
lv_set |= set([lv])
td['_cv_lv_dfv'] = cv_lv_dfv
td['_lv_set'] = lv_set
# If we only need top n items, remove the other items from self.cv_raw
if self.top_n_term:
cum_cv_dfv_items = sorted(_cum_dfv_by_cv.items(),
key=itemgetter(1),
reverse=self.top_n > 0)
cv_dfv_top_n_items = cum_cv_dfv_items[0:abs(self.top_n)]
self.cv_raw = [cv_dfv[0] for cv_dfv in cv_dfv_top_n_items]
else:
self.cv_raw = list(self.cv_raw)
# If we need to pareto, order the category values in pareto order.
if self.pareto_term:
pareto_cv_dfv_items = sorted(_pareto_by_cv.items(),
key=itemgetter(1),
reverse=True)
pareto_cv = [cv_dfv[0] for cv_dfv in pareto_cv_dfv_items]
if self.top_n_term:
                self.cv_raw = [_cv for _cv in pareto_cv if _cv in self.cv_raw]
else:
self.cv_raw = pareto_cv
if self.mapf is None:
self.cv = self.cv_raw
else:
self.cv = [self.mapf(_cv) for _cv in self.cv_raw]
else:
# otherwise, order them by sortf if there is one.
if not self.cv_raw:
# if there isn't any data available just
# set self.cv to empty list and return
# otherwise we get
# ValueError: not enough values to unpack (expected 2, got 0)
# from zip(*combined) below
self.cv = self.cv_raw
elif self.mapf is None:
self.cv_raw.sort(key=self.sortf)
self.cv = self.cv_raw
else:
if not self.mts:
self.cv_raw.sort(key=self.sortf)
self.cv = [self.mapf(_cv) for _cv in self.cv_raw]
if self.mts:
combined = sorted(zip(self.cv, self.cv_raw),
key=self.sortf)
self.cv, self.cv_raw = zip(*combined)
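# A minimal DataPool usage sketch (illustrative only; MonthlyWeatherByCity is a
# hypothetical model with 'month' and 'boston_temp' fields):
#
#   ds = DataPool(series=[{
#       'options': {'source': MonthlyWeatherByCity.objects.all()},
#       'terms': ['month', 'boston_temp']}])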
| pgollakota/django-chartit | chartit/chartdata.py | Python | bsd-2-clause | 26,655 |
#!/usr/bin/env python3
# Sample code for doing computations with braids
#
# The code here emphasizes clarity over speed. We have used the memoize()
# function to memoize functions that are called repeatedly with the same
# arguments. Use of memoize is an indication that better algorithms exist.
import hashlib
import bitcoin # uses python-bitcoinlib https://github.com/petertodd/python-bitcoinlib
from bitcoin.core import uint256_from_str as uint256
import graph_tool.all as gt
import graph_tool.draw as gtdraw
import numpy as np
from numpy.random import choice, sample, randint
from copy import copy
from math import sqrt, pi, sin, cos, acos
NETWORK_SIZE = 1.0 # The round-trip time in seconds to traverse the network
TICKSIZE = 0.1 # One "tick" of the network in which beads will be propagated and mined
MAX_HASH = 2**256-1 # Maximum value a 256 bit unsigned hash can have, used to calculate targets
bead_color = ( 27/255, 158/255, 119/255, 1) # Greenish
genesis_color = (217/255, 95/255, 2/255, 1) # Orangeish
cohort_color = (117/255, 112/255, 179/255, 1) # Purplish
tip_color = (231/255, 41/255, 138/255, 1) # Pinkish
sibling_color = (102/255, 166/255, 30/255, 1) # Light Greenish
highlight1_color = ( 1, 1, 0, 1) # Yellow
highlight2_color = ( 1, 0, 1, 1) # Magenta
highlight3_color = ( 0, 1, 1, 1) # Cyan
nohighlight_color = ( 1, 1, 1, 1) # White
me_color = ( 0, 0, 0, 1) # Black
descendant_color = highlight2_color
ancestor_color = highlight3_color
# A rotating color palette to color cohorts
color_palette = [genesis_color, cohort_color, sibling_color, tip_color]
#gencache = {}
#gencache[True] = {}
#gencache[False] = {}
cohort_size_benchmark = [] # cohort size vs time
def sha256(x: int): return hashlib.sha256(('%d'%x).encode()).digest()
def printvset(vs):
""" Print a (sub-)set of vertices in compact form. """
return("{"+",".join(sorted([str(v) for v in vs]))+"}")
def sph_arclen(n1, n2):
""" Compute the arc length on the surface of a unit sphere. """
# phi = 90 - latitude
phi1 = (90.0 - n1.latitude)*pi/180.0
phi2 = (90.0 - n2.latitude)*pi/180.0
# theta = longitude
theta1 = n1.longitude*pi/180.0
theta2 = n2.longitude*pi/180.0
c = sin(phi1)*sin(phi2)*cos(theta1-theta2) + cos(phi1)*cos(phi2)
return acos(c)
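# Sanity note for sph_arclen above (illustrative; assumes latitude/longitude are
# expressed in degrees, as the conversions inside imply): two nodes on the
# equator separated by 90 degrees of longitude give pi/2; antipodal nodes give pi.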
class Network:
""" Abstraction for an entire network containing <n> nodes. The network has
a internal clock for simulation which uses <ticksize>. Latencies are taken
from a uniform distribution on [0,1) so <ticksize> should be < 1.
"""
def __init__(self, nnodes, ticksize=TICKSIZE, npeers=4, target=None, topology='sphere'):
self.t = 0 # the current "time"
self.ticksize = ticksize # the size of a "tick": self.t += self.tick at each step
self.npeers = npeers
self.nnodes = nnodes
self.genesis = uint256(sha256(0))
self.beads = {} # a hash map of all beads in existence
#self.inflightdelay = {} # list of in-flight beads
#self.mempool = set() # A list of transactions for everyone to mine. Everyone
        # sees the same mempool, p2p propagation delays are not modelled
self.beads[self.genesis] = Bead(self.genesis, set(), set(), self, -1)
# FIXME not modelling mempool propagation means that we will either have all blocks in a round have
# the same tx, or none. Maybe each round mining should grab a random subset?
self.nodes = [Node(self.genesis, self, nodeid, target=target) for nodeid in range(nnodes)]
latencies = None
for (node, peers) in zip(self.nodes, [choice(list(set(range(nnodes)) - {me}), \
npeers, replace=False) for me in range(nnodes)]):
#print("Node ", node, " has peers ", peers)
if topology == 'sphere':
latencies = [10*sph_arclen(node, self.nodes[i]) for i in peers];
node.setpeers([self.nodes[i] for i in peers], latencies)
self.reset(target=target)
def tick(self, mine=True):
""" Execute one tick. """
self.t += self.ticksize
# Create a new set of transaction IDs in the mempool
#self.mempool.update([uint256(sha256(randint(2**63-1))) for dummy in range(randint(1,2))])
# Have each node attempt to mine a random subset of the global mempool
for node in self.nodes:
# numpy.random.choice doesn't work with sets :-(
#node.tick(choice(list(self.mempool), randint(len(self.mempool)), replace=False), mine)
node.tick(mine=mine)
for (node, bead) in copy(self.inflightdelay):
self.inflightdelay[(node, bead)] -= self.ticksize
if self.inflightdelay[(node, bead)] < 0:
node.receive(bead)
del self.inflightdelay[(node, bead)]
def broadcast(self, node, bead, delay):
""" Announce a block/bead discovery to a node who is <delay> away. """
if bead not in node.beads:
prevdelay = NETWORK_SIZE
if (node,bead) in self.inflightdelay: prevdelay = self.inflightdelay[(node, bead)]
self.inflightdelay[(node, bead)] = min(prevdelay, delay)
def reset(self, target=None):
self.t = 0
self.beads = {}
self.beads[self.genesis] = Bead(self.genesis, set(), set(), self, -1)
self.inflightdelay = {}
self.mempool = set()
for node in self.nodes:
node.reset(target)
def printinflightdelays(self):
for (node, bead) in self.inflightdelay:
print("bead ", bead, " to node ", node, " will arrive in %fs"%self.inflightdelay[(node, bead)])
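# A minimal simulation sketch for the Network class above (illustrative only;
# the target value is arbitrary and simply sets the mining difficulty):
#
#   net = Network(nnodes=25, target=2**240)
#   for _ in range(1000):
#       net.tick()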
class Node:
""" Abstraction for a node. """
def __init__(self, genesis, network, nodeid, target=None):
self.genesis = genesis
self.network = network
self.peers = []
self.latencies = []
self.nodeid = nodeid
# A salt for this node, so all nodes don't produce the same hashes
self.nodesalt = uint256(sha256(randint(2**63-1)))
self.nonce = 0 # Will be increased in the mining process
self.reset(target)
# Geospatial location information
self.latitude = pi*(1/2-sample(1))
self.longitude = 2*pi*sample(1)
def reset(self, target=None):
self.beads = [self.network.beads[self.network.genesis]] # A list of beads in the order received
self.braids = [Braid(self.beads)] # A list of viable braids, each having a braid tip
self.mempool = set() # A set of txids I'm mining
self.incoming = set() # incoming beads we were unable to process
self.target = target
self.braids[0].tips = {self.beads[0]}
self.hremaining = np.random.geometric(self.target/MAX_HASH)
def __str__(self):
return "<Node %d>"%self.nodeid
def setpeers(self, peers, latencies=None):
""" Add a peer separated by a latency <delay>. """
self.peers = peers
if latencies: self.latencies = latencies
else: self.latencies = sample(len(peers))*NETWORK_SIZE
assert(len(self.peers) == len(self.latencies))
def tick(self, newtxs=[], mine=True):
""" Add a Bead satisfying <target>. """
# First try to extend all braids by received beads that have not been added to a braid
newincoming = set()
oldtips = self.braids[0].tips
while len(newincoming) != len(self.incoming):
for bead in self.incoming:
for braid in self.braids:
if not braid.extend(bead):
newincoming.add(bead)
self.incoming = newincoming
if mine:
#PoW = uint256(sha256(self.nodesalt+self.nonce))
PoW = uint256(sha256(np.random.randint(1<<64-1)*np.random.randint(1<<64-1)))
self.nonce += 1
if PoW < self.target:
b = Bead(PoW, copy(self.braids[0].tips), copy(self.mempool), self.network, self.nodeid)
self.receive(b) # Send it to myself (will rebroadcast to peers)
# TODO remove txids from mempool
else :
self.hremaining -= 1
if(self.hremaining <= 0):
PoW = (uint256(sha256(self.nodesalt+self.nonce))*self.target)//MAX_HASH
self.nonce += 1
# The expectation of how long it will take to mine a block is Geometric
# This node will generate one after this many hashing rounds (ticks)
b = Bead(PoW, copy(self.braids[0].tips), copy(self.mempool), self.network, self.nodeid)
self.receive(b) # Send it to myself (will rebroadcast to peers)
self.hremaining = np.random.geometric(self.target/MAX_HASH)
elif(self.braids[0].tips != oldtips):
# reset mining if we have new tips
self.hremaining = np.random.geometric(self.target/MAX_HASH)
def receive(self, bead):
""" Recieve announcement of a new bead. """
# TODO Remove txids from mempool
if bead in self.beads: return
else: self.beads.append(bead)
for braid in self.braids:
if not braid.extend(bead):
self.incoming.add(bead) # index of vertex is index in list
self.send(bead)
def send(self, bead):
""" Announce a new block from a peer to this node. """
for (peer, delay) in zip(self.peers, self.latencies):
self.network.broadcast(peer, bead, delay)
class Bead:
""" A bead is either a block of transactions, or an individual transaction.
This class stores auxiliary information about a bead and is separate
from the vertex being stored by the Braid class. Beads are stored by
the Braid object in the same order as vertices. So if you know the
vertex v, the Bead instance is Braid.beads[int(v)]. graph_tool vertices
can be cast to integers as int(v), giving their index.
"""
# FIXME lots of stuff here
def __init__(self, hash, parents, transactions, network, creator):
self.t = network.t
self.hash = hash # a hash that identifies this block
self.parents = parents
self.children = set() # filled in by Braid.make_children()
self.siblings = set() # filled in by Braid.analyze
self.cohort = set() # filled in by Braid.analyze
self.transactions = transactions
self.network = network
self.creator = creator
if creator != -1: # if we're not the genesis block (which has no creator node)
self.difficulty = MAX_HASH/network.nodes[creator].target
else: self.difficulty = 1
self.sibling_difficulty = 0
network.beads[hash] = self # add myself to global list
self.reward = None # this bead's reward (filled in by Braid.rewards)
def __str__(self):
return "<Bead ...%04d>"%(self.hash%10000)
class Braid(gt.Graph):
""" A Braid is a Directed Acyclic Graph with no incest (parents may not also
be non-parent ancestors). A braid may have multiple tips. """
def __init__(self, beads=[]):
super().__init__(directed=True, vorder=True)
self.times = self.new_vertex_property("double")
self.beads = [] # A list of beads in this braid
self.vhashes = {} # A dict of (hash, Vertex) for each vertex
self.vcolors = self.new_vertex_property("vector<float>") # vertex colorings
self.vhcolors = self.new_vertex_property("vector<float>") # vertex halo colorings
self.groups = self.new_vertex_property("vector<float>") # vertex group (cohort number)
self.vsizes = self.new_vertex_property("float") # vertex size
self.ncohorts = -1 # updated by cohorts()
if beads:
for b in beads:
self.beads.append(b) # Reference to a list of beads
self.vhashes[b.hash] = self.add_vertex()
self.vhashes[b.hash].bead = b
self.vcolors[self.vhashes[b.hash]] = genesis_color
self.vhcolors[self.vhashes[b.hash]] = nohighlight_color
self.vsizes[self.vhashes[b.hash]] = 14
# FIXME add edges if beads has more than one element.
self.tips = {beads[-1]}
self.tips = set() # A tip is a bead with no children.
def extend(self, bead):
""" Add a bead to the end of this braid. Returns True if the bead
successfully extended this braid, and False otherwise. """
if (not bead.parents # No parents -- bad block
or not all([p.hash in self.vhashes for p in bead.parents]) # We don't have all parents
or bead in self.beads): # We've already seen this bead
return False
self.beads.append(bead)
self.vhashes[bead.hash] = self.add_vertex()
self.vhashes[bead.hash].bead = bead
self.vcolors[self.vhashes[bead.hash]] = bead_color
self.vhcolors[self.vhashes[bead.hash]] = nohighlight_color
self.vsizes[self.vhashes[bead.hash]] = 14
for p in bead.parents:
self.add_edge(self.vhashes[bead.hash], self.vhashes[p.hash])
self.times[self.vhashes[bead.hash]] = bead.t
if p in self.tips:
self.tips.remove(p)
self.tips.add(bead)
return True
def rewards(self, coinbase):
""" Compute the rewards for each bead, where each cohort is awarded
<coinbase> coins.
FIXME splitting of tx fees not implemented.
"""
for cohort in self.cohorts():
for c in cohort:
siblings = cohort - self.ancestors(c, cohort) - self.descendants(c, cohort) - {c}
bc = self.beads[int(c)]
# Denominator (normalization) for distribution among siblings
bc.sibling_difficulty = MAX_HASH/(sum([self.beads[int(s)].difficulty for s in siblings])
+ bc.difficulty)
N = sum([self.beads[int(c)].difficulty/self.beads[int(c)].sibling_difficulty for c in cohort])
for c in cohort:
bc = self.beads[int(c)]
bc.reward = coinbase*(bc.difficulty/bc.sibling_difficulty)/N
# FIXME I can make 3-way siblings too: find the common ancestor of any 3 siblings
# and ask what its rank is...
def siblings(self):
""" The siblings of a bead are other beads for which it cannot be
decided whether they come before or after this bead in time.
Note that it does not make sense to call siblings() on a cohort
which contains dangling chain tips. The result is a dict of
(s,v): (m,n)
which can be read as:
The sibling $s$ of vertex $v$ has a common ancestor $m$
generations away from $v$ and a common descendant $n$
generations away from $v$.
"""
retval = dict()
if self.ncohorts < 0:
for c in self.cohorts(): pass # force cohorts() to generate all cohorts
# FIXME Since siblings are mutual, we could compute (s,c) and (c,s) at the same time
for (cohort, ncohort) in zip(self.cohorts(), range(self.ncohorts)):
for c in cohort:
#siblings = cohort - self.ancestors(c, cohort) - self.descendants(c, cohort) - {c}
for s in self.sibling_cache[ncohort][c]:
ycas = self.youngest_common_ancestors({s,c})
ocds = self.oldest_common_descendants({s,c})
# Step through each generation of parents/children until the common ancestor is found
pgen = {s} # FIXME either c or s depending on whether we want to step from the
for m in range(1,len(cohort)):
pgen = {q for p in pgen for q in self.parents(p) }
if pgen.intersection(ycas) or not pgen: break
cgen = {s} # FIXME and here
for n in range(1,len(cohort)):
cgen = {q for p in cgen for q in self.children(p)}
if cgen.intersection(ocds) or not cgen: break
retval[int(s),int(c)] = (m,n)
return retval
def cohorts(self, initial_cohort=None, older=False, cache=False):
""" Given the seed of the next cohort (which is the set of beads one step older, in the next
cohort), build an ancestor and descendant set for each visited bead. A new cohort is
formed if we encounter a set of beads, stepping in the descendant direction, for which
*all* beads in this cohort are ancestors of the first generation of beads in the next
cohort.
This function will not return the tips nor any beads connected to them, that do not yet
form a cohort.
"""
cohort = head = nexthead = initial_cohort or frozenset([self.vertex(0)])
parents = ancestors = {h: self.next_generation(h, not older) - cohort for h in head}
ncohorts = 0
if cache and not hasattr(self, 'cohort_cache'):
self.cohort_cache = []
self.sibling_cache = {}
# These caches also contain the first beads *outside* the cohort in both the
# (ancestor,descendant) directions and are used primarily for finding siblings
self.ancestor_cache = {} # The ancestors of each bead, *within* their own cohort
self.descendant_cache = {} # The descendants of each bead, *within* their own cohort
while True :
if cache and ncohorts in self.cohort_cache:
yield self.cohort_cache[ncohorts]
else:
if cache:
acohort = cohort.union(self.next_generation(head, older)) # add the youngest beads in the ancestor cohort
ancestors = {int(v): frozenset(map(int,
self.ancestors(v, acohort))) for v in acohort}
self.ancestor_cache[ncohorts] = ancestors
dcohort = cohort.union(self.next_generation(head, not older)) # add the oldest beads in the descendant cohort
descendants = {int(v): frozenset(map(int,
self.descendants(v, dcohort))) for v in dcohort}
self.descendant_cache[ncohorts] = descendants
self.cohort_cache.append(cohort)
self.sibling_cache[ncohorts] = {v: cohort - ancestors[v] - descendants[v] -
frozenset([v]) for v in cohort}
yield cohort
ncohorts += 1
gen = head = nexthead
parents = ancestors = {h: self.next_generation(h, not older) - cohort for h in head}
while True :
gen = self.next_generation(gen, older)
if not gen:
self.ncohorts = ncohorts
return # Ends the iteration (StopIteration)
for v in gen: parents[v] = self.next_generation(v, not older)
while True: # Update ancestors: parents plus its parents' parents
oldancestors = {v: ancestors[v] for v in gen} # loop because ancestors may have new ancestors
for v in gen:
if all([p in ancestors for p in parents[v]]): # If we have ancestors for all parents,
ancestors[v] = parents[v].union(*[ancestors[p] for p in parents[v]]) # update the ancestors
if oldancestors == {v: ancestors[v] for v in gen}: break
if(all(p in ancestors for p in frozenset.union(*[parents[v] for v in gen])) # we have no missing ancestors
and all([h in ancestors[v] for h in head for v in gen])): # and everyone has all head beads as ancestors
cohort = frozenset.intersection(*[ancestors[v] for v in gen]) # We found a new cohort
nexthead = self.next_generation(cohort, older) - cohort
tail = self.next_generation(nexthead, not older) # the oldest beads in the candidate cohort
if all([n in ancestors and p in ancestors[n] for n in nexthead for p in tail]):
break
def cohort_time(self):
""" Compute the average cohort time and its standard deviation returned
as a tuple (mean, stddev). """
t = 0
ctimes = []
for c in self.cohorts():
if c == {self.vertex(0)}: continue # skip genesis bead
times = [self.beads[int(v)].t for v in c]
ctimes.append(max(times)-t)
t = max(times)
return (np.mean(ctimes), np.std(ctimes))
def exclude(self, vs, predicate):
""" Recursively exclude beads which satisfy a predicate (usually either
parents or children) -- this removes all ancestors or descendants. """
lastvs = copy(vs)
while True:
newvs = {v for v in vs if predicate(v) not in vs}
if newvs == lastvs: return newvs
lastvs = newvs
def common_generation(self, vs, older=True):
""" Find the first common ancestor/descendant generation of all vs, and
all intermediate ancestors/descendants by bfs. This is analogous to the
Most Recent Common Ancestor (MRCA) in biology. The first return value
should be the seed for the *next* cohort while the second return value
is the *current* cohort. """
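# Hedged usage note: youngest_common_ancestors({a, b}) and
# oldest_common_descendants({a, b}) below are thin wrappers around this method and
# give, respectively, the first generation of beads that all of {a, b} descend from
# and the first generation of beads that descends from all of {a, b}.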
if older: (edgef, nodef, nextgen_f) = ("out_edges","target", self.parents)
else: (edgef, nodef, nextgen_f) = ("in_edges", "source", self.children)
if not isinstance(vs, set): vs = {vs}
lastvs = self.exclude(vs, nextgen_f)
nextgen = lastgen = {v: nextgen_f(v) for v in lastvs}
firstv = next(iter(lastvs))
niter = 0
while True:
commond = frozenset.intersection(*[nextgen[v] for v in nextgen]) - lastvs
if commond: return commond
else: # add one generation of descendants for bfs
nextgenupd = dict()
for v in lastgen:
nextgenupd[v] = nextgen_f(lastgen[v])
nextgen[v] = frozenset.union(nextgen[v], nextgenupd[v])
# We hit a tip, on all paths there can be no common descendants
if not all([nextgenupd[v] for v in nextgenupd]):
return set()
lastgen = nextgen
niter += 1
if niter > 1000:
raise Exception("infinite loop in common_generation? ")
def oldest_common_descendants(self, vs):
return self.common_generation(vs, older=False)
def youngest_common_ancestors(self, vs):
return self.common_generation(vs, older=True)
def all_generations(self, v:gt.Vertex, older, cohort=None, limit=None):
""" Return all vertices in <cohort> older or younger depending on the value of <older>. """
result = gen = self.next_generation(frozenset([v]), older)
while gen:
gen = self.next_generation(gen, older)
result = result.union(gen)
if cohort: result = result.intersection(cohort)
return result
def ancestors(self, v:gt.Vertex, cohort=None, limit=None):
return self.all_generations(v, older=True, cohort=cohort, limit=limit)
def descendants(self, v:gt.Vertex, cohort=None, limit=None):
return self.all_generations(v, older=False, cohort=cohort, limit=limit)
def next_generation(self, vs, older):
""" Returns the set of vertices one generation from <vs> in the <older> direction. """
if older: (edgef, nodef) = ("out_edges","target")
else: (edgef, nodef) = ("in_edges", "source")
if isinstance(vs, gt.Vertex):
return frozenset([getattr(y, nodef)() for y in getattr(vs,edgef)()])
elif isinstance(vs, frozenset):
ng = [self.next_generation(v, older) for v in vs]
if not ng: return frozenset()
else: return frozenset.union(*ng)
def parents(self, vs):
return self.next_generation(vs, older=True)
def children(self, vs):
return self.next_generation(vs, older=False)
def plot(self, focusbead=None, cohorts=True, focuscohort=None, numbervertices=False,
highlightancestors=False, output=None, rewards=False, layout=None, **kwargs):
""" Plot this braid, possibly coloring graph cuts. <focusbead>
indicates which bead to consider for coloring its siblings and
cohort. """
vlabel = self.new_vertex_property("string")
pos = self.new_vertex_property("vector<double>")
if layout: pos = layout(self, **kwargs)
else: pos = self.braid_layout(**kwargs)
n = 0
kwargs = {'vertex_size': self.vsizes,
'vertex_font_size':10,
'nodesfirst':True,
'vertex_text':vlabel,
'vertex_halo':True,
'vertex_halo_size':0,
'vertex_fill_color':self.vcolors,
'vertex_halo_color':self.vhcolors,
'pos':pos}
if rewards:
# We want the sum of the area of the beads to be a constant. Since
# the area is pi r^2, the vertex size should scale like the sqrt of
# the reward
self.rewards(400)
for v in self.vertices():
if self.beads[int(v)].reward:
self.vsizes[v] = sqrt(self.beads[int(v)].reward)
else:
self.vsizes[v] = 0
if output: kwargs['output'] = output
if focusbead:
if not hasattr(self, 'sibling_cache'):
for c in self.cohorts(cache=True): pass
ancestors = self.ancestors(focusbead)
descendants = self.descendants(focusbead)
kwargs['vertex_halo_size'] = 1.5
for v in self.vertices():
# Decide the bead's color
if v.out_degree() == 0:
self.vcolors[v] = genesis_color
elif v.in_degree() == 0:
self.vcolors[v] = tip_color
else:
self.vcolors[v] = bead_color
# Decide the highlight color
if v == focusbead:
self.vhcolors[v] = me_color
elif v in ancestors and highlightancestors:
self.vhcolors[v] = ancestor_color
elif v in descendants and highlightancestors:
self.vhcolors[v] = descendant_color
else:
self.vhcolors[v] = nohighlight_color
# Label our siblings with their rank
siblings = self.siblings()
for cohort in self.cohorts():
if focusbead in cohort:
for c in cohort:
self.vcolors[c] = cohort_color
for (s,v),(m,n) in siblings.items():
if v == focusbead:
vlabel[self.vertex(s)] = "%d,%d"%(m,n)
#self.vcolors[s] = sibling_color
break
else:
cnum = 0
if cohorts:
for c in self.cohorts():
for v in c:
if focuscohort == cnum:
self.vhcolors[v] = highlight1_color
self.vcolors[v] = color_palette[cnum%len(color_palette)]
self.vhcolors[v] = nohighlight_color
cnum += 1
if numbervertices: kwargs['vertex_text'] = self.vertex_index
return gtdraw.graph_draw(self, **kwargs)
def braid_layout(self, **kwargs):
""" Create a position vertex property for a braid. We use the actual bead time for the x
coordinate, and a spring model for determining y.
FIXME how do we minimize crossing edges?
"""
# FIXME what I really want here is symmetry about a horizontal line, and a spring-block layout that
# enforces the symmetry.
# 1. randomly assign vertices to "above" or "below" the midline.
# 2. Compute how many edges cross the midline (
# 3. Compute SFDP holding everything fixed except those above, in a single cohort.
# 4. Repeat for the half of the cohort below the midline
# 5. Iterate this a couple times...
groups = self.new_vertex_property("int")
pos = self.new_vertex_property("vector<double>")
pin = self.new_vertex_property("bool")
xpos = 1
for c in self.cohorts():
head = gen = self.children(self.parents(c)-c)
for (v,m) in zip(head, range(len(head))):
pin[v] = True
pos[v] = np.array([xpos, len(head)-1-2*m])
while gen.intersection(c):
gen = self.children(gen).intersection(c)
xpos += 1
xpos -= 1 # We already stepped over the tail in the above loop
tail = self.parents(self.children(c) - c) - head
for (v,m) in zip(tail, range(len(tail))):
pin[v] = True
pos[v] = np.array([xpos, len(tail)-1-2*m])
xpos += 1
# position remaining beads not in a cohort but not tips
gen = self.children(c) - c
for (v,m) in zip(gen,range(len(gen))):
pos[v] = np.array([xpos, len(gen)-1-2*m])
while True: # Count number of generations to tips
gen = self.children(gen) - gen
if not gen: break
xpos += 1
# position tips
tips = frozenset(map(lambda x: self.vhashes[x.hash], self.tips))
for (v,m) in zip(tips,range(len(tips))):
pos[v] = np.array([xpos, len(tips)-1-2*m])
pin[v] = True
# feed it all to the spring-block algorithm.
if 'C' not in kwargs: kwargs['C'] = 0.1
if 'K' not in kwargs: kwargs['K'] = 2
return gt.sfdp_layout(self, pos=pos, pin=pin, groups=groups, **kwargs)
| mcelrath/braidcoin | braids.py | Python | gpl-3.0 | 30,925 |
import random
from CommandTemplate import CommandTemplate
from IrcMessage import IrcMessage
class Command(CommandTemplate):
triggers = ['dice', 'roll']
helptext = "Roll dice. Simple. Format is either <sides> [<rolls>], or <rolls>d<sides> like in those nerdy tabletop games"
def execute(self, message):
"""
:type message: IrcMessage
"""
replytext = u""
rollcount = 1
sides = -1
total = 0.0
rollLimit = 1000
displayRollsLimit = 20 #If there's more than this many rolls, don't list individual rolls
displaySidesLimit = 999999999 #1 billion -1, if there's more than this many sides, don't list all the rolls
if message.messagePartsLength == 0:
replytext = u"You want the classic six-sided die, I assume? Rolling... And it lands on a... {}!".format(random.randint(1, 6))
else:
#No '1d12' or anything, just numbers
if 'd' not in message.messageParts[0].lower():
#assuming '!dice [sides] [rolls]'
try:
sides = int(message.messageParts[0])
except ValueError:
sides = 6
replytext += u"(I don't think '{}' is a valid number of sides, I'll just use {} sides) ".format(message.messageParts[0], sides)
if message.messagePartsLength > 1:
try:
rollcount = int(message.messageParts[1])
except ValueError:
replytext += u"(I don't know how to roll '{}' times, so I'm just gonna roll once) ".format(message.messageParts[1])
rollcount = 1
else:
#There's a 'd' in the message, so it's probably something like '1d12'
diceroll = message.messageParts[0].lower().split("d")
#Verify that the number of sides was entered correctly
if len(diceroll) == 1 or len(diceroll[1]) == 0:
sides = 20
replytext += u"(I think you forgot to add the number of sides, I'll just assume you want {}) ".format(sides)
else:
try:
sides = int(diceroll[1])
except ValueError:
sides = 20
replytext += u"(I don't know what to do with '{}', I'll just use {}-sided dice) ".format(diceroll[1], sides)
#Do the same check for the number of dice rolls
if len(diceroll) == 0 or len(diceroll[0]) == 0:
replytext += u"(Did you forget the number of rolls? I'll just roll once then) "
rollcount = 1
else:
try:
rollcount = int(diceroll[0])
except ValueError:
rollcount = 1
replytext += u"(I don't know how many rolls '{}' is, so I'll just roll once) ".format(diceroll[0])
#Preventing negative numbers
if rollcount <= 0:
replytext += u"(I can't roll {} times, so I'm gonna assume you want a single roll) ".format(rollcount)
rollcount = 1
if sides <= 0:
replytext += u"(A die with {} sides is a bit weird, I'll just use this one with 6 sides) ".format(sides)
sides = 6
elif sides == 1:
replytext += u"(A single side? But that... Fine, I'll just roll with it) "
elif sides == 2:
replytext += u"(I'd suggest flipping a coin, but this'll work too) "
#Only keep the actual rolls if there's not too many
keepRollValues = (rollcount <= displayRollsLimit and sides <= displaySidesLimit)
rollValues = []
#On to the actual rolling!
if rollcount <= rollLimit:
for roll in xrange(rollcount):
rollValue = random.randint(1, sides)
if keepRollValues:
rollValues.append("{:,}".format(rollValue)) #Use format to get thousands-separators
total += rollValue
else:
#Far too many rolls, estimate the expected value instead. With floats to allow for decimals
sidesFloat = float(sides)
total = (sidesFloat + 1) * (sidesFloat / 2) * (float(rollcount) / sidesFloat)
#Clean up any trailing decimal zeroes if necessary
if int(total) == total:
total = int(total)
average = float(total) / float(rollcount)
if rollcount == 1:
replytext += u"A single {:,}-sided die roll, I can do that. Rolling, rolling... and it lands on... {:,}!".format(sides, total)
elif rollcount <= displayRollsLimit:
if sides <= displaySidesLimit:
replytext += u"{:,} rolls with {:,}-sided dice: {} = {:,}, average of {:,}".format(rollcount, sides, u" + ".join(rollValues), total, average)
else:
replytext += u"{:,} rolls with {:,}-sided dice. That's a lot of sides, I hope you don't mind that I don't show them all. " \
u"Your total is... (oof, clumsy large dice)... {:,}, with an average of {:,}".format(rollcount, sides, total, average)
elif rollcount <= rollLimit:
replytext += u"{} rolls with {:,}-sided dice. That's a quite a few rolls, but luckily I'm pretty fast. Your total is... hang on... {:,}, " \
u"with an average roll of {:,}".format(rollcount, sides, total, average)
else:
replytext += u"{:,} is a LOT of rolls, even I would spend ages on that. I'll just give you the expected value, that'll be close enough. " \
u"And that is... {:,}!".format(rollcount, total)
message.reply(replytext, "say")
| Didero/DideRobot | commands/Dice.py | Python | mit | 4,880 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import random_string, nowdate
from erpnext.hr.doctype.expense_claim.expense_claim import make_bank_entry
from erpnext.accounts.doctype.payment_entry.payment_entry import get_payment_entry
test_records = frappe.get_test_records('Expense Claim')
class TestExpenseClaim(unittest.TestCase):
def test_total_expense_claim_for_project(self):
frappe.db.sql("""delete from `tabTask` where project = "_Test Project 1" """)
frappe.db.sql("""delete from `tabProject` where name = "_Test Project 1" """)
frappe.get_doc({
"project_name": "_Test Project 1",
"doctype": "Project",
"tasks" :
[{ "title": "_Test Project Task 1", "status": "Open" }]
}).save()
existing_claimed_amount = frappe.db.get_value("Project", "_Test Project 1", "total_expense_claim")
task_name = frappe.db.get_value("Task", {"project": "_Test Project 1"})
payable_account = get_payable_account("Wind Power LLC")
make_expense_claim(300, 200,"Travel Expenses - WP", "Wind Power LLC",
payable_account, "_Test Project 1", task_name)
self.assertEqual(frappe.db.get_value("Task", task_name, "total_expense_claim"), 200)
self.assertEqual(frappe.db.get_value("Project", "_Test Project 1", "total_expense_claim"),
existing_claimed_amount + 200)
expense_claim2 = make_expense_claim(600, 500, "Travel Expenses - WP", "Wind Power LLC",
payable_account, "_Test Project 1", task_name)
self.assertEqual(frappe.db.get_value("Task", task_name, "total_expense_claim"), 700)
self.assertEqual(frappe.db.get_value("Project", "_Test Project 1", "total_expense_claim"),
existing_claimed_amount + 700)
expense_claim2.cancel()
frappe.delete_doc("Expense Claim", expense_claim2.name)
self.assertEqual(frappe.db.get_value("Task", task_name, "total_expense_claim"), 200)
self.assertEqual(frappe.db.get_value("Project", "_Test Project 1", "total_expense_claim"),
existing_claimed_amount+200)
def test_expense_claim_status(self):
payable_account = get_payable_account("Wind Power LLC")
expense_claim = make_expense_claim(300, 200, "Travel Expenses - WP",
"Wind Power LLC", payable_account)
je_dict = make_bank_entry("Expense Claim", expense_claim.name)
je = frappe.get_doc(je_dict)
je.posting_date = nowdate()
je.cheque_no = random_string(5)
je.cheque_date = nowdate()
je.submit()
expense_claim = frappe.get_doc("Expense Claim", expense_claim.name)
self.assertEqual(expense_claim.status, "Paid")
je.cancel()
expense_claim = frappe.get_doc("Expense Claim", expense_claim.name)
self.assertEqual(expense_claim.status, "Unpaid")
def test_expense_claim_gl_entry(self):
payable_account = get_payable_account("Wind Power LLC")
expense_claim = make_expense_claim(300, 200, "Travel Expenses - WP",
"Wind Power LLC", payable_account)
expense_claim.submit()
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type='Expense Claim' and voucher_no=%s
order by account asc""", expense_claim.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = dict((d[0], d) for d in [
[payable_account, 0.0, 200.0],
["Travel Expenses - WP", 200.0, 0.0]
])
for gle in gl_entries:
self.assertEquals(expected_values[gle.account][0], gle.account)
self.assertEquals(expected_values[gle.account][1], gle.debit)
self.assertEquals(expected_values[gle.account][2], gle.credit)
def test_rejected_expense_claim(self):
payable_account = get_payable_account("Wind Power LLC")
expense_claim = frappe.get_doc({
"doctype": "Expense Claim",
"employee": "_T-Employee-0001",
"payable_account": payable_account,
"approval_status": "Rejected",
"expenses":
[{ "expense_type": "Travel", "default_account": "Travel Expenses - WP", "claim_amount": 300, "sanctioned_amount": 200 }]
})
expense_claim.submit()
self.assertEquals(expense_claim.status, 'Rejected')
self.assertEquals(expense_claim.total_sanctioned_amount, 0.0)
gl_entry = frappe.get_all('GL Entry', {'voucher_type': 'Expense Claim', 'voucher_no': expense_claim.name})
self.assertEquals(len(gl_entry), 0)
def test_advance_payment(self):
expense_claim = make_expense_claim(150, 150, "Travel Expenses - _TC",
advance_required=1, submit=False)
payment_entry = get_payment_entry("Expense Claim", expense_claim.name, bank_amount=50)
payment_entry.received_amount = payment_entry.paid_amount = 50
payment_entry.get("references")[0].allocated_amount = 50
payment_entry.reference_no = "1"
payment_entry.reference_date = "2016-01-01"
payment_entry.save()
payment_entry.submit()
expense_claim.load_from_db()
self.assertEqual(expense_claim.total_advance_paid, 50)
expense_claim.submit()
payment_entry = get_payment_entry("Expense Claim", expense_claim.name)
payment_entry.reference_no = "1"
payment_entry.reference_date = "2016-01-01"
payment_entry.save()
payment_entry.submit()
expense_claim.load_from_db()
self.assertEqual(expense_claim.total_advance_paid, 50)
self.assertEqual(expense_claim.total_amount_reimbursed, 100)
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type='Expense Claim' and voucher_no=%s
order by account asc""", expense_claim.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = dict((d[0], d) for d in [
[get_advance_account("_Test Company"), 0.0, 50.0],
[get_payable_account("_Test Company"), 0.0, 100.0],
["Travel Expenses - _TC", 150.0, 0.0]
])
for gle in gl_entries:
self.assertEquals(expected_values[gle.account][0], gle.account)
self.assertEquals(expected_values[gle.account][1], gle.debit)
self.assertEquals(expected_values[gle.account][2], gle.credit)
def get_payable_account(company):
return frappe.db.get_value('Company', company, 'default_payable_account')
def get_advance_account(company):
return frappe.db.get_value('Company', company, 'default_advance_account') \
or frappe.db.get_value('Company', company, 'default_receivable_account')
def make_expense_claim(claim_amount, sanctioned_amount, expense_account, company="_Test Company",
payable_account=None, project=None, task_name=None,
advance_required=0, advance_account=None, submit=True):
expense_claim = frappe.get_doc({
"doctype": "Expense Claim",
"employee": "_T-Employee-0001",
"payable_account": payable_account or get_payable_account(company),
"advance_account": advance_account or get_advance_account(company),
"advance_required": advance_required,
"approval_status": "Approved",
"company": company,
"expenses": [{
"expense_type": "Travel",
"default_account": expense_account,
"claim_amount": claim_amount,
"sanctioned_amount": sanctioned_amount
}]
})
if project:
expense_claim.project = project
if task_name:
expense_claim.task = task_name
expense_claim.save()
if submit:
expense_claim.submit()
return expense_claim
| manqala/erpnext | erpnext/hr/doctype/expense_claim/test_expense_claim.py | Python | gpl-3.0 | 7,091 |
import mimetypes
# This module is used by PastyAppConfig.ready().
EXTRA_TYPES = {
'.yaml': 'application/x-yaml',
'.json': 'application/json', # App Engine: text/plain
'.js': 'application/javascript', # App Engine: application/x-javascript
}
def add_content_types():
"""Load extra content types for classifying pastes."""
for ext in EXTRA_TYPES:
mimetypes.add_type(EXTRA_TYPES[ext], ext)
| davidwtbuxton/captain-pasty | pasty/content_types.py | Python | mit | 426 |
#! python3
# ==========================================
# Unity Project - A Test Framework for C
# Copyright (c) 2015 Alexander Mueller / XelaRellum@web.de
# [Released under MIT License. Please refer to license.txt for details]
# Based on the ruby script by Mike Karlesky, Mark VanderVoord, Greg Williams
# ==========================================
import sys
import os
import re
from glob import glob
class UnityTestSummary:
def __init__(self):
self.report = ''
self.total_tests = 0
self.failures = 0
self.ignored = 0
def run(self):
# Clean up result file names
results = []
for target in self.targets:
results.append(target.replace('\\', '/'))
# Dig through each result file, looking for details on pass/fail:
failure_output = []
ignore_output = []
for result_file in results:
lines = list(map(lambda line: line.rstrip(), open(result_file, "r").read().split('\n')))
if len(lines) == 0:
raise Exception("Empty test result file: %s" % result_file)
details = self.get_details(result_file, lines)
failures = details['failures']
ignores = details['ignores']
if len(failures) > 0: failure_output.append('\n'.join(failures))
if len(ignores) > 0: ignore_output.append('\n'.join(ignores))
tests,failures,ignored = self.parse_test_summary('\n'.join(lines))
self.total_tests += tests
self.failures += failures
self.ignored += ignored
if self.ignored > 0:
self.report += "\n"
self.report += "--------------------------\n"
self.report += "UNITY IGNORED TEST SUMMARY\n"
self.report += "--------------------------\n"
self.report += "\n".join(ignore_output)
if self.failures > 0:
self.report += "\n"
self.report += "--------------------------\n"
self.report += "UNITY FAILED TEST SUMMARY\n"
self.report += "--------------------------\n"
self.report += '\n'.join(failure_output)
self.report += "\n"
self.report += "--------------------------\n"
self.report += "OVERALL UNITY TEST SUMMARY\n"
self.report += "--------------------------\n"
self.report += "{total_tests} TOTAL TESTS {failures} TOTAL FAILURES {ignored} IGNORED\n".format(total_tests = self.total_tests, failures=self.failures, ignored=self.ignored)
self.report += "\n"
return self.report
def set_targets(self, target_array):
self.targets = target_array
def set_root_path(self, path):
self.root = path
def usage(self, err_msg=None):
print("\nERROR: ")
if err_msg:
print(err_msg)
print("\nUsage: unity_test_summary.py result_file_directory/ root_path/")
print(" result_file_directory - The location of your results files.")
print(" Defaults to current directory if not specified.")
print(" Should end in / if specified.")
print(" root_path - Helpful for producing more verbose output if using relative paths.")
sys.exit(1)
def get_details(self, result_file, lines):
results = { 'failures': [], 'ignores': [], 'successes': [] }
for line in lines:
parts = line.split(':')
if len(parts) != 5:
continue
src_file,src_line,test_name,status,msg = parts
if len(self.root) > 0:
line_out = "%s%s" % (self.root, line)
else:
line_out = line
if status == 'IGNORE':
results['ignores'].append(line_out)
elif status == 'FAIL':
results['failures'].append(line_out)
elif status == 'PASS':
results['successes'].append(line_out)
return results
def parse_test_summary(self, summary):
m = re.search(r"([0-9]+) Tests ([0-9]+) Failures ([0-9]+) Ignored", summary)
if not m:
raise Exception("Couldn't parse test results: %s" % summary)
return int(m.group(1)), int(m.group(2)), int(m.group(3))
if __name__ == '__main__':
uts = UnityTestSummary()
try:
#look in the specified or current directory for result files
if len(sys.argv) > 1:
targets_dir = sys.argv[1]
else:
targets_dir = './'
targets = list(map(lambda x: x.replace('\\', '/'), glob(targets_dir + '*.test*')))
if len(targets) == 0:
raise Exception("No *.testpass or *.testfail files found in '%s'" % targets_dir)
uts.set_targets(targets)
#set the root path
if len(sys.argv) > 2:
root_path = sys.argv[2]
else:
root_path = os.path.split(__file__)[0]
uts.set_root_path(root_path)
#run the summarizer
print(uts.run())
except Exception as e:
uts.usage(e)
| Stanford-BDML/super-scamp | vendor/unity/auto/unity_test_summary.py | Python | gpl-3.0 | 5,033 |
# https://leetcode.com/problems/consecutive-characters/
# The power of the string is the maximum length of a non-empty substring that
# contains only one unique character.
# Given a string s, return the power of s.
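# Worked example (illustrative): in "hooraaaaaaaaaaay" the longest run of a single
# character is the eleven consecutive 'a's, so the power of the string is 11.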
import pytest
class Solution:
def maxPower(self, s: str) -> int:
count = 1
i = 0
while i < len(s):
j = i + 1
c = 1
while j < len(s) and s[i] == s[j]:
c += 1
i += 1
j += 1
if count < c:
count = c
i += 1
return count
@pytest.mark.parametrize(
("s", "result"),
[("cc", 2), ("leetcode", 2), ("ccbccbb", 2), ("hooraaaaaaaaaaay", 11)],
)
def test_maxPower(s: str, result: int) -> None:
assert result == Solution().maxPower(s)
| anu-ka/coding-problems | Python/consecutive_character.py | Python | mit | 812 |
from . import Job
import mirror
import logging
import re
class Publish(Job):
def __init__(self, pages = None, export_ns = '', publisher = None):
Job.__init__(self)
self.pages = pages
self.export_ns = export_ns
self.publisher = publisher
def summary(self):
return "Publishing pages %s" % self.pages
def required(self):
return True
def perform(self, fidoc):
dw = fidoc.get_wiki()
export_ns = []
dw.resolve(self.export_ns, [], export_ns)
logging.info("Export to namespace %s" % export_ns)
# print(export_ns)
# sys.exit()
restpub = mirror.create_restrictedwikipublisher(fidoc, export_ns)
pages = []
if self.pages is not None:
all_pages_info = dw.allpages()
rx_pages = [re.compile(p) for p in self.pages]
for info in all_pages_info:
p = dw.resolve(info['id'])
if p is None:
continue
for rx in rx_pages:
if rx.match(p) is not None:
pages.append(p)
break
else:
# rx_pages = mirror.public_pages()
pages = mirror.list_all_public_pages(dw, restpub)
# print(pages)
pages.sort()
mirror.publish_pages(dw, restpub, pages, export_ns)
logging.info("Finished!")
return True
def responsible(self, fidoc):
return self.publisher
| stlemme/python-dokuwiki-export | jobs/publish.py | Python | mit | 1,237 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "icekit.project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| ic-labs/django-icekit | icekit/bin/manage.py | Python | mit | 258 |
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------------------------------------------------------------------
# Scraper for pelisalacarta, palco and other XBMC/Kodi plugins based on the API at https://www.themoviedb.org/
# version 1.3:
# - Fixed an error when path_poster and backdrop_path were returned as None
# - Fixed an error that made the genre list accumulate entries from one call to the next
# - Added the get_generos() method
# - Added the optional parameter idioma_alternativo to the get_sinopsis() method
#
#
# Usage:
# Constructors:
# Tmdb(texto_buscado, tipo)
# Parameters:
# texto_buscado:(str) Text or part of the text to search for
# tipo: ("movie" or "tv") Type of result to search for, movies or TV shows. Default "movie"
# (optional) idioma_busqueda: (str) language code according to ISO 639-1
# (optional) include_adult: (bool) Whether adult content is included in the search or not. Default 'False'
# (optional) year: (str) Release year.
# (optional) page: (int) When a search has many results, they are organized in pages.
# We can load whichever page we want, although by default it is always the first one.
# Return:
# This call returns a Tmdb object containing the first page of the result of searching for 'texto_buscado'
# on the themoviedb.org website. The more optional parameters are included, the more precise the search will be.
# The object is also initialized with the first result of the first page of results.
# Tmdb(id_Tmdb,tipo)
# Parameters:
# id_Tmdb: (str) Identifier code of a given movie or TV show on themoviedb.org
# tipo: ("movie" or "tv") Type of result to search for, movies or TV shows. Default "movie"
# (optional) idioma_busqueda: (str) language code according to ISO 639-1
# Return:
# This call returns a Tmdb object containing the result of searching for a movie or TV show with the identifier id_Tmdb
# on the themoviedb.org website.
# Tmdb(external_id, external_source, tipo)
# Parameters:
# external_id: (str) Identifier code of a given movie or TV show on the website referenced by 'external_source'.
# external_source: (For TV shows:"imdb_id","freebase_mid","freebase_id","tvdb_id","tvrage_id"; For movies:"imdb_id")
# tipo: ("movie" or "tv") Type of result to search for, movies or TV shows. Default "movie"
# (optional) idioma_busqueda: (str) language code according to ISO 639-1
# Return:
# This call returns a Tmdb object containing the result of searching for a movie or TV show with the identifier 'external_id' from
# the website referenced by 'external_source' on the themoviedb.org website.
#
# Main methods:
# get_id(): Returns a str with the Tmdb identifier of the loaded movie or TV show, or an empty string if nothing is loaded.
# get_sinopsis(idioma_alternativo): Returns a str with the synopsis of the loaded TV show or movie.
# get_poster (tipo_respuesta,size): Gets the poster or a list of posters.
# get_backdrop (tipo_respuesta,size): Gets a backdrop image or a list of backdrop images.
# get_fanart (tipo,idioma,temporada): Gets a list of images of the specified type from the Fanart.tv website
# get_episodio (temporada, capitulo): Gets a dictionary with episode-specific data.
# get_generos(): Returns a str with the list of genres the movie or TV show belongs to.
#
#
# Other methods:
# load_resultado(resultado, page): When a search returns several results, we can select which specific result, and from which page, to load the data.
#
# Information about the API: http://docs.themoviedb.apiary.io
# --------------------------------------------------------------------------------------------------------------------------------------------
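# Hedged usage sketch (illustrative only, not part of the original module):
#   otmdb = Tmdb(texto_buscado="Superman", tipo="movie", idioma_busqueda="it", year="1978")
#   if otmdb.get_id():
#       print otmdb.get_sinopsis()
#       print otmdb.get_poster(tipo_respuesta="str", size="w500")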
import traceback
import urllib2
from core import logger
from core import scrapertools
class Tmdb(object):
# Atributo de clase
dic_generos = {}
'''
dic_generos={"id_idioma1": {"tv": {"id1": "name1",
"id2": "name2"
},
"movie": {"id1": "name1",
"id2": "name2"
}
}
}
'''
def __search(self, index_resultado=0, page=1):
# http://api.themoviedb.org/3/search/movie?api_key=f7f51775877e0bb6703520952b3c7840&query=superman&language=es&include_adult=false&page=1
url = 'http://api.themoviedb.org/3/search/%s?api_key=f7f51775877e0bb6703520952b3c7840&query=%s&language=%s&include_adult=%s&page=%s' % (
self.busqueda["tipo"], self.busqueda["texto"].replace(' ', '%20'), self.busqueda["idioma"],
self.busqueda["include_adult"], str(page))
if self.busqueda["year"] != '': url += '&year=' + self.busqueda["year"]
buscando = self.busqueda["texto"].capitalize()
logger.info("[Tmdb.py] Buscando '" + buscando + "' en pagina " + str(page))
# print url
response_dic = self.__get_json(url)
self.total_results = response_dic["total_results"]
self.total_pages = response_dic["total_pages"]
if self.total_results > 0:
self.results = response_dic["results"]
if len(self.results) > 0:
self.__leer_resultado(self.results[index_resultado])
else:
# No hay resultados de la busqueda
logger.info("[Tmdb.py] La busqueda de '" + buscando + "' no dio resultados para la pagina " + str(page))
def __by_id(self, source="tmdb"):
if source == "tmdb":
# http://api.themoviedb.org/3/movie/1924?api_key=f7f51775877e0bb6703520952b3c7840&language=es&append_to_response=images,videos,external_ids&include_image_language=es,null
url = 'http://api.themoviedb.org/3/%s/%s?api_key=f7f51775877e0bb6703520952b3c7840&language=%s&append_to_response=images,videos,external_ids&include_image_language=%s,null' % (
self.busqueda["tipo"], self.busqueda["id"], self.busqueda["idioma"], self.busqueda["idioma"])
buscando = "id_Tmdb: " + self.busqueda["id"]
else:
# http://api.themoviedb.org/3/find/%s?external_source=imdb_id&api_key=f7f51775877e0bb6703520952b3c7840
url = 'http://api.themoviedb.org/3/find/%s?external_source=%s&api_key=f7f51775877e0bb6703520952b3c7840&language=%s' % (
self.busqueda["id"], source, self.busqueda["idioma"])
buscando = source.capitalize() + ": " + self.busqueda["id"]
logger.info("[Tmdb.py] Buscando " + buscando)
# print url
resultado = self.__get_json(url)
if source != "tmdb":
if self.busqueda["tipo"] == "movie":
resultado = resultado["movie_results"]
else:
resultado = resultado["tv_results"]
if len(resultado) > 0:
resultado = resultado[0]
if len(resultado) > 0:
if self.total_results == 0:
self.results.append(resultado)
self.total_results = 1
self.total_pages = 1
# print resultado
self.__leer_resultado(resultado)
else: # No hay resultados de la busqueda
logger.info("[Tmdb.py] La busqueda de " + buscando + " no dio resultados.")
def __get_json(self, url):
try:
headers = {'Accept': 'application/json'}
request = urllib2.Request(url, headers=headers)
response_body = urllib2.urlopen(request).read()
except:
logger.info("[Tmdb.py] Fallo la busqueda")
logger.info(traceback.format_exc())
return None
try:
try:
from core import jsontools # first option: use jsontools.py ...
return jsontools.load_json(response_body)
except:
import json # ... and if that fails, fall back to the bundled json module
return json.loads(response_body)
except:
logger.info("[Tmdb.py] Fallo json")
logger.info(traceback.format_exc())
return None
def __inicializar(self):
# Inicializamos las colecciones de resultados, fanart y temporada
for i in (self.result, self.fanart, self.temporada):
for k in i.keys():
if type(i[k]) == str:
i[k] = ""
elif type(i[k]) == list:
i[k] = []
elif type(i[k]) == dict:
i[k] = {}
def __init__(self, **kwargs):
self.page = kwargs.get('page', 1)
self.results = []
self.total_pages = 0
self.total_results = 0
self.fanart = {}
self.temporada = {}
self.busqueda = {'id': "",
'texto': "",
'tipo': kwargs.get('tipo', 'movie'),
'idioma': kwargs.get('idioma_busqueda', 'it'),
'include_adult': str(kwargs.get('include_adult', 'false')),
'year': kwargs.get('year', '')
}
self.result = {'adult': "",
'backdrop_path': "", # ruta imagen de fondo mas valorada
# belongs_to_collection
'budget': "", # Presupuesto
'genres': [], # lista de generos
'homepage': "",
'id': "", 'imdb_id': "", 'freebase_mid': "", 'freebase_id': "", 'tvdb_id': "", 'tvrage_id': "",
# IDs equivalentes
'original_language': "",
'original_title': "",
'overview': "", # sinopsis
# popularity
'poster_path': "",
# production_companies
# production_countries
'release_date': "",
'revenue': "", # recaudacion
# runtime
# spoken_languages
'status': "",
'tagline': "",
'title': "",
'video': "", # ("true" o "false") indica si la busqueda movies/id/videos devolvera algo o no
'vote_average': "",
'vote_count': "",
'name': "", # nombre en caso de personas o series (tv)
'profile_path': "", # ruta imagenes en caso de personas
'known_for': {}, # Diccionario de peliculas en caso de personas (id_pelicula:titulo)
'images_backdrops': [],
'images_posters': [],
'images_profiles': [],
'videos': []
}
def rellenar_dic_generos():
# Rellenar diccionario de generos del tipo e idioma seleccionados
if not Tmdb.dic_generos.has_key(self.busqueda["idioma"]):
Tmdb.dic_generos[self.busqueda["idioma"]] = {}
if not Tmdb.dic_generos[self.busqueda["idioma"]].has_key(self.busqueda["tipo"]):
Tmdb.dic_generos[self.busqueda["idioma"]][self.busqueda["tipo"]] = {}
url = 'http://api.themoviedb.org/3/genre/%s/list?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' % (
self.busqueda["tipo"], self.busqueda["idioma"])
lista_generos = self.__get_json(url)["genres"]
for i in lista_generos:
Tmdb.dic_generos[self.busqueda["idioma"]][self.busqueda["tipo"]][str(i["id"])] = i["name"]
if self.busqueda["tipo"] == 'movie' or self.busqueda["tipo"] == "tv":
if not Tmdb.dic_generos.has_key(self.busqueda["idioma"]):
rellenar_dic_generos()
elif not Tmdb.dic_generos[self.busqueda["idioma"]].has_key(self.busqueda["tipo"]):
rellenar_dic_generos()
else:
# La busqueda de personas no esta soportada en esta version.
raise Exception("Parametros no validos al crear el objeto Tmdb.\nConsulte los modos de uso.")
if kwargs.has_key('id_Tmdb'):
self.busqueda["id"] = kwargs.get('id_Tmdb')
self.__by_id()
elif kwargs.has_key('texto_buscado'):
self.busqueda["texto"] = kwargs.get('texto_buscado')
self.__search(page=self.page)
elif kwargs.has_key('external_source') and kwargs.has_key('external_id'):
# TV Series: imdb_id, freebase_mid, freebase_id, tvdb_id, tvrage_id
# Movies: imdb_id
if (self.busqueda["tipo"] == 'movie' and kwargs.get('external_source') == "imdb_id") or (
self.busqueda["tipo"] == 'tv' and kwargs.get('external_source') in (
"imdb_id", "freebase_mid", "freebase_id", "tvdb_id", "tvrage_id")):
self.busqueda["id"] = kwargs.get('external_id')
self.__by_id(source=kwargs.get('external_source'))
else:
raise Exception("Parametros no validos al crear el objeto Tmdb.\nConsulte los modos de uso.")
def __leer_resultado(self, data):
for k, v in data.items():
if k == "genre_ids": # Lista de generos (lista con los id de los generos)
for i in v:
try:
self.result["genres"].append(
self.dic_generos[self.busqueda["idioma"]][self.busqueda["tipo"]][str(i)])
except:
pass
elif k == "genre": # Lista de generos (lista de objetos {id,nombre})
for i in v:
self.result["genres"].append(i['name'])
elif k == "known_for": # Lista de peliculas de un actor
for i in v:
self.result["known_for"][i['id']] = i['title']
elif k == "images": # Se incluyen los datos de las imagenes
if v.has_key("backdrops"): self.result["images_backdrops"] = v["backdrops"]
if v.has_key("posters"): self.result["images_posters"] = v["posters"]
if v.has_key("profiles"): self.result["images_profiles"] = v["profiles"]
elif k == "videos": # Se incluyen los datos de los videos
self.result["videos"] = v["results"]
elif k == "external_ids": # Listado de IDs externos
for kj, id in v.items():
# print kj + ":" + str(id)
if self.result.has_key(kj): self.result[kj] = str(id)
elif self.result.has_key(k): # el resto
if type(v) == list or type(v) == dict:
self.result[k] = v
elif v is None:
self.result[k] = ""
else:
self.result[k] = str(v)
def load_resultado(self, index_resultado=0, page=1):
if self.total_results <= 1: # Si no hay mas un resultado no podemos cambiar
return None
if page < 1 or page > self.total_pages: page = 1
if index_resultado < 0: index_resultado = 0
self.__inicializar()
if page != self.page:
self.__search(index_resultado=index_resultado, page=page)
else:
print self.result["genres"]
self.__leer_resultado(self.results[index_resultado])
def get_generos(self):
# --------------------------------------------------------------------------------------------------------------------------------------------
# Parameters:
# none
# Return: (str)
# Returns the list of genres the movie or TV show belongs to.
# --------------------------------------------------------------------------------------------------------------------------------------------
return ', '.join(self.result["genres"])
def get_id(self):
# --------------------------------------------------------------------------------------------------------------------------------------------
# Parameters:
# none
# Return: (str)
# Returns the Tmdb identifier of the loaded movie or TV show, or an empty string if nothing is loaded.
# This method can be used to find out whether a search returned any results or not.
# --------------------------------------------------------------------------------------------------------------------------------------------
return str(self.result['id'])
def get_sinopsis(self, idioma_alternativo=""):
# --------------------------------------------------------------------------------------------------------------------------------------------
# Parameters:
# idioma_alternativo: (str) language code, according to ISO 639-1, used when no synopsis exists in the language set for the search.
# By default the original language is used. If None is passed as idioma_alternativo, only the language set for the search is tried.
# Return: (str)
# Returns the synopsis of a movie or TV show
# --------------------------------------------------------------------------------------------------------------------------------------------
ret = ""
if self.result['id']:
ret = self.result['overview']
if self.result['overview'] == "" and str(idioma_alternativo).lower() != 'none':
# Vamos a lanzar una busqueda por id y releer de nuevo la sinopsis
self.busqueda["id"] = str(self.result["id"])
if idioma_alternativo:
self.busqueda["idioma"] = idioma_alternativo
else:
self.busqueda["idioma"] = self.result['original_language']
url = 'http://api.themoviedb.org/3/%s/%s?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' % (
self.busqueda["tipo"], self.busqueda["id"], self.busqueda["idioma"])
resultado = self.__get_json(url)
if resultado:
if resultado.has_key('overview'):
self.result['overview'] = resultado['overview']
ret = self.result['overview']
return ret
def get_poster(self, tipo_respuesta="str", size="original"):
# --------------------------------------------------------------------------------------------------------------------------------------------
# Parameters:
# tipo_respuesta: ("list", "str") Type of data returned by this method. Default "str"
# size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original")
# Indicates the width(w) or height(h) of the image to download. Default "original"
# Return:
# If tipo_respuesta is "list" it returns a list with the urls of all poster images of the specified size.
# If tipo_respuesta is "str" it returns the url of the highest rated poster image of the specified size.
# If the specified size does not exist, images at the original size are returned.
# --------------------------------------------------------------------------------------------------------------------------------------------
ret = []
if not size in (
"w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original"):
size = "original"
if self.result["poster_path"] is None or self.result["poster_path"] == "":
poster_path = ""
else:
poster_path = 'http://image.tmdb.org/t/p/' + size + self.result["poster_path"]
if tipo_respuesta == 'str':
return poster_path
elif self.result["id"] == "":
return []
if len(self.result['images_posters']) == 0:
# Vamos a lanzar una busqueda por id y releer de nuevo todo
self.busqueda["id"] = str(self.result["id"])
self.__by_id()
if len(self.result['images_posters']) > 0:
for i in self.result['images_posters']:
imagen_path = i['file_path']
if size != "original":
# We cannot request sizes larger than the original
if size[0] == 'w' and int(i['width']) < int(size[1:]):
size = "original"
elif size[0] == 'h' and int(i['height']) < int(size[1:]):
size = "original"
ret.append('http://image.tmdb.org/t/p/' + size + imagen_path)
else:
ret.append(poster_path)
return ret
def get_backdrop(self, tipo_respuesta="str", size="original"):
# --------------------------------------------------------------------------------------------------------------------------------------------
# Parameters:
# tipo_respuesta: ("list", "str") Type of data returned by this method. Default "str"
# size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original")
# Indicates the width(w) or height(h) of the image to download. Default "original"
# Return:
# If tipo_respuesta is "list" it returns a list with the urls of all backdrop images of the specified size.
# If tipo_respuesta is "str" it returns the url of the highest rated backdrop image of the specified size.
# If the specified size does not exist, images at the original size are returned.
# --------------------------------------------------------------------------------------------------------------------------------------------
ret = []
if not size in (
"w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original"):
size = "original"
if self.result["backdrop_path"] is None or self.result["backdrop_path"] == "":
backdrop_path = ""
else:
backdrop_path = 'http://image.tmdb.org/t/p/' + size + self.result["backdrop_path"]
if tipo_respuesta == 'str':
return backdrop_path
elif self.result["id"] == "":
return []
if len(self.result['images_backdrops']) == 0:
# Vamos a lanzar una busqueda por id y releer de nuevo todo
self.busqueda["id"] = str(self.result["id"])
self.__by_id()
if len(self.result['images_backdrops']) > 0:
for i in self.result['images_backdrops']:
imagen_path = i['file_path']
if size != "original":
# We cannot request sizes larger than the original
if size[0] == 'w' and int(i['width']) < int(size[1:]):
size = "original"
elif size[0] == 'h' and int(i['height']) < int(size[1:]):
size = "original"
ret.append('http://image.tmdb.org/t/p/' + size + imagen_path)
else:
ret.append(backdrop_path)
return ret
def get_fanart(self, tipo="hdclearart", idioma=["all"], temporada="all"):
# --------------------------------------------------------------------------------------------------------------------------------------------
# Parameters:
# tipo: ("hdclearlogo", "poster", "banner", "thumbs", "hdclearart", "clearart", "background", "clearlogo", "characterart", "seasonthumb", "seasonposter", "seasonbanner", "moviedisc")
# Indicates the type of Art to fetch, according to the Fanart.tv website. Some of these types may only be available for movies or for TV shows, as the case may be. Default "hdclearart"
# (optional) idioma: (list) Language codes according to ISO 639-1, "all" (default) for all languages or "00" for none. For example: idioma=["es","00","en"] would include results in Spanish, with no language set, and in English, in that order.
# (optional, TV shows only) temporada: (str) An integer representing the season number, the number zero for specials, or "all" for images valid for any season. Default "all"
# Return: (list)
# Returns a list with the urls of the images matching the input parameters, ordered by Fanart.tv votes
# --------------------------------------------------------------------------------------------------------------------------------------------
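# Illustrative call (assumed): get_fanart(tipo="background", idioma=["it", "en", "00"])
# returns background URLs preferring Italian art, then English, then art with no
# language tag, each group kept in the order Fanart.tv reports it.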
if self.result["id"] == "": return []
if len(self.fanart) == 0: # Si esta vacio acceder a Fanart.tv y cargar el resultado
if self.busqueda['tipo'] == 'movie':
# http://assets.fanart.tv/v3/movies/1924?api_key=dffe90fba4d02c199ae7a9e71330c987
url = "http://assets.fanart.tv/v3/movies/" + str(
self.result["id"]) + "?api_key=dffe90fba4d02c199ae7a9e71330c987"
temporada = ""
elif self.busqueda['tipo'] == 'tv':
# En este caso necesitamos el tvdb_id
if self.result["tvdb_id"] == '':
# Vamos lanzar una busqueda por id y releer de nuevo todo
self.busqueda["id"] = str(self.result["id"])
self.__by_id()
# http://assets.fanart.tv/v3/tv/153021?api_key=dffe90fba4d02c199ae7a9e71330c987
url = "http://assets.fanart.tv/v3/tv/" + str(
self.result["tvdb_id"]) + "?api_key=dffe90fba4d02c199ae7a9e71330c987"
else:
# 'person' No soportado
return None
fanarttv = self.__get_json(url)
if fanarttv is None: # Si el item buscado no esta en Fanart.tv devolvemos una lista vacia
return []
for k, v in fanarttv.items():
if k in ("hdtvlogo", "hdmovielogo"):
self.fanart["hdclearlogo"] = v
elif k in ("tvposter", "movieposter"):
self.fanart["poster"] = v
elif k in ("tvbanner", "moviebanner"):
self.fanart["banner"] = v
elif k in ("tvthumb", "moviethumb"):
self.fanart["thumbs"] = v
elif k in ("hdclearart", "hdmovieclearart"):
self.fanart["hdclearart"] = v
elif k in ("clearart", "movieart"):
self.fanart["clearart"] = v
elif k in ("showbackground", "moviebackground"):
self.fanart["background"] = v
elif k in ("clearlogo", "movielogo"):
self.fanart["clearlogo"] = v
elif k in ("characterart", "seasonthumb", "seasonposter", "seasonbanner", "moviedisc"):
self.fanart[k] = v
        # initialize the dictionary with the requested languages
ret_dic = {}
for i in idioma:
ret_dic[i] = []
for i in self.fanart[tipo]:
if i["lang"] in idioma:
if not i.has_key("season"):
ret_dic[i["lang"]].append(i["url"])
elif temporada == "" or (temporada == 'all' and i["season"] == 'all'):
ret_dic[i["lang"]].append(i["url"])
else:
if i["season"] == "":
i["season"] = 0
else:
i["season"] = int(i["season"])
if i["season"] == int(temporada):
ret_dic[i["lang"]].append(i["url"])
elif "all" in idioma:
ret_dic["all"].append(i["url"])
ret_list = []
for i in idioma:
ret_list.extend(ret_dic[i])
# print ret_list
return ret_list
def get_episodio(self, temporada=1, capitulo=1):
        # --------------------------------------------------------------------------------------------------------------------------------------------
        # Parameters:
        #   temporada: (int) Season number. Default 1.
        #   capitulo: (int) Episode number. Default 1.
        # Return: (dict)
        #   Returns a dictionary with the following keys:
        #       "temporada_nombre", "temporada_sinopsis", "temporada_poster", "episodio_titulo", "episodio_sinopsis" and "episodio_imagen"
        # --------------------------------------------------------------------------------------------------------------------------------------------
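        # Usage sketch (hypothetical values; the object must hold a TV show, i.e. busqueda["tipo"] == "tv"):
        #     datos = o_tmdb.get_episodio(temporada=4, capitulo=8)
        #     print datos["episodio_titulo"], datos["episodio_imagen"]
        # An empty dict is returned when the id is missing, the search type is not "tv" or the lookup fails.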
if self.result["id"] == "" or self.busqueda["tipo"] != "tv": return {}
temporada = int(temporada)
capitulo = int(capitulo)
if temporada < 0: temporada = 1
if capitulo < 1: capitulo = 1
if not self.temporada.has_key("season_number") or self.temporada["season_number"] != temporada:
            # If there is no data for the requested season, query the web
# http://api.themoviedb.org/3/tv/1402/season/4?api_key=f7f51775877e0bb6703520952b3c7840&language=es
url = "http://api.themoviedb.org/3/tv/%s/season/%s?api_key=f7f51775877e0bb6703520952b3c7840&language=%s" % (
self.result["id"], temporada, self.busqueda["idioma"])
buscando = "id_Tmdb: " + str(self.result["id"]) + " temporada: " + str(temporada) + " capitulo: " + str(
capitulo)
logger.info("[Tmdb.py] Buscando " + buscando)
# print url
self.temporada = self.__get_json(url)
if self.temporada.has_key("status_code") or len(self.temporada["episodes"]) < capitulo:
            # An error occurred
self.temporada = {}
logger.info("[Tmdb.py] La busqueda de " + buscando + " no dio resultados.")
return {}
ret_dic = {"temporada_nombre": self.temporada["name"], "temporada_sinopsis": self.temporada["overview"],
"temporada_poster": ('http://image.tmdb.org/t/p/original' + self.temporada["poster_path"]) if \
self.temporada["poster_path"] else ""}
episodio = self.temporada["episodes"][capitulo - 1]
ret_dic["episodio_titulo"] = episodio["name"]
ret_dic["episodio_sinopsis"] = episodio["overview"]
ret_dic["episodio_imagen"] = ('http://image.tmdb.org/t/p/original' + episodio["still_path"]) if episodio[
"still_path"] else ""
return ret_dic
####################################################################################################
# for StreamOnDemand by costaplus
# ===================================================================================================
def info(title, year, tipo):
logger.info("streamondemand.core.tmdb info")
    plot = ""
    fanart = ""
    poster = ""
    try:
oTmdb = Tmdb(texto_buscado=title, year=year, tipo=tipo, include_adult="false", idioma_busqueda="it")
if oTmdb.total_results > 0:
infolabels = {"year": oTmdb.result["release_date"][:4],
"genre": ", ".join(oTmdb.result["genres"]),
"rating": float(oTmdb.result["vote_average"])}
fanart = oTmdb.get_backdrop()
poster = oTmdb.get_poster()
infolabels['plot'] = oTmdb.get_sinopsis()
plot = {"infoLabels": infolabels}
return plot, fanart, poster
except:
plot = ""
fanart = ""
poster = ""
return plot, fanart, poster
# ----------------------------------------------------------------------------------------------------
# ====================================================================================================
def infoSod(item, tipo="movie", ):
    '''
    :param item: item
    :return: returns the item completed, free of code errors
    '''
logger.info("streamondemand.core.tmdb infoSod")
logger.info("channel=[" + item.channel + "], action=[" + item.action + "], title[" + item.title + "], url=[" + item.url + "], thumbnail=[" + item.thumbnail + "], tipo=[" + tipo + "]")
try:
tmdbtitle = item.fulltitle.split("|")[0].split("{")[0].split("[")[0].split("(")[0]
year = scrapertools.find_single_match(item.fulltitle, '\((\d{4})\)')
plot, fanart, poster = info(tmdbtitle, year, tipo)
item.fanart = fanart if fanart != "" else poster
if plot:
if not plot['infoLabels']['plot']:
plot['infoLabels']['plot'] = item.plot
item.plot = str(plot)
except:
pass
return item
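# Usage sketch (illustrative; it assumes the calling channel passes Item objects whose fulltitle
# holds "Title (Year)" as parsed above):
#     item = infoSod(item, tipo="movie")
#     # item.plot now holds the stringified {"infoLabels": ...} dict and item.fanart a TMDb image URL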
# ===================================================================================================
| dentaku65/plugin.video.sod | core/tmdb.py | Python | gpl-3.0 | 33,580 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reads Summaries from and writes Summaries to event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import ops
from tensorflow.python.summary.writer.writer import LegacySummaryWriter as SummaryWriter
class SummaryWriterCache(object):
"""Cache for summary writers.
This class caches summary writers, one per directory.
"""
# Cache, keyed by directory.
_cache = {}
  # Lock protecting _cache.
_lock = threading.RLock()
@staticmethod
def clear():
"""Clear cached summary writers. Currently only used for unit tests."""
with SummaryWriterCache._lock:
SummaryWriterCache._cache = {}
@staticmethod
def get(logdir):
"""Returns the SummaryWriter for the specified directory.
Args:
logdir: str, name of the directory.
Returns:
A `SummaryWriter`.
"""
with SummaryWriterCache._lock:
if logdir not in SummaryWriterCache._cache:
SummaryWriterCache._cache[logdir] = SummaryWriter(
logdir, graph=ops.get_default_graph())
return SummaryWriterCache._cache[logdir]
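# Example usage (illustrative; the log directory is arbitrary):
#
#   writer = SummaryWriterCache.get('/tmp/train_logs')
#   assert writer is SummaryWriterCache.get('/tmp/train_logs')  # same cached instance per directory
#   SummaryWriterCache.clear()  # drop all cached writers (e.g. between unit tests)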
| nanditav/15712-TensorFlow | tensorflow/python/summary/writer/writer_cache.py | Python | apache-2.0 | 1,878 |
import clientPackets
import serverPackets
def handle(userToken, packetData):
# get token data
username = userToken.username
# Read packet data
packetData = clientPackets.setAwayMessage(packetData)
# Set token away message
userToken.setAwayMessage(packetData["awayMessage"])
# Send private message from Rohwabot
if packetData["awayMessage"] == "":
fokaMessage = "Your away message has been reset"
else:
fokaMessage = "Your away message is now: {}".format(packetData["awayMessage"])
userToken.enqueue(serverPackets.sendMessage("Rohwabot", username, fokaMessage))
print("{} has changed their away message to: {}".format(username, packetData["awayMessage"]))
| RlSEN/bannedcho | c.ppy.sh/setAwayMessageEvent.py | Python | gpl-3.0 | 674 |
from tornado.testing import gen_test
from .. server import AuthServer
from .. import options as _opts
from anthill.common.testing import ServerTestCase
class AccountsTestCase(ServerTestCase):
@classmethod
def need_test_db(cls):
return True
@classmethod
def get_server_instance(cls, db=None):
return AuthServer(db)
@gen_test
async def test_accounts(self):
async with self.test_db.acquire() as db:
account_id = int(await self.application.accounts.create_account(db=db))
self.assertGreater(account_id, 0)
info = await self.application.accounts.get_account_info(account_id, db=db)
self.assertEqual(info, {})
async def test_info(value, check):
await self.application.accounts.update_account_info(account_id, value, db=db)
account_info = await self.application.accounts.get_account_info(account_id, db=db)
self.assertEqual(account_info, check)
await test_info({"test": True}, {"test": True})
await test_info({"test": False}, {"test": False})
await test_info({"a": "string"}, {"test": False, "a": "string"})
await test_info({"b": 5}, {"test": False, "a": "string", "b": 5})
await test_info({"test": None}, {"a": "string", "b": 5})
await test_info({"test": ["a", "b"]}, {"test": ["a", "b"], "a": "string", "b": 5})
| anthill-services/anthill-login | anthill/login/tests/test_accounts.py | Python | mit | 1,446 |
from chatterbot import ChatBot
from plugin_system import Plugin
try:
from settings import USE_CHATTER
except ImportError:
USE_CHATTER = False
if not USE_CHATTER:
plugin = Plugin('Переписка с ботом',
usage=['бот [сообщение] - сообщение боту'])
chatbot = ChatBot(
'Валера',
trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
)
chatbot.train("chatterbot.corpus.russian")
@plugin.on_command('бот', 'бот,')
async def chat(msg, args):
return await msg.answer(str(chatbot.get_response(msg.text)))
| mrlinux777/vkbottsone | plugins/chatter.py | Python | mit | 628 |
from api.tests.test_api import TestAPICase
from api.views import OrgProfileViewSet
class TestOrgsAPI(TestAPICase):
def setUp(self):
super(TestOrgsAPI, self).setUp()
self.view = OrgProfileViewSet.as_view({
'get': 'list',
'post': 'create'
})
def test_orgs_list(self):
self._org_create()
request = self.factory.get('/', **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [self.company_data])
def test_orgs_get(self):
self._org_create()
view = OrgProfileViewSet.as_view({
'get': 'retrieve'
})
request = self.factory.get('/', **self.extra)
response = view(request)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.data, {'detail': 'Expected URL keyword argument `user`.'})
request = self.factory.get('/', **self.extra)
response = view(request, user='denoinc')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, self.company_data)
def test_orgs_create(self):
self._org_create()
| SEL-Columbia/formhub | api/tests/test_orgs_api.py | Python | bsd-2-clause | 1,217 |
import pytest
from machina.core.db.models import get_model
from machina.core.loading import get_class
from machina.test.factories import (
PostFactory, UserFactory, create_category_forum, create_forum, create_topic
)
Forum = get_model('forum', 'Forum')
ForumVisibilityContentTree = get_class('forum.visibility', 'ForumVisibilityContentTree')
@pytest.mark.django_db
class TestForumVisibilityContentTree(object):
@pytest.fixture(autouse=True)
def setup(self):
self.user = UserFactory.create()
# Set up the following forum tree:
#
# top_level_cat
# forum_1
# forum_2
# forum_2_child_1
# top_level_forum_1
# top_level_forum_2
# sub_cat
# sub_sub_forum
# top_level_forum_3
# forum_3
# forum_3_child_1
# forum_3_child_1_1
# deep_forum
# last_forum
#
self.top_level_cat = create_category_forum()
self.forum_1 = create_forum(parent=self.top_level_cat)
self.forum_2 = create_forum(parent=self.top_level_cat)
self.forum_2_child_1 = create_forum(parent=self.forum_2)
self.top_level_forum_1 = create_forum()
self.top_level_forum_2 = create_forum()
self.sub_cat = create_category_forum(parent=self.top_level_forum_2)
self.sub_sub_forum = create_forum(parent=self.sub_cat)
self.top_level_forum_3 = create_forum()
self.forum_3 = create_forum(parent=self.top_level_forum_3)
self.forum_3_child_1 = create_forum(parent=self.forum_3)
self.forum_3_child_1_1 = create_forum(parent=self.forum_3_child_1)
self.deep_forum = create_forum(parent=self.forum_3_child_1_1)
self.last_forum = create_forum()
# Set up a topic and some posts
self.topic_1 = create_topic(forum=self.forum_1, poster=self.user)
self.post_1 = PostFactory.create(topic=self.topic_1, poster=self.user)
self.topic_2 = create_topic(forum=self.forum_2, poster=self.user)
self.post_2 = PostFactory.create(topic=self.topic_2, poster=self.user)
self.topic_3 = create_topic(forum=self.forum_2_child_1, poster=self.user)
self.post_3 = PostFactory.create(topic=self.topic_3, poster=self.user)
def test_can_be_initialized_from_a_list_of_forums(self):
# Run & check
visibility_tree = ForumVisibilityContentTree.from_forums(Forum.objects.all())
for forum in Forum.objects.all():
assert forum in visibility_tree.forums
def test_can_return_the_root_level_number(self):
# Setup
visibility_tree = ForumVisibilityContentTree.from_forums(Forum.objects.all())
# Run & check
assert visibility_tree.root_level == 0
def test_can_return_its_top_nodes(self):
# Setup
visibility_tree = ForumVisibilityContentTree.from_forums(Forum.objects.all())
# Run & check
assert [n.obj for n in visibility_tree.top_nodes] == [
self.top_level_cat, self.top_level_forum_1, self.top_level_forum_2,
self.top_level_forum_3, self.last_forum, ]
def test_can_return_its_visible_forums(self):
# Setup
visibility_tree = ForumVisibilityContentTree.from_forums(Forum.objects.all())
# Run & check
assert [n.obj for n in visibility_tree.visible_nodes] == [
self.top_level_cat, self.forum_1, self.forum_2, self.forum_2_child_1,
self.top_level_forum_1, self.top_level_forum_2, self.sub_cat, self.top_level_forum_3,
self.forum_3, self.last_forum, ]
assert visibility_tree.visible_forums == [
self.top_level_cat, self.forum_1, self.forum_2, self.forum_2_child_1,
self.top_level_forum_1, self.top_level_forum_2, self.sub_cat, self.top_level_forum_3,
self.forum_3, self.last_forum, ]
@pytest.mark.django_db
class TestForumVisibilityContentNode(object):
@pytest.fixture(autouse=True)
def setup(self):
self.user = UserFactory.create()
# Set up the following forum tree:
#
# top_level_cat
# forum_1
# forum_2
# forum_2_child_1
# top_level_forum_1
# top_level_forum_2
# sub_cat
# sub_sub_forum
# top_level_forum_3
# forum_3
# forum_3_child_1
# forum_3_child_1_1
# deep_forum
# last_forum
#
self.top_level_cat = create_category_forum()
self.forum_1 = create_forum(parent=self.top_level_cat)
self.forum_2 = create_forum(parent=self.top_level_cat)
self.forum_2_child_1 = create_forum(parent=self.forum_2)
self.top_level_forum_1 = create_forum()
self.top_level_forum_2 = create_forum()
self.sub_cat = create_category_forum(parent=self.top_level_forum_2)
self.sub_sub_forum = create_forum(parent=self.sub_cat)
self.top_level_forum_3 = create_forum()
self.forum_3 = create_forum(parent=self.top_level_forum_3)
self.forum_3_child_1 = create_forum(parent=self.forum_3)
self.forum_3_child_1_1 = create_forum(parent=self.forum_3_child_1)
self.deep_forum = create_forum(parent=self.forum_3_child_1_1)
self.last_forum = create_forum()
# Set up a topic and some posts
self.topic_1 = create_topic(forum=self.forum_1, poster=self.user)
self.post_1 = PostFactory.create(topic=self.topic_1, poster=self.user)
self.topic_2 = create_topic(forum=self.forum_2, poster=self.user)
self.post_2 = PostFactory.create(topic=self.topic_2, poster=self.user)
self.topic_3 = create_topic(forum=self.forum_2_child_1, poster=self.user)
self.post_3 = PostFactory.create(topic=self.topic_3, poster=self.user)
def test_can_return_its_last_post(self):
# Setup
visibility_tree = ForumVisibilityContentTree.from_forums(Forum.objects.all())
# Run & check
assert visibility_tree.as_dict[self.top_level_cat.id].last_post == self.post_3
def test_can_return_its_last_post_date(self):
# Setup
visibility_tree = ForumVisibilityContentTree.from_forums(Forum.objects.all())
# Run & check
assert visibility_tree.as_dict[self.top_level_cat.id].last_post_on == self.post_3.created
def test_can_return_its_next_sibiling(self):
# Setup
visibility_tree = ForumVisibilityContentTree.from_forums(Forum.objects.all())
# Run & check
assert visibility_tree.as_dict[self.forum_1.id].next_sibling \
== visibility_tree.as_dict[self.forum_2.id]
assert visibility_tree.as_dict[self.top_level_cat.id].next_sibling \
== visibility_tree.as_dict[self.top_level_forum_1.id]
assert visibility_tree.as_dict[self.forum_3_child_1_1.id].next_sibling is None
def test_can_return_its_previous_sibiling(self):
# Setup
visibility_tree = ForumVisibilityContentTree.from_forums(Forum.objects.all())
# Run & check
assert visibility_tree.as_dict[self.forum_2.id].previous_sibling \
== visibility_tree.as_dict[self.forum_1.id]
assert visibility_tree.as_dict[self.top_level_forum_1.id].previous_sibling \
== visibility_tree.as_dict[self.top_level_cat.id]
assert visibility_tree.as_dict[self.forum_3_child_1_1.id].previous_sibling is None
def test_can_return_its_post_count(self):
# Setup
visibility_tree = ForumVisibilityContentTree.from_forums(Forum.objects.all())
# Run & check
assert visibility_tree.as_dict[self.top_level_cat.id].posts_count == 3
def test_can_return_its_topic_count(self):
# Setup
visibility_tree = ForumVisibilityContentTree.from_forums(Forum.objects.all())
# Run & check
assert visibility_tree.as_dict[self.top_level_cat.id].topics_count == 3
def test_can_return_an_appropriate_boolean_value(self):
visibility_tree_1 = ForumVisibilityContentTree.from_forums(Forum.objects.all())
visibility_tree_2 = ForumVisibilityContentTree.from_forums(
self.last_forum.get_descendants()
)
assert visibility_tree_1
assert not visibility_tree_2
| ellmetha/django-machina | tests/unit/apps/forum/test_visibility.py | Python | bsd-3-clause | 8,480 |
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
| harshilasu/LinkurApp | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/sdb/db/__init__.py | Python | gpl-3.0 | 1,108 |
# mapache, @cesans 2016 (c)
import matplotlib.pylab as plt
import matplotlib
import numpy as np
from sklearn import gaussian_process
import time
import datetime
class SingleBars:
def __init__(self, poll, parties, elections=None, join_coalitions=True):
plt.rcParams['figure.figsize'] = (12, 6)
self._fig, ax = plt.subplots()
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['font.weight'] = 'normal'
plt.rcParams['xtick.major.pad']='16'
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['axes.titleweight'] = 'bold'
parties_votes = []
for i, p in enumerate(parties.parties.values()):
parties_votes.append((p, poll.get_party(p,join_coalitions)))
parties_votes.sort(key=lambda x: x[1], reverse=True)
width = 0.6
left_lim = 0.1
plt.title(poll.pollster + poll.date.strftime(' - %-d %b'), loc='left', x=0, y=1.1, fontdict={'ha':'left'})
names = []
for i, (p, votes) in enumerate(parties_votes):
a = ax.bar(left_lim+i, votes, width=width, color=p.color, edgecolor='none')
ax.text(left_lim+i+width/2, votes-4, '{0}%'.format(votes),
fontdict={'weight':'bold', 'color':'w', 'fontsize':'20', 'ha':'center', 'va':'center'})
names.append(p.short_name)
if elections:
vot =elections.get_party(p,join_coalitions)
if a:
plt.plot([left_lim+i-0.1*width, left_lim+i+width+0.1*width], [vot, vot], color=[0.2,0.2,0.2], linewidth=3)
idx = np.arange(len(parties.parties))+width/2 + left_lim
ax.set_xticks(idx)
ax.set_xlim([0, idx[-1]+width/2 + left_lim])
ax.set_xticklabels(names);
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
if poll.error:
plt.figtext(0.125,.94,'({}% error)'.format(poll.error), fontdict={'fontsize': 12})
def export(self, filename):
""" TODO
:param filename:
:return:
"""
self._fig.savefig(filename)
def _percentage_formatter(y, _):
""" TODO
:param y:
:return:
"""
s = str(y)
if matplotlib.rcParams['text.usetex'] is True:
return s + r'$\%$'
else:
return s + '%'
class TimeSeries:
""" TODO
"""
def __init__(self, parties):
""" TODO
:param parties:
:return:
"""
#TODO sure?
plt.rcParams['figure.figsize'] = (18,12)
self.parties = parties
self.columns = []
self.__up_to_date = False
self.__fig = None
def add_column(self, polls, main=False):
""" TODO
:param polls:
:param main:
:return:
"""
self.__fig = None
self.columns.append({'polls': polls, 'main': main})
def show(self):
""" TODO
:return:
"""
if self.__fig is None:
self.__create_fig()
plt.show()
def export(self, filename):
""" TODO
:param filename:
:return:
"""
# TODO
if self.__fig is None:
self.__create_fig()
pass
def __create_fig(self):
""" TODO
:return:
"""
self.__fig = plt.figure()
if not self.columns:
print('No columns have been added')
return
range_lengths = []
for c in self.columns:
# TODO add get_dates() to ListPolls!!
dates = [p.date for p in c['polls'].polls]
range_lengths.append((max(dates) - min(dates)).days)
# range_lengths = [c['polls']['dates'][-1] - c['polls']['dates'][0] for c in self.columns]
range_lengths_nonzero = [r for r in range_lengths if r != 0]
total_length = (sum(range_lengths) / (1 - (len(self.columns) - len(range_lengths_nonzero)) * 0.1))
range_lengths = [r / total_length if r != 0 else 0.1 for r in range_lengths]
gs = matplotlib.gridspec.GridSpec(1, len(self.columns), width_ratios=range_lengths)
for i, c in enumerate(self.columns):
ax = plt.subplot(gs[i])
first = False
last = False
if i == 0:
first = True
if i == len(self.columns) - 1:
last = True
self.__draw_column(c['polls'], ax, first, last)
max_percentage = 0
for i, c in enumerate(self.columns):
for poll in c['polls'].polls:
for name, percentages in poll.parties.items():
max_percentage = max(max_percentage, np.max(percentages))
yticks = [tick for tick in [10, 20, 30, 40, 50, 60, 70, 80, 90] if tick < max_percentage]
for g in gs:
ax = plt.subplot(g)
ax.set_yticks(yticks, minor=False)
ax.set_ylim(0, min(max_percentage + 5, 100))
def __draw_column(self, polls, ax, first=False, last=False):
""" TODO
:param polls:
:param first:
:param last:
:return:
"""
self.__fig = None
#From type!!
dates = [p.date for p in polls.polls]
single = len(dates) == 1
title_loc = 'left'
if single:
title_loc = 'center'
ax.set_title(polls._name, loc=title_loc)
self.__scatter(polls, self.parties, ax, single, last)
if not single:
self.__gp(polls, self.parties, ax)
ax.set_yticks([10, 20, 30, 40, 50, 60, 70, 80, 90], minor=False)
ax.yaxis.grid(True, which='major')
ax.yaxis.grid(True, which='minor')
ax.spines['top'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(False)
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticks_position('bottom')
formatter = matplotlib.ticker.FuncFormatter(_percentage_formatter)
ax.get_yaxis().set_major_formatter(formatter)
# ax.set_xlim(polls['dates'][0] - 0.5, polls['dates'][-1] + 0.5)
if not first:
ax.set_yticklabels([])
if single:
#TODO fix!
ax.set_xticks([polls.polls[0].date], minor=False)
pass
def __scatter(self, polls, parties, ax, single=False, last=False):
""" TODO
:param single:
:return:
"""
last_date = datetime.datetime.min
for party in parties.parties.values():
polls_party = polls.get_party(party)
dates = [x[0] for x in polls_party]
votes = [x[1] for x in polls_party]
if single:
ax.scatter(dates, votes, 70, c=party.color, edgecolors='none', label=u'Observations')
else:
ax.scatter(dates, votes, c=np.append(party.color, [0.6]), edgecolors='none', s=40,
label=u'Observations')
last_date = max(last_date, max(dates))
if last:
# TODO move to last point of regression, not poll
#TODO add name label at the end!
for party in parties.parties.values():
polls_party = polls.get_party(party)
last_date_arg = np.argmin([x[0] for x in polls_party])
votes = polls_party[last_date_arg][1]
polls_party = polls.get_party(party)
plt.text(last_date, votes, ' ' + party.short_name,
color=party.color, weight='bold',
verticalalignment='center', fontsize=20)
def __gp(self,polls, parties, ax):
""" TODO
:param x:
:param ax:
:param partyname:
:return:
"""
for party in parties.parties.values():
polls_party = polls.get_party(party)
dates = [x[0] for x in polls_party]
votes = [x[1] for x in polls_party]
x = dates
y = votes
# + 0.5 - 0.5?
x_dense = np.atleast_2d(np.linspace(time.mktime(x[0].timetuple()),
time.mktime(x[-1].timetuple()), 1000)).T
#x_dense = np.atleast_2d(np.linspace(x[0], x[-1], 1000)).T
np.random.seed(1)
gp = gaussian_process.GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
random_start=100, nugget=10 - 8)
x = [time.mktime(xi.timetuple()) for xi in x]
gp.fit(np.reshape(x, (-1, 1)) + np.random.randn(len(x),1)*0.01, np.reshape(y,(-1, 1)))
y_pred, mse = gp.predict(x_dense, eval_MSE=True)
sigma = np.sqrt(mse)
x_dense = [datetime.datetime.fromtimestamp(xi) for xi in x_dense]
ax.plot(x_dense, y_pred, 'b-', label=u'Prediction', c=party.color, linewidth=3)
# TODO Check and (maybe) fix?
# ax.fill(np.concatenate([x_dense, x_dense[::-1]]),
# np.concatenate([y_pred - 1.9600 * sigma,
# (y_pred + 1.9600 * sigma)[::-1]]),
# color=np.append(party.color, [0.1]), fc='b', ec='None',
# label='95% confidence interval')
| cesans/mapache | mapache/vis.py | Python | bsd-3-clause | 10,110 |
from suma.api.schemas import LinkSchema
from schematics.exceptions import ModelValidationError, ModelConversionError
import pytest
def test_valid_link_schema():
schema = LinkSchema({"url": "https://google.com"})
schema.validate()
assert schema.url == "https://google.com"
assert schema.user_id is None
def test_link_schema_url_required():
schema = LinkSchema({})
with pytest.raises(ModelValidationError) as excinfo:
schema.validate()
assert 'url' in str(excinfo.value)
def test_valid_link_schema_with_user_id():
schema = LinkSchema({"url": "https://google.com", "user_id": 1})
schema.validate()
assert schema.url == "https://google.com"
assert schema.user_id == 1
def test_link_schema_with_invalid_url():
schema = LinkSchema({"url": "fail", "user_id": 1L})
with pytest.raises(ModelValidationError) as excinfo:
schema.validate()
assert 'url' in str(excinfo.value)
def test_link_schema_with_invalid_user_id():
with pytest.raises(ModelConversionError) as excinfo:
schema = LinkSchema({"url": "https://google.com", "user_id": "fail"})
schema.validate()
assert 'user_id' in str(excinfo.value)
| rach/suma | tests/api/schemas/test_link_schema.py | Python | apache-2.0 | 1,194 |
import time
from utils import yellow, green, blue, mangenta, red
class Convert(object):
vocabulary = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
def __init__(self, number, _from, to, log):
self.log = log
self.number = number
self._from = _from
self.to = to
@property
def is_negative(self):
return self.number[0] == '-'
def run(self):
if self.is_negative:
if self._from != 10:
self.log.error(red("I dont know how to convert negative numbers if"
" there are not in decimal base"))
return False
else:
number = self.transform_negative()
else:
number = self.transform_positive()
print green("Your number is %s" % (red(number)))
def transform_positive(self):
self.log.info("Start transforming %s from base %s to base %s" %
(self.number, self._from, self.to))
self.number = self.number.split('.')
decimal = self.number[0]
floating = self.number[1] if len(self.number) > 1 else ''
if self._from != 10:
now = time.time()
self.number = str(self.to_decimal(decimal, self._from))
if floating:
self.number += ".%s" % str(self.to_decimal(floating, self._from, True))[2:]
self.log.debug(blue("Transforming the number from base %s into decimal"
" took %f seconds" % (self._from,
round(time.time() - now, 10))))
self.log.info("Decimal representation of the number is %s" % self.number)
        if self._from != 10:
            # the number was rebuilt above as a decimal string, so split it again
            self.number = self.number.split('.')
            decimal = self.number[0]
            floating = self.number[1] if len(self.number) > 1 else ''
if self.to != 10:
now = time.time()
self.number = str(self.to_base(decimal, self.to))
if floating:
self.number += ".%s" % str(self._get_floating(floating, self.to))
self.log.debug(blue("Transforming the number from decimal into base %s"
" took %f seconds" % (self.to,
round(time.time() - now, 10))))
        if isinstance(self.number, list):
            # target base is 10: re-join the decimal and floating parts split above
            self.number = '.'.join(self.number)
        return self.number
def transform_negative(self):
self.log.info("Start transforming %s from base %s to base %s" %
(self.number, self._from, self.to))
now = time.time()
number = self.number[1:]
# TODO: don't use builtin functions
complement = str(bin(~int(number)))[3:]
self.log.debug(blue("2's complement of %s is %s" %
(self.number, complement)))
new_number = self.to_base(self.to_decimal(complement, 2), self.to)
self.log.debug(blue("Transforming the number from decimal into base %s"
" took %f seconds" % (self.to,
round(time.time() - now, 10))))
return new_number
def _get_floating(self, number, to):
number = float('0.%s' % number)
new_number = ''
digits = self.digits(to)
trend = ''
while number > 0.0:
digit = digits[int(number * to)]
if len(trend) > 100:
if trend and trend in new_number:
break
trend = digit
else:
trend += digit
new_number += digit
number = float(number * to) - int(number * to)
return new_number
def to_decimal(self, number, _from, floating=False):
if not floating:
number = number[::-1]
new_number = 0
digits = self.digits(_from)
for index in xrange(len(number)):
value = digits.index(number[index])
if not floating:
digit = int(value) * (_from ** index)
self.log.debug(blue("%s * (%s ** %s) = %s + %s = %s" %
(value, _from, index, digit, new_number,
new_number + digit)))
else:
digit = float(value) / (_from ** (index + 1))
self.log.debug(blue("%s / (%s ** %s) = %s + %s = %s" %
(value, _from, index, digit, new_number,
new_number + digit)))
new_number += digit
return new_number
def to_base(self, number, to):
number = int(number)
new_number = ''
digits = self.digits(to)
while number >= to:
self.log.debug(blue("%s / %s = %s %s" %
(number, to, number / to, number % to)))
new_number += digits[number % to]
number /= to
if number != 0:
            new_number += digits[number]
return new_number[::-1]
def digits(self, radix):
if radix < 2 or radix > 62:
self.log.error("Radix base should be between 2 and 62, not %s" % radix)
raise ValueError("Radix base should be between 2 and 62, not %s" % radix)
return self.vocabulary[:radix]
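# Usage sketch (illustrative; any object exposing .info/.debug/.error works as `log`,
# the stdlib logger below is only an example):
#
#   import logging
#   log = logging.getLogger("conversion")
#   Convert("255", 10, 16, log).run()   # prints: Your number is FF
#   Convert("FF", 16, 2, log).run()     # prints: Your number is 11111111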
| vtemian/university_projects | arhitecure/hmw1/conversion.py | Python | apache-2.0 | 4,722 |
import where_query
| galtys/galtys-addons | account_move_line_where_query/__init__.py | Python | agpl-3.0 | 19 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Routine for decoding the CIFAR-10 binary file format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
#IMAGE_SIZE = 24
IMAGE_SIZE = 256
# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 7
#NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10000
#NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 2000
PATH = "/home/danielll/data/"
def read_cifar10(filename_queue):
"""Reads and parses examples from CIFAR10 data files.
Recommendation: if you want N-way read parallelism, call this function
N times. This will give you N independent Readers reading different
files & positions within those files, which will give better mixing of
examples.
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
An object representing a single example, with the following fields:
height: number of rows in the result (32)
width: number of columns in the result (32)
depth: number of color channels in the result (3)
key: a scalar string Tensor describing the filename & record number
for this example.
label: an int32 Tensor with the label in the range 0..9.
uint8image: a [height, width, depth] uint8 Tensor with the image data
"""
class CIFAR10Record(object):
pass
result = CIFAR10Record()
# Dimensions of the images in the CIFAR-10 dataset.
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
label_bytes = 1 # 2 for CIFAR-100
result.height = 480
result.width = 640
result.depth = 1
image_bytes = result.height * result.width * result.depth
# Every record consists of a label followed by the image, with a
# fixed number of bytes for each.
record_bytes = label_bytes + image_bytes
# Read a record, getting filenames from the filename_queue. No
# header or footer in the CIFAR-10 format, so we leave header_bytes
# and footer_bytes at their default of 0.
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
result.key, value = reader.read(filename_queue)
# Convert from a string to a vector of uint8 that is record_bytes long.
record_bytes = tf.decode_raw(value, tf.uint8)
# The first bytes represent the label, which we convert from uint8->int32.
result.label = tf.cast(
tf.slice(record_bytes, [0], [label_bytes]), tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),
[result.depth, result.height, result.width])
# Convert from [depth, height, width] to [height, width, depth].
result.uint8image = tf.transpose(depth_major, [1, 2, 0])
return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
batch_size, shuffle):
"""Construct a queued batch of images and labels.
Args:
image: 3-D Tensor of [height, width, 3] of type.float32.
label: 1-D Tensor of type.int32
min_queue_examples: int32, minimum number of samples to retain
in the queue that provides of batches of examples.
batch_size: Number of images per batch.
shuffle: boolean indicating whether to use a shuffling queue.
Returns:
images: Images. 4D tensor of [batch_size, height, width, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
# Create a queue that shuffles the examples, and then
# read 'batch_size' images + labels from the example queue.
num_preprocess_threads = 16
if shuffle:
images, label_batch = tf.train.shuffle_batch(
[image, label],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples)
else:
images, label_batch = tf.train.batch(
[image, label],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size)
# Display the training images in the visualizer.
tf.image_summary('images', images, max_images=10)
return images, tf.reshape(label_batch, [batch_size])
def distorted_inputs(data_dir, batch_size):
"""Construct distorted input for CIFAR training using the Reader ops.
Args:
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in xrange(1, 6)]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
"""
path = PATH
filenames = [os.path.join(path, 'kh.bin')]
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_cifar10(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for training the network. Note the many random
# distortions applied to the image.
# Randomly crop a [height, width] section of the image.
distorted_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
width, height)
#distorted_image = tf.random_crop(reshaped_image, [height, width, 1])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Because these operations are not commutative, consider randomizing
# the order their operation.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_whitening(distorted_image)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
min_fraction_of_examples_in_queue)
print ('Filling queue with %d CIFAR images before starting to train. '
'This will take a few minutes.' % min_queue_examples)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=True)
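# Example call (illustrative; PATH is the module-level data directory defined above):
#   images, labels = distorted_inputs(PATH, batch_size=128)
# yields tensors shaped [128, IMAGE_SIZE, IMAGE_SIZE, 1] and [128]; note the data_dir argument is
# currently ignored in favour of PATH.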
def inputs(eval_data, data_dir, batch_size):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
if not eval_data:
filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in xrange(1, 6)]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
else:
filenames = [os.path.join(data_dir, 'test_batch.bin')]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
"""
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
path = PATH
filenames = [os.path.join(path, 'kh.bin')]
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_cifar10(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
width, height)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_whitening(resized_image)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(num_examples_per_epoch *
min_fraction_of_examples_in_queue)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
                                         shuffle=True)
| dllatas/deepLearning | uppmax/cifar10_input.py | Python | mit | 10,086 |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
def default(session):
"""Default unit test session.
This is intended to be run **without** an interpreter set, so
that the current ``python`` (on the ``PATH``) or the version of
Python corresponding to the ``nox`` binary the ``PATH`` can
run the tests.
"""
# Install all test dependencies, then install local packages in-place.
session.install('mock', 'pytest', 'pytest-cov')
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
session.install('-e', '.')
# Run py.test against the unit tests.
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.dns',
'--cov=tests.unit',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
'tests/unit',
)
@nox.session(python=['2.7', '3.5', '3.6', '3.7'])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python='3.6')
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session(python='3.6')
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install('docutils', 'Pygments')
session.run(
'python', 'setup.py', 'check', '--restructuredtext', '--strict')
@nox.session(python='3.6')
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
| jonparrott/gcloud-python | dns/noxfile.py | Python | apache-2.0 | 2,619 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-29 19:04
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('operation_finance', '0007_auto_20170629_1840'),
]
operations = [
migrations.AlterField(
model_name='invoice',
name='due_by',
field=models.DateField(default=datetime.datetime(2017, 7, 24, 19, 4, 7, 120287)),
),
]
| michealcarrerweb/LHVent_app | operation_finance/migrations/0008_auto_20170629_1904.py | Python | mit | 519 |
#!/usr/bin/env python
from optparse import OptionParser
import socket
import sys
import httplib
import json
PASS = 0
WARNING = 1
CRITICAL = 2
def get_bongo_host(server, app):
try:
con = httplib.HTTPConnection(server, timeout=45)
con.request("GET","/v2/apps/" + app)
data = con.getresponse()
if data.status >= 300:
print "eventanomaly check get_bongo_host= Recieved non-2xx response= %s" % (data.status)
sys.exit(WARNING)
json_data = json.loads(data.read())
host = json_data['app']['tasks'][0]['host']
port = json_data['app']['tasks'][0]['ports'][0]
con.close()
return host, port
except Exception, e:
print "eventanomaly check get_bongo_host= %s Exception caught" % (e)
sys.exit(WARNING)
def get_status(host, group, time):
try:
con = httplib.HTTPConnection(host,timeout=45)
con.request("GET","/v1/eventdrop/" + group + "/" + time)
data = con.getresponse()
if data.status >= 300:
print "Event Anomaly Check Status= Recieved non-2xx response= %s" % (data.status)
sys.exit(WARNING)
json_data = json.loads(data.read())
con.close()
if json_data['status'] == 2:
print "Event Anomaly Check Status for `%s` = %s" % (time,json_data['msg'])
sys.exit(CRITICAL)
elif json_data['status'] == 1:
print "Event Anomaly Check Status for `%s` = %s" % (time,json_data['msg'])
sys.exit(WARNING)
else:
print "Event Anomaly Check Status for `%s` = %s" % (time,json_data['msg'])
sys.exit(PASS)
except Exception, e:
print "Event Anomaly Check Status= %s Exception caught" % (e)
sys.exit(WARNING)
if __name__=="__main__":
parser = OptionParser()
parser.add_option("-s", dest="server", action="store", default="localhost:8080", help="Marathon Cluster address with port no")
parser.add_option("-a", dest="app", action="store", default="bongo.useast.prod", help="App Id to retrieve the slave address")
parser.add_option("-g", dest="group", action="store", default="pmi", help="The group of event pmi or adevents")
parser.add_option("-t", dest="time", action="store", default="10min", help="The time gap for which the difference is to be calculated")
(options, args) = parser.parse_args()
host, port = get_bongo_host(options.server, options.app)
if "useast" in host:
host = host.rsplit("prd",1)
consul_host = "%snode.us-east-1.consul:%s" % (host[0], port)
else:
consul_host = "%s:%s" % (host, port)
get_status(consul_host, options.group, options.time)
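# Example invocation (hypothetical Marathon address; the defaults match the options above):
#   ./check-eventanomaly.py -s marathon.example.com:8080 -a bongo.useast.prod -g pmi -t 10min
# The exit code is 0 (PASS), 1 (WARNING) or 2 (CRITICAL), matching the constants defined at the top.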
| yieldbot/sensu-yieldbot-plugins | plugins/bongo/check-eventanomaly.py | Python | mit | 2,703 |
import heuristic_bursts.agent
import heuristic_bursts.team
import heuristic_bursts.options
import truss.truss
import truss.truss_visual
import time
import csv
options = heuristic_bursts.options.Options()
# with open('probabilistic_selection_test_500_iterations.csv', 'w') as sim_data_file:
# fieldnames = ['repetition', 'iteration', 'rule tier', 'rule number', 'quality before rule', 'quality after rule',
# 'current solution quality', 'rule acceptance', 'lower tier preference', 'higher tier preference',
# 'error', 'probability array', 'current solution mass', 'current solution min fos', 'current solution target fos']
#
# csv_writer = csv.DictWriter(sim_data_file, fieldnames=fieldnames)
# csv_writer.writeheader()
for sim_num in range(0, 20):
# Instantiate team
team = heuristic_bursts.team.Team(options)
# Run team for number of iterations listed in Options
team.run()
# Perform one last team interaction
team.interact()
# Take the solution of Agent 1 as the solution for the team
solution = team.agent_list[0].current_solution
results = team.agent_list[0].current_results
quality = team.agent_list[0].current_solution_quality
rules = team.agent_list[0].current_solution.applied_rules
all_qualities = team.agent_list[0].all_solution_qualities
simulation_data = team.agent_list[0].simulation_data
with open('probabilistic_selection_test_500_iterations.csv', 'r') as sim_data_file:
csv_reader = csv.DictReader(sim_data_file)
last_rep = -1
for row in csv_reader:
last_rep = int(row['repetition'])
with open('probabilistic_selection_test_500_iterations.csv', 'a') as sim_data_file:
csv_writer = csv.writer(sim_data_file)
for iteration_data in simulation_data:
iteration_data[0] = last_rep + 1
csv_writer.writerow(iteration_data)
| HSDL/HeuristicBursts | tests/truss_tests/Main Truss Tests/probabilistic_selection_test_both_tiers.py | Python | mit | 1,929 |
#!/usr/bin/env python
import sys, os, shutil, glob, imp, subprocess, time, shlex
import helper_func
from helper_func import *
####################################
# Helper functions
####################################
def _generate_a2_cfgFile(spl, cfgFile):
# Pad the A2 image up to 1M
# HPS will fail to boot with too small of a2 partition
a2size = 1024*1024
f = open(cfgFile, 'w')
f.write("flash dummy {\n")
f.write(" pebsize = 1\n")
f.write(" lebsize = 1\n")
f.write(" numpebs = %d\n" % a2size)
f.write("}\n")
f.write("image boot.a2 {\n")
f.write(" flash{\n")
f.write(" }\n")
f.write(' flashtype = "dummy"\n')
f.write(" partition spl {\n")
f.write(' in-partition-table = "no"\n')
f.write(' image = "%s" \n' % spl)
f.write(" size = %d\n" % a2size)
f.write(" }\n")
f.write("}\n")
f.close()
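# For reference, the file written above looks like the following (a2size is 1 MiB, and `spl` is the
# absolute path passed in by the caller, e.g. .../u-boot-spl.bin.crc):
#
#   flash dummy {
#    pebsize = 1
#    lebsize = 1
#    numpebs = 1048576
#   }
#   image boot.a2 {
#    flash{
#    }
#    flashtype = "dummy"
#    partition spl {
#    in-partition-table = "no"
#    image = "/path/to/u-boot-spl.bin.crc"
#    size = 1048576
#    }
#   }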
def _generate_a2_img():
imgFile = os.path.realpath("%s/boot.a2" % ENV['IMAGE_DIR'])
rm(imgFile)
print_msg("Generating a2 image: %s" % imgFile)
cfgFile = "%s/boot.a2.cfg" % ENV['IMAGE_DIR']
spl = "%s/u-boot-spl.bin.crc" % (ENV['IMAGE_DIR'])
_generate_a2_cfgFile(spl, cfgFile)
run_genimage(cfgFile, ENV['IMAGE_DIR'])
return imgFile
##############
# Build the SD disk iamge
##############
def _make_sdimage(outputDir, image, catalog):
buildDate = time.strftime("%F")
tmpImg = "%s/sdcard.img" % ENV['IMAGE_DIR']
imageFile = "%s/%s_sdcard_%s_%s.img" % (
outputDir, catalog['boardName'], image['imageName'], buildDate)
# Cleanup any previous images
files = [tmpImg, imageFile, imageFile + ".gz"]
for f in files:
rm(f)
# Generate the SD FAT partition config file
gen_sd_fat_cfg()
# Generate the A2 parition
_generate_a2_img()
print_msg("Generating target image: %s" % imageFile)
run_genimage(catalog['defaultInfo']['genimage'], ENV['IMAGE_DIR'], None)
# Rename the image file
os.rename(tmpImg, imageFile)
argStr = "gzip %s" % (imageFile)
subprocess.call( argStr.split(), cwd=ENV['IMAGE_DIR'] )
####################################
# Public Functions
####################################
##############
# Set the default dtb
##############
def set_default_dtb(defaultApp):
defaultDTB = "devicetree_%s.dtb" % (defaultApp['name'])
shutil.copyfile("%s/%s" %(ENV['SD_DIR'], defaultDTB), "%s/socfpga.dtb" % (ENV['SD_DIR']))
##############
# Set the default bitstream
##############
def set_default_bitsream(defaultApp):
shutil.copyfile(defaultApp['bit'], "%s/socfpga.rbf" % (ENV['SD_DIR']))
##############
# Build the SD card image
##############
def build_sdimage(outputDir, image, catalog):
defaultApp = image['defaultApp']
##############
# Move the kernel
##############
shutil.copy("%s/zImage" % (ENV['IMAGE_DIR']), ENV['SD_DIR'] )
##############
# Copy over the application specific rbfs
##############
for app in image['appList']:
if not app['bit'] is None:
appBit = "%s/socfpga_%s.rbf" % (ENV['SD_DIR'], app['name'])
shutil.copy(app['bit'], appBit)
##############
# Copy over the u-boot script
##############
scriptSrc = "%s/boot/u-boot-scr.txt" % (_PLATFORM_DIR)
if os.path.exists(scriptSrc):
# Copy to image dir
shutil.copy(scriptSrc, "%s/u-boot-scr.txt" % (ENV['IMAGE_DIR']) )
# Convert to uimage
argStr = """%s/usr/bin/mkimage -A arm -O linux -T script -C none -a 0 -e 0 -n "U-Boot Script" -d u-boot-scr.txt u-boot.scr""" % (ENV['HOST_DIR'])
subprocess.call(shlex.split(argStr), cwd=ENV['IMAGE_DIR'])
# move to sd card
shutil.move("%s/u-boot.scr" % (ENV['IMAGE_DIR']), "%s/u-boot.scr" % (ENV['SD_DIR']))
##############
# Copy over u-boot (SPL will load u-boot.img)
##############
shutil.copy("%s/u-boot.img" % (ENV['IMAGE_DIR']), "%s/u-boot.img" % (ENV['SD_DIR']))
##############
# Call the Altera Script
##############
_make_sdimage(outputDir, image, catalog)
####################################
# Module Globals
####################################
_PLATFORM_SCRIPTS = os.path.dirname(os.path.realpath(__file__))
_PLATFORM_DIR = os.path.dirname(_PLATFORM_SCRIPTS)
| mfornero/buildroot | board/mathworks/socfpga/scripts/postimage_common.py | Python | gpl-2.0 | 4,337 |
import examples.kristen_support
import imagepipe.traversals as uf
import imagepipe.wrapped_functions as wf
from imagepipe import core_functions as cf
# Goal of this pipeline
# 1. Detect the number of cells that were properly stained/transfected
# quantification only for successfully transfected cells
# 2. For the successfully stained cells, determine how much GFP is located inside the mCherry-stained mitochondria
translator = {'C1': 0,
'C3': 1,
'C4': 2}
source = examples.kristen_support.Kristen_traverse('/run/user/1000/gvfs/smb-share:server=10.17.0.219,share=common/Users/kristen/Split GFP quant_Andrei/20170209', matching_map=translator)
named_source = uf.name_channels(source, ['DAPI', 'GFP', 'mCherry'])
max_mCherry = wf.max_projection(named_source, in_channel='mCherry', out_channel='max_mCherry')
max_GFP = wf.max_projection(max_mCherry, in_channel='GFP', out_channel='max_GFP')
stabilized_mCherry = wf.gamma_stabilize(max_GFP, in_channel='max_mCherry', floor_method='min', alpha_clean=.5)
smoothed_mCherry = wf.smooth_2d(stabilized_mCherry, in_channel='max_mCherry', smoothing_px=.5)
mCherry_o_n_segmented = wf.robust_binarize(smoothed_mCherry,
in_channel='max_mCherry',
out_channel='max_mCherry_binary',
heterogeity_size=10, feature_size=250)
running_render = examples.kristen_support.Kristen_render(mCherry_o_n_segmented, in_channel=['name pattern',
'group id',
'max_mCherry',
'max_mCherry_binary',
'GFP',
'mCherry'],
out_channel='_',
output='Kristen_Transfection_B_and_C_GFP_analysis_results.csv',
save=True)
#
# Kristen_summary = rdr.Kristen_summarize_a(running_render, in_channel = ['name pattern', 'q_mean','q_median', 'q_std', 'nq_mean', 'nq_median', 'nq_std', 'slope', 'r2', 'p'],
# out_channel='_',
# output='Kristen_Transfection_B_and_C_GFP_analysis_results.csv')
for i in enumerate(running_render):
    print 'Analyzed group %s - image %s' % (['group id'], ['name pattern'])
| chiffa/Image_pipe | examples/Kristen_pipeline.py | Python | bsd-3-clause | 2,699 |
#####################################
# #
# Arena Boss Code #
# By: Mateo Aguirre #
# and Calvin Adams #
# #
#####################################
#Starts up the game
import pygame
from pygame.locals import *
pygame.init()
#Sets the games icon/caption
icon = pygame.image.load('arenaboss_icon.png')
pygame.display.set_icon(icon)
pygame.display.set_caption("Arena Boss")
#Loads the screen
screen_length = 1250
screen_width = 720
screen = pygame.display.set_mode([screen_length,screen_width])
#Sets everything on MENU screen
BLACK = 0,0,0
WHITE = 255,255,255
background_color = BLACK
screen.fill(background_color)
title = pygame.image.load('arenaboss_title.png')
screen.blit((title), [450,50])
singleplayer_button = pygame.image.load('arenaboss_singleplayer.png')
screen.blit((singleplayer_button), [100,400])
multiplayer_button = pygame.image.load('arenaboss_multiplayer.png')
screen.blit((multiplayer_button), [500,400])
options_button = pygame.image.load('arenaboss_options.png')
screen.blit((options_button), [900,400])
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
| eadamsatx/arena-boss | arenabossmenu.py | Python | gpl-2.0 | 1,307 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the library of spherical harmonics."""
import math
from absl.testing import absltest
import jax.numpy as jnp
import numpy as np
import scipy.special as sp_special
from simulation_research.signal_processing.spherical import spherical_harmonics
def _compute_spherical_harmonics(l_max, theta, phi, nonnegative_order=True):
"""Computes the spherical harmonics."""
num_theta = theta.shape[0]
num_phi = phi.shape[0]
phi, theta = np.meshgrid(phi, theta)
sph_harm = np.zeros((l_max + 1, l_max + 1, num_theta, num_phi), dtype=complex)
if nonnegative_order:
for l in np.arange(l_max + 1):
for m in np.arange(l + 1):
sph_harm[l, m, :, :] = sp_special.sph_harm(m, l, phi, theta)
else:
for l in np.arange(l_max + 1):
for m in np.arange(l + 1):
sph_harm[l, m, :, :] = sp_special.sph_harm(-m, l, phi, theta)
return jnp.asarray(sph_harm)
class SphericalHarmonicsTest(absltest.TestCase):
def testOrderZeroDegreeZero(self):
"""Tests the spherical harmonics of order zero and degree zero."""
num_theta = 6
num_phi = 4
expected = (1.0 / jnp.sqrt(4.0 * math.pi) *
jnp.ones((1, 1, num_theta, num_phi)))
theta = jnp.linspace(0, math.pi, num_theta)
phi = jnp.linspace(0, 2.0 * math.pi, num_phi)
sph_harm = spherical_harmonics.SphericalHarmonics(
l_max=0, theta=theta, phi=phi)
actual = jnp.real(sph_harm.harmonics_nonnegative_order())
np.testing.assert_allclose(actual, expected, rtol=1.1e-7, atol=3e-8)
def testOrderOneDegreeZero(self):
"""Tests the spherical harmonics of order one and degree zero."""
num_theta = 4
num_phi = 6
theta = jnp.linspace(0, math.pi, num_theta)
phi = jnp.linspace(0, 2.0 * math.pi, num_phi)
expected = jnp.sqrt(3.0 / (4.0 * math.pi)) * jnp.outer(
jnp.cos(theta), jnp.ones_like(phi))
sph_harm = spherical_harmonics.SphericalHarmonics(
l_max=1, theta=theta, phi=phi)
actual = jnp.real(sph_harm.harmonics_nonnegative_order()[1, 0, :, :])
np.testing.assert_allclose(actual, expected, rtol=7e-8, atol=1.5e-8)
def testOrderOneDegreeOne(self):
"""Tests the spherical harmonics of order one and degree one."""
num_theta = 7
num_phi = 8
theta = jnp.linspace(0, math.pi, num_theta)
phi = jnp.linspace(0, 2.0 * math.pi, num_phi)
expected = -1.0 / 2.0 * jnp.sqrt(3.0 / (2.0 * math.pi)) * jnp.outer(
jnp.sin(theta), jnp.exp(1j * phi))
sph_harm = spherical_harmonics.SphericalHarmonics(
l_max=1, theta=theta, phi=phi)
actual = sph_harm.harmonics_nonnegative_order()[1, 1, :, :]
np.testing.assert_allclose(
jnp.abs(actual), jnp.abs(expected), rtol=1e-8, atol=6e-8)
def testAgainstScipySpecialSphHarmNonnegativeOrder(self):
"""Tests the accuracy against scipy.special.sph_harm."""
l_max = 64
num_theta = 128
num_phi = 128
theta = jnp.linspace(0, math.pi, num_theta)
phi = jnp.linspace(0, 2.0 * math.pi, num_phi)
expected = _compute_spherical_harmonics(l_max=l_max, theta=theta, phi=phi)
sph_harm = spherical_harmonics.SphericalHarmonics(
l_max=l_max, theta=theta, phi=phi)
actual = sph_harm.harmonics_nonnegative_order()
np.testing.assert_allclose(
jnp.abs(actual), jnp.abs(expected), rtol=1e-8, atol=9e-5)
def testAgainstScipySpecialSphHarmNonpositiveOrder(self):
"""Tests the accuracy against scipy.special.sph_harm."""
l_max = 64
num_theta = 128
num_phi = 128
theta = jnp.linspace(0, math.pi, num_theta)
phi = jnp.linspace(0, 2.0 * math.pi, num_phi)
expected = _compute_spherical_harmonics(
l_max=l_max, theta=theta, phi=phi, nonnegative_order=False)
sph_harm = spherical_harmonics.SphericalHarmonics(
l_max=l_max, theta=theta, phi=phi)
actual = sph_harm.harmonics_nonpositive_order()
np.testing.assert_allclose(
jnp.abs(actual), jnp.abs(expected), rtol=1e-8, atol=9e-5)
if __name__ == '__main__':
absltest.main()
| google-research/google-research | simulation_research/signal_processing/spherical/spherical_harmonics_test.py | Python | apache-2.0 | 4,604 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Statistics utility functions of NCF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
def random_int32():
return np.random.randint(low=0, high=np.iinfo(np.int32).max, dtype=np.int32)
def permutation(args):
"""Fork safe permutation function.
This function can be called within a multiprocessing worker and give
appropriately random results.
Args:
args: A size two tuple that will unpacked into the size of the permutation
and the random seed. This form is used because starmap is not universally
available.
  Returns:
A NumPy array containing a random permutation.
"""
x, seed = args
# If seed is None NumPy will seed randomly.
state = np.random.RandomState(seed=seed) # pylint: disable=no-member
output = np.arange(x, dtype=np.int32)
state.shuffle(output)
return output
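# Illustrative usage (not part of the original module): because each call seeds its
# own RandomState, permutation() produces independent results across forked
# multiprocessing workers, e.g.
#
#   pool = multiprocessing.Pool(4)
#   perms = pool.map(permutation, [(10, seed) for seed in range(4)])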
def very_slightly_biased_randint(max_val_vector):
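  """Draws a random int in [0, max_val) per element via modulo reduction.

  Because the 64-bit sample space is not an exact multiple of max_val, smaller
  residues are marginally more likely, hence the "very slightly biased" name.
  The bias is negligible for the value ranges used here.
  """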
sample_dtype = np.uint64
out_dtype = max_val_vector.dtype
samples = np.random.randint(low=0, high=np.iinfo(sample_dtype).max,
size=max_val_vector.shape, dtype=sample_dtype)
return np.mod(samples, max_val_vector.astype(sample_dtype)).astype(out_dtype)
def mask_duplicates(x, axis=1): # type: (np.ndarray, int) -> np.ndarray
"""Identify duplicates from sampling with replacement.
Args:
x: A 2D NumPy array of samples
axis: The axis along which to de-dupe.
Returns:
A NumPy array with the same shape as x with one if an element appeared
previously along axis 1, else zero.
"""
if axis != 1:
raise NotImplementedError
x_sort_ind = np.argsort(x, axis=1, kind="mergesort")
sorted_x = x[np.arange(x.shape[0])[:, np.newaxis], x_sort_ind]
# compute the indices needed to map values back to their original position.
inv_x_sort_ind = np.argsort(x_sort_ind, axis=1, kind="mergesort")
# Compute the difference of adjacent sorted elements.
diffs = sorted_x[:, :-1] - sorted_x[:, 1:]
# We are only interested in whether an element is zero. Therefore left padding
# with ones to restore the original shape is sufficient.
diffs = np.concatenate(
[np.ones((diffs.shape[0], 1), dtype=diffs.dtype), diffs], axis=1)
# Duplicate values will have a difference of zero. By definition the first
# element is never a duplicate.
return np.where(diffs[np.arange(x.shape[0])[:, np.newaxis],
inv_x_sort_ind], 0, 1)
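# Worked example (illustrative, not part of the original module):
#   x = np.array([[1, 2, 1, 3],
#                 [4, 4, 4, 5]])
#   mask_duplicates(x)
#   -> array([[0, 0, 1, 0],
#             [0, 1, 1, 0]])
# Second and later occurrences of a value within a row are flagged with one.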
| tombstone/models | official/recommendation/stat_utils.py | Python | apache-2.0 | 3,179 |