Dataset schema (each record below lists these fields, in this order):

  field            dtype    range / classes
  repo_name        string   length 5 to 92
  path             string   length 4 to 221
  copies           string   19 classes
  size             string   length 4 to 6
  content          string   length 766 to 896k
  license          string   15 classes
  hash             int64    -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean        float64  6.51 to 99.9
  line_max         int64    32 to 997
  alpha_frac       float64  0.25 to 0.96
  autogenerated    bool     1 class
  ratio            float64  1.5 to 13.6
  config_test      bool     2 classes
  has_no_keywords  bool     2 classes
  few_assignments  bool     1 class
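
The numeric columns above make it easy to filter records before reusing any of the code snippets below. A minimal sketch, assuming the records sit in a JSON-lines file readable by pandas; the file name and the thresholds are illustrative, not part of this dump:

    import pandas as pd

    # Hypothetical file name; the dump does not say where the records live.
    df = pd.read_json("code_records.jsonl.gz", lines=True)

    # Keep human-written, code-like records, using the columns documented above.
    keep = df[
        (df["autogenerated"] == False)
        & (df["config_test"] == False)
        & (df["ratio"].between(1.5, 6.0))   # illustrative threshold
        & (df["alpha_frac"] > 0.3)          # illustrative threshold
    ]
    for _, row in keep.iterrows():
        print(row["repo_name"], row["path"], row["license"], len(row["content"]))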

repo_name: rr-/ida-images | path: rgb-ida.py | copies: 1 | size: 3099
import idaapi import librgb from librgb.qt_shims import QtGui # important for PySide legacy IDA from librgb.qt_shims import QtWidgets try: MAJOR, MINOR = map(int, idaapi.get_kernel_version().split(".")) except AttributeError: MAJOR, MINOR = 6, 6 USING_IDA7API = MAJOR > 6 USING_PYQT5 = USING_IDA7API or (MAJOR == 6 and MINOR >= 9) class DockableShim(object): def __init__(self, title): self._title = title # IDA 7+ Widgets if USING_IDA7API: import sip self._form = idaapi.create_empty_widget(self._title) self.widget = sip.wrapinstance(long(self._form), QtWidgets.QWidget) # legacy IDA PluginForm's else: self._form = idaapi.create_tform(self._title, None) if USING_PYQT5: self.widget = idaapi.PluginForm.FormToPyQtWidget(self._form) else: self.widget = idaapi.PluginForm.FormToPySideWidget(self._form) def show(self): if USING_IDA7API: flags = ( idaapi.PluginForm.WOPN_TAB | idaapi.PluginForm.WOPN_MENU | idaapi.PluginForm.WOPN_RESTORE | idaapi.PluginForm.WOPN_PERSIST ) idaapi.display_widget(self._form, flags) # legacy IDA PluginForm's else: flags = ( idaapi.PluginForm.FORM_TAB | idaapi.PluginForm.FORM_MENU | idaapi.PluginForm.FORM_RESTORE | idaapi.PluginForm.FORM_PERSIST | 0x80 ) # idaapi.PluginForm.FORM_QWIDGET idaapi.open_tform(self._form, flags) class ImagePreviewPlugin(idaapi.plugin_t): flags = 0 wanted_name = "Image previewer" wanted_hotkey = "Alt + I" comment = "Preview memory as image" help = "https://github.com/rr-/ida-images" def init(self): return idaapi.PLUGIN_OK def term(self): pass def run(self, arg): class IdaWindowAdapter(librgb.GenericWindowAdapter): def ask_address(self, address): return AskAddr(address, "Please enter an address") def ask_file(self): return AskFile(1, "*.png", "Save the image as...") image_preview_form = DockableShim("Image preview") params = librgb.RendererParams() params.readers = [librgb.MemoryReader()] params.format = librgb.PixelFormats.GRAY8 params.width = 800 params.height = 600 params.flip = False params.brightness = 50 adapter = IdaWindowAdapter(params) shortcut_manager = librgb.ShortcutManager(adapter, params) for shortcut, func in shortcut_manager.shortcuts.items(): adapter.define_shortcut(shortcut, image_preview_form.widget, func) layout = adapter.create_layout() image_preview_form.widget.setLayout(layout) adapter.draw() image_preview_form.show() def PLUGIN_ENTRY(): return ImagePreviewPlugin() if __name__ == "__main__": ImagePreviewPlugin().run(0)
license: mit | hash: -2,430,317,480,103,620,600 | line_mean: 29.683168 | line_max: 79 | alpha_frac: 0.595999 | autogenerated: false | ratio: 3.663121 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: zencore-dobetter/zencore-utils | path: src/zencore/utils/redis.py | copies: 1 | size: 3995
import uuid import math import time import logging import redis as engine from zencore.errors import WrongParameterTypeError from .types import smart_force_to_string logger = logging.getLogger(__name__) class RedisLock(object): def __init__(self, url, name=None, app_name=None, expire=None, prefix="zencore:lock:", tick=5, **kwargs): self.url = url self.connection = engine.Redis.from_url(url, **kwargs) self.app_name = app_name or str(uuid.uuid4()) self.prefix = prefix self.expire = expire self.tick = tick if name: self.setup(name) def setup(self, name): self.lock_name = ":".join([self.prefix, name]) self.signal_name = ":".join([self.prefix, name, "signal"]) def acquire(self, blocking=True, timeout=-1): stime = time.clock() while True: result = self.acquire_nowait() if result: return True if not blocking: return False if timeout == 0: return False if timeout > 0: delta = math.ceil(timeout - (time.clock() - stime)) if delta < 0: return False if delta > self.tick: delta = self.tick else: delta = self.tick event = self.connection.blpop(self.signal_name, timeout=delta) if event is None: return False def acquire_nowait(self): result = self.connection.setnx(self.lock_name, self.app_name) if result: if self.expire: self.connection.expire(self.lock_name, self.expire) self.connection.delete(self.signal_name) return True return False def release(self): if self.is_lock_owner(): self.connection.delete(self.lock_name) self.connection.rpush(self.signal_name, 1) def force_clean(self): self.connection.delete(self.lock_name) self.connection.rpush(self.signal_name, 1) def get_current_lock_owner(self, ): return smart_force_to_string(self.connection.get(self.lock_name)) def is_lock_owner(self): return self.get_current_lock_owner() == self.app_name class Counter(object): def __init__(self, connection, namespace): self.connection = connection self.namespace = namespace def incr(self, name): key = self.make_counter_key(name) self.connection.incr(key) def get(self, name): key = self.make_counter_key(name) return int(self.connection.get(key)) def getall(self): keys = self.connection.keys(self.make_counter_pattern()) if not keys: return {} keys = [key.decode("utf-8") for key in keys] values = [int(value) for value in self.connection.mget(*keys)] return dict(zip(keys, values)) def make_counter_key(self, name): return "{}:{}".format(self.namespace, name) def make_counter_pattern(self): return "{}:*".format(self.namespace) def get_redis(config): """ 从配置文件获取redis对象。 """ if isinstance(config, engine.StrictRedis): return config if isinstance(config, str): return engine.Redis.from_url(config) if isinstance(config, dict): url = config.get("url") host = config.get("host") if url: db = config.get("db", None) options = config.get("options", {}) return engine.Redis.from_url(url, db, **options) if host: return engine.Redis(**config) logger.error("get_redis got wrong parameter type error.") raise WrongParameterTypeError() # ########################################################################### # 重复或不推荐使用 # ########################################################################### make_redis_instance = get_redis
license: mit | hash: 3,164,183,319,776,613,000 | line_mean: 29.929688 | line_max: 109 | alpha_frac: 0.558474 | autogenerated: false | ratio: 3.9749 | config_test: true | has_no_keywords: false | few_assignments: false
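
The content of this record defines a RedisLock (a SETNX-based lock with a BLPOP'd signal list), a Counter helper, and a get_redis() factory. A short usage sketch of RedisLock with the constructor and methods exactly as declared in the record; the Redis URL, the lock name, and the module path (inferred from the path field) are assumptions:

    # Module path inferred from src/zencore/utils/redis.py; adjust to the real package layout.
    from zencore.utils.redis import RedisLock

    lock = RedisLock("redis://localhost:6379/0", name="nightly-report", expire=120)
    if lock.acquire(blocking=True, timeout=10):
        try:
            pass  # work that must not run on two workers at once
        finally:
            lock.release()
    else:
        print("another worker holds the lock")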

repo_name: p/webracer | path: tests/request_via_form_test.py | copies: 1 | size: 1451
import webracer
import nose.plugins.attrib

from . import utils
from .apps import form_app

utils.app_runner_setup(__name__, form_app.app, 8059)


@nose.plugins.attrib.attr('client')
@webracer.config(host='localhost', port=8059)
class RequestViaFormTest(webracer.WebTestCase):
    def test_get_form_as_url(self):
        self.get('/method_check_form')
        self.assert_status(200)

        form = self.response.form()
        self.get(form)
        self.assertEqual('GET', self.response.body)

    def test_post_form_as_url(self):
        self.get('/textarea')
        self.assert_status(200)

        form = self.response.form()
        self.post(form)
        self.assertEqual('{}', self.response.body)

    def test_post_form_with_elements(self):
        self.get('/textarea')
        self.assert_status(200)

        form = self.response.form()
        elements = form.elements
        self.post(form, elements)
        json = self.response.json
        self.assertEqual(dict(field='hello world'), json)

    def test_post_form_with_mutated_elements(self):
        self.get('/textarea')
        self.assert_status(200)

        form = self.response.form()
        elements = form.elements.mutable
        elements.set_value('field', 'changed')
        self.post(form, elements)
        json = self.response.json
        self.assertEqual(dict(field='changed'), json)
license: bsd-2-clause | hash: 4,729,082,334,174,765,000 | line_mean: 28.02 | line_max: 57 | alpha_frac: 0.598897 | autogenerated: false | ratio: 3.818421 | config_test: true | has_no_keywords: false | few_assignments: false

repo_name: Ichag/django-timelinejs3 | path: timeline/migrations/0009_auto_20150819_0648.py | copies: 1 | size: 3020
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('timeline', '0008_auto_20150818_2241'), ] operations = [ migrations.AlterField( model_name='options', name='duration', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='height', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='marker_height_min', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='marker_padding', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='marker_width_min', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='menubar_height', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='optimal_tick_width', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='scale_factor', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='skinny_size', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='slide_default_fade', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='slide_padding_lr', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='start_at_slide', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='timenav_height', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='timenav_height_min', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='timenav_height_percentage', field=models.IntegerField(null=True, blank=True), ), migrations.AlterField( model_name='options', name='width', field=models.IntegerField(null=True, blank=True), ), ]
license: bsd-3-clause | hash: 4,725,792,827,878,784,000 | line_mean: 31.12766 | line_max: 61 | alpha_frac: 0.54404 | autogenerated: false | ratio: 4.596651 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: whiteshield/EHScripter | path: EHScripter/netsparker.py | copies: 1 | size: 6143
##!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import sys import re import string from io import StringIO from lxml import etree try: from .util import * except Exception as e: from util import * class NetsparkerToMarkdown: def __init__(self, options): self.options=options self.template=string.Template(self.options['template']) if self.options['merge']: self.template=string.Template(self.options['merge_template']) self.merge_findinglist_template=string.Template(self.options['merge_findinglist_template']) self.process() def process(self): if not os.path.exists(self.options['output_dir']): os.makedirs(self.options['output_dir']) filelist=[] if os.path.isfile(self.options['load_file']): filelist.append(self.options['load_file']) elif os.path.isdir(self.options['load_file']): for name in os.listdir(self.options["load_file"]): if os.path.isfile(self.options['load_file']+'/'+name) and len(name)>11 and name[-11:]==".netsparker": filelist.append(self.options["load_file"]+'/'+name) counter=1 findings={} for processfile in filelist: content=open(processfile).read() fileparts=content.split('<!-- Vulnerability Details -->') vulns=fileparts[1].split('<h1') fullparser=etree.HTMLParser() fullhtml=etree.parse(StringIO(content), fullparser) Target=self.attrib(fullhtml.xpath("//span[@class='dashboard-url']/a"),'href','N/A') for vuln in vulns[1:]: vuln='<h1'+vuln parser=etree.HTMLParser() vulnobj=etree.parse(StringIO(vuln), parser) h1=self.value(vulnobj.xpath('//h1//text()'),'N/A') Vulnerability=re.sub(r'\d+\\\. ','',h1) Risk=self.value(vulnobj.xpath("//div[@class='vuln-block']/div[2]//text()"),'N/A').title() VulnDesc=self.value(vulnobj.xpath("//div[@class='vulndesc']//text()"),'N/A') if Risk=='Information': Risk='Info' if Risk=='Important': Risk='High' VulnDetails=vulnobj.xpath("//div[@class='vulnerability-detail']") for VulnDetail in VulnDetails: h2=self.value(VulnDetail.xpath('./div/h2//text()'),'N/A') SubVulnerability=re.sub(r'\d+\.\d+\. 
','',h2) Link=self.attrib(VulnDetail.xpath('./div/div[2]/a'),'href','N/A') ParamTableRows=VulnDetail.xpath('./div/table//tr') lines=0; ParamTable='' for ParamTableRow in ParamTableRows: ParamTableCells=ParamTableRow.xpath('./td') cells=0 for ParamTableCell in ParamTableCells: cell=self.value(ParamTableCell.xpath('.//text()'),'N/A').strip() ParamTable+='| %s '%cell cells+=1 ParamTable='%s|\n'%ParamTable if lines==0: sepstr='' for i in range(0,cells): sepstr+='| ------- ' sepstr='%s|\n'%sepstr ParamTable+=sepstr lines+=1 d={'Target':Target, 'Vulnerability':Vulnerability, 'Risk':Risk, 'VulnDesc':VulnDesc, 'SubVulnerability':SubVulnerability, 'Link':Link, 'ParamTable':ParamTable,'findinglist':''} if not self.options['merge']: dirname=slugify('%s-%s-%s-%04d-netsparker'%(Risk, Target, Vulnerability, counter)) if not os.path.exists(self.options['output_dir']+'/'+dirname): os.makedirs(self.options['output_dir']+'/'+dirname) counter+=1 temp=self.template text=temp.substitute(d) if self.options['result_overwrite'] or (not os.path.exists(self.options['output_dir']+'/'+dirname+'/document.md')): tmpfile = open(self.options['output_dir']+'/'+dirname+'/document.md', 'w'); tmpfile.write(text) tmpfile.close() else : slug=slugify('%s-%s-netsparker'%(Risk, Vulnerability)) if not findings.get(slug): findings[slug]=[] findings[slug].append(d) for key, values in findings.items(): findinglist = '' for d in values: d['VulnDesc']=d['VulnDesc'].replace('$','$$') d['ParamTable']=d['ParamTable'].replace('$','$$') d['Link']=d['Link'].replace('$','$$') temp=self.merge_findinglist_template text=temp.substitute(d) findinglist+=text+"\n\n" d['findinglist']=findinglist filename=key+".md"; temp=self.template text=temp.substitute(d) if self.options['result_overwrite'] or (not os.path.exists(self.options['output_dir']+'/'+filename)): tmpfile = open(self.options['output_dir']+'/'+filename, 'w'); tmpfile.write(text) tmpfile.close() def value(self, x, default): try: #ret=x[0].strip() ret="\n".join([html2markdown(html2markdown(y.strip(), True)) for y in x]) except Exception as e: try: ret=x.strip() except Exception as ee: ret=default return ret def attrib(self, x, attr, default): try: ret=x[0].attrib[attr] except Exception as e: try: ret=x.attrib[attr] except Exception as ee: ret=default return ret
license: gpl-2.0 | hash: 4,605,033,301,372,981,000 | line_mean: 44.503704 | line_max: 196 | alpha_frac: 0.496988 | autogenerated: false | ratio: 4.122819 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: swcurran/tfrs | path: backend/api/models/UserViewModel.py | copies: 1 | size: 1507
""" REST API Documentation for the NRS TFRS Credit Trading Application The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation. OpenAPI spec version: v1 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django.db import models class UserViewModel(models.Model): given_name = models.CharField(max_length=255, blank=True, null=True) surname = models.CharField(max_length=255, blank=True, null=True) email = models.CharField(max_length=255, blank=True, null=True) active = models.BooleanField() sm_authorization_id = models.CharField(max_length=255, blank=True, null=True) user_roles = models.ManyToManyField('UserRole', related_name='UserViewModeluser_roles', blank=True) class Meta: abstract = True
license: apache-2.0 | hash: 9,088,370,143,756,785,000 | line_mean: 40.861111 | line_max: 208 | alpha_frac: 0.704048 | autogenerated: false | ratio: 4.406433 | config_test: false | has_no_keywords: false | few_assignments: false
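
UserViewModel is declared abstract, so by itself it creates no table. A minimal sketch of a concrete model reusing its field set; the class name, table name, and import path (inferred from the record's path) are illustrative:

    # Import path inferred from backend/api/models/UserViewModel.py.
    from api.models.UserViewModel import UserViewModel


    class UserProfile(UserViewModel):
        """Concrete table that inherits the abstract field set above."""

        class Meta:
            db_table = "user_profile"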

repo_name: vanesa/kid-o | path: kido/admin/utils.py | copies: 1 | size: 3921
# -*- coding: utf-8 -*- """ Flask-Admin utilities.""" from flask import abort, redirect, request, url_for from flask_admin import AdminIndexView, expose from flask_admin.base import MenuLink from flask_admin.contrib.sqla import ModelView from flask_login import current_user from functools import wraps from kido import app from kido.constants import PERMISSION_ADMIN def admin_required(f): @wraps(f) def decorated(*args, **kwargs): if not current_user.is_authenticated: return redirect(url_for("views.general.login", next=request.url)) users_permissions = current_user.permissions if PERMISSION_ADMIN not in users_permissions: app.logger.debug("Not an admin") abort(404) return f(*args, **kwargs) return decorated def permission_required(permissions): if not isinstance(permissions, (list, set, tuple)): permissions = [permissions] permissions = [x.upper() for x in permissions] def decorator(method): @wraps(method) def f(*args, **kwargs): if not current_user.is_authenticated: return redirect(url_for("views.general.login", next=request.url)) users_permissions = current_user.permissions if PERMISSION_ADMIN not in users_permissions: for permission in permissions: if permission not in users_permissions: app.logger.debug("Missing permission: {0}".format(permission)) abort(404) return method(*args, **kwargs) return f return decorator class AuthenticatedMenuLink(MenuLink): def is_accessible(self): return current_user.is_authenticated class CustomAdminIndexView(AdminIndexView): extra_css = None extra_js = None @expose("/") @admin_required def index(self): if not current_user.is_authenticated: return redirect(url_for("views.general.login", next=request.url)) return super(CustomAdminIndexView, self).index() @expose("/login/") def login_view(self): return redirect(url_for("views.general.login", next=request.url)) @expose("/logout/") def logout_view(self): return redirect("/logout") class CustomModelView(ModelView): page_size = 50 extra_css = None extra_js = None action_template = "admin/action.html" edit_template = "admin/model/edit.html" create_template = "admin/model/create.html" list_template = "admin/model/custom_list.html" _include = None class_attributes = [ "page_size", "can_create", "can_edit", "can_delete", "column_searchable_list", "column_filters", "column_exclude_list", "column_default_sort", ] def __init__(self, *args, **kwargs): if "exclude" in kwargs: self.form_excluded_columns = kwargs["exclude"] del kwargs["exclude"] if "include" in kwargs: self._include = kwargs["include"] del kwargs["include"] for item in self.class_attributes: if item in kwargs: setattr(self, item, kwargs[item]) del kwargs[item] super(CustomModelView, self).__init__(*args, **kwargs) def get_list_columns(self): if self._include: return self.get_column_names( only_columns=self.scaffold_list_columns() + self._include, excluded_columns=self.column_exclude_list, ) return super(CustomModelView, self).get_list_columns() def is_accessible(self): if not current_user.is_authenticated: return False users_permissions = current_user.permissions return PERMISSION_ADMIN in users_permissions def inaccessible_callback(self, name, **kwargs): return abort(404)
license: bsd-3-clause | hash: -352,002,768,738,183,740 | line_mean: 28.931298 | line_max: 86 | alpha_frac: 0.61872 | autogenerated: false | ratio: 4.193583 | config_test: false | has_no_keywords: false | few_assignments: false
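
The record supplies admin_required and permission_required decorators plus Flask-Admin view classes. A hedged sketch of guarding an ordinary view function with permission_required as defined above, assuming it runs inside the kido Flask app's request context; the permission names and the view itself are illustrative:

    # Module path taken from the record's `path` field.
    from kido.admin.utils import permission_required


    @permission_required(["reports", "exports"])
    def reports_view():
        # Runs only when the current user is authenticated and is either an admin
        # or holds both permissions; otherwise the decorator redirects to login
        # or aborts with 404, per the decorator body in the record.
        return "ok"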

repo_name: eepgwde/pyeg0 | path: gmus/GMus0.py | copies: 1 | size: 1699
## @file GMus0.py
# @brief Application support class for the Unofficial Google Music API.
# @author weaves
#
# @details
# This class uses @c gmusicapi.
#
# @note
# An application support class is one that uses a set of driver classes
# to provide a set of higher-level application specific methods.
#
# @see
# https://github.com/simon-weber/Unofficial-Google-Music-API
# http://unofficial-google-music-api.readthedocs.org/en/latest/

from __future__ import print_function

from GMus00 import GMus00

import logging
import ConfigParser, os, logging

import pandas as pd
import json

from gmusicapi import Mobileclient

## Set of file paths for the configuration file.
paths = ['site.cfg', os.path.expanduser('~/share/site/.safe/gmusic.cfg')]

## Google Music API login, search and result cache.
#
# The class needs a configuration file with these contents. (The
# values of the keys must be a valid Google Play account.)
#
# <pre>
# [credentials]
# username=username\@gmail.com
# password=SomePassword9
# </pre>
class GMus0(GMus00):

    ## Ad-hoc method to find the indices of duplicated entries.
    def duplicated(self):
        # self._df = self._df.sort(['album', 'title', 'creationTimestamp'],
        #                          ascending=[1, 1, 0])
        df = self.df[list(['title', 'album', 'creationTimestamp'])]
        df['n0'] = df['title'] + '|' + df['album']
        df = df.sort(['n0', 'creationTimestamp'], ascending=[1, 0])
        # Only rely on counts of 2.
        s0 = pd.Series(df.n0)
        s1 = s0.value_counts()
        s2 = set((s1[s1.values >= 2]).index)
        df1 = df[df.n0.isin(s2)]
        df1['d'] = df1.duplicated('n0')
        s3 = list(df1[df1.d].index)
        return s3
license: gpl-3.0 | hash: 6,076,076,360,128,342,000 | line_mean: 30.462963 | line_max: 74 | alpha_frac: 0.656857 | autogenerated: false | ratio: 3.23619 | config_test: false | has_no_keywords: false | few_assignments: false
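
GMus0.duplicated() leans on DataFrame.sort, which was removed from pandas long ago. A hedged sketch of the same duplicate-finding logic against the current pandas API; the column names follow the record, everything else is illustrative:

    import pandas as pd


    def duplicated_indices(df):
        """Indices of the older copies among rows sharing (title, album), mirroring GMus0.duplicated()."""
        d = df[["title", "album", "creationTimestamp"]].copy()
        d["n0"] = d["title"] + "|" + d["album"]
        # Newest entry first within each (title, album) group.
        d = d.sort_values(["n0", "creationTimestamp"], ascending=[True, False])
        counts = d["n0"].value_counts()
        repeated = counts[counts >= 2].index
        d = d[d["n0"].isin(repeated)]
        # Everything after the first (newest) occurrence is a redundant copy.
        return list(d[d.duplicated("n0")].index)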

repo_name: mdmueller/ascii-profiling | path: parallel.py | copies: 1 | size: 4245
import timeit
import time
from astropy.io import ascii
import pandas
import numpy as np
from astropy.table import Table, Column
from tempfile import NamedTemporaryFile
import random
import string
import matplotlib.pyplot as plt
import webbrowser


def make_table(table, size=10000, n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None):
    if str_val is None:
        str_val = "abcde12345"
    cols = []
    for i in xrange(n_floats):
        dat = np.random.uniform(low=1, high=10, size=size)
        cols.append(Column(dat, name='f{}'.format(i)))
    for i in xrange(n_ints):
        dat = np.random.randint(low=-9999999, high=9999999, size=size)
        cols.append(Column(dat, name='i{}'.format(i)))
    for i in xrange(n_strs):
        if str_val == 'random':
            dat = np.array([''.join([random.choice(string.letters) for j in range(10)]) for k in range(size)])
        else:
            dat = np.repeat(str_val, size)
        cols.append(Column(dat, name='s{}'.format(i)))
    t = Table(cols)
    if float_format is not None:
        for col in t.columns.values():
            if col.name.startswith('f'):
                col.format = float_format
    t.write(table.name, format='ascii')

output_text = []

def plot_case(n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None):
    global table1, output_text

    n_rows = (10000, 20000, 50000, 100000, 200000)  # include 200000 for publish run
    numbers = (1, 1, 1, 1, 1)
    repeats = (3, 2, 1, 1, 1)
    times_fast = []
    times_fast_parallel = []
    times_pandas = []

    for n_row, number, repeat in zip(n_rows, numbers, repeats):
        table1 = NamedTemporaryFile()
        make_table(table1, n_row, n_floats, n_ints, n_strs, float_format, str_val)

        t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, use_fast_converter=True)",
                          setup='from __main__ import ascii, table1', number=number, repeat=repeat)
        times_fast.append(min(t) / number)

        t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, parallel=True, use_fast_converter=True)",
                          setup='from __main__ import ascii, table1', number=number, repeat=repeat)
        times_fast_parallel.append(min(t) / number)

        t = timeit.repeat("pandas.read_csv(table1.name, sep=' ', header=0)",
                          setup='from __main__ import table1, pandas', number=number, repeat=repeat)
        times_pandas.append(min(t) / number)

    plt.loglog(n_rows, times_fast, '-or', label='io.ascii Fast-c')
    plt.loglog(n_rows, times_fast_parallel, '-og', label='io.ascii Parallel Fast-c')
    plt.loglog(n_rows, times_pandas, '-oc', label='Pandas')
    plt.grid()
    plt.legend(loc='best')
    plt.title('n_floats={} n_ints={} n_strs={} float_format={} str_val={}'.format(
        n_floats, n_ints, n_strs, float_format, str_val))
    plt.xlabel('Number of rows')
    plt.ylabel('Time (sec)')
    img_file = 'graph{}.png'.format(len(output_text) + 1)
    plt.savefig(img_file)
    plt.clf()
    text = 'Pandas to io.ascii Fast-C speed ratio: {:.2f} : 1<br/>'.format(times_fast[-1] / times_pandas[-1])
    text += 'io.ascii parallel to Pandas speed ratio: {:.2f} : 1'.format(times_pandas[-1] / times_fast_parallel[-1])
    output_text.append((img_file, text))

plot_case(n_floats=10, n_ints=0, n_strs=0)
plot_case(n_floats=10, n_ints=10, n_strs=10)
plot_case(n_floats=10, n_ints=10, n_strs=10, float_format='%.4f')
plot_case(n_floats=10, n_ints=0, n_strs=0, float_format='%.4f')
plot_case(n_floats=0, n_ints=0, n_strs=10)
plot_case(n_floats=0, n_ints=0, n_strs=10, str_val="'asdf asdfa'")
plot_case(n_floats=0, n_ints=0, n_strs=10, str_val="random")
plot_case(n_floats=0, n_ints=10, n_strs=0)

html_file = open('out.html', 'w')
html_file.write('<html><head><meta charset="utf-8"/><meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>')
html_file.write('</html><body><h1 style="text-align:center;">Profile of io.ascii</h1>')
for img, descr in output_text:
    html_file.write('<img src="{}"><p style="font-weight:bold;">{}</p><hr>'.format(img, descr))
html_file.write('</body></html>')
html_file.close()
webbrowser.open('out.html')
license: mit | hash: -9,088,434,750,835,889,000 | line_mean: 45.141304 | line_max: 122 | alpha_frac: 0.623793 | autogenerated: false | ratio: 2.983134 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: patriciohc/carga-de-xls-a-MySQL | path: Choose_file.py | copies: 1 | size: 3639
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'Choose_file.ui' # # Created: Sat Oct 17 15:55:19 2015 # by: PyQt4 UI code generator 4.10.4 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8("MainWindow")) MainWindow.resize(524, 146) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8("centralwidget")) self.verticalLayoutWidget = QtGui.QWidget(self.centralwidget) self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 501, 81)) self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget")) self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget) self.verticalLayout.setMargin(0) self.verticalLayout.setObjectName(_fromUtf8("verticalLayout")) self.horizontalLayout_2 = QtGui.QHBoxLayout() self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2")) self.label = QtGui.QLabel(self.verticalLayoutWidget) self.label.setObjectName(_fromUtf8("label")) self.horizontalLayout_2.addWidget(self.label) self.txtFile = QtGui.QLineEdit(self.verticalLayoutWidget) self.txtFile.setObjectName(_fromUtf8("txtFile")) self.horizontalLayout_2.addWidget(self.txtFile) self.btChooseFile = QtGui.QPushButton(self.verticalLayoutWidget) self.btChooseFile.setObjectName(_fromUtf8("btChooseFile")) self.horizontalLayout_2.addWidget(self.btChooseFile) self.verticalLayout.addLayout(self.horizontalLayout_2) self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.btClose = QtGui.QPushButton(self.verticalLayoutWidget) self.btClose.setObjectName(_fromUtf8("btClose")) self.horizontalLayout.addWidget(self.btClose) self.btLoadFile = QtGui.QPushButton(self.verticalLayoutWidget) self.btLoadFile.setObjectName(_fromUtf8("btLoadFile")) self.horizontalLayout.addWidget(self.btLoadFile) self.verticalLayout.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtGui.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 524, 25)) self.menubar.setObjectName(_fromUtf8("menubar")) MainWindow.setMenuBar(self.menubar) self.statusbar = QtGui.QStatusBar(MainWindow) self.statusbar.setObjectName(_fromUtf8("statusbar")) MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None)) self.label.setText(_translate("MainWindow", "File", None)) self.btChooseFile.setText(_translate("MainWindow", "Choose", None)) self.btClose.setText(_translate("MainWindow", "Cerrar", None)) self.btLoadFile.setText(_translate("MainWindow", "Cargar Archivo", None))
license: apache-2.0 | hash: -8,237,645,389,629,048,000 | line_mean: 46.25974 | line_max: 82 | alpha_frac: 0.718054 | autogenerated: false | ratio: 4.206936 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: c0cky/mediathread | path: mediathread/djangosherd/api.py | copies: 1 | size: 4549
# pylint: disable-msg=R0904 from tastypie import fields from tastypie.resources import ModelResource from mediathread.api import UserResource, TagResource from mediathread.assetmgr.models import Asset from mediathread.djangosherd.models import SherdNote, DiscussionIndex from mediathread.projects.models import ProjectNote from mediathread.taxonomy.api import TermResource from mediathread.taxonomy.models import TermRelationship class SherdNoteResource(ModelResource): author = fields.ForeignKey(UserResource, 'author', full=True, null=True, blank=True) class Meta: queryset = SherdNote.objects.select_related('asset').order_by("id") excludes = ['tags', 'body', 'added', 'modified'] list_allowed_methods = [] detail_allowed_methods = [] def dehydrate(self, bundle): try: bundle.data['is_global_annotation'] = \ bundle.obj.is_global_annotation() bundle.data['asset_id'] = str(bundle.obj.asset.id) bundle.data['is_null'] = bundle.obj.is_null() bundle.data['annotation'] = bundle.obj.annotation() bundle.data['url'] = bundle.obj.get_absolute_url() modified = bundle.obj.modified.strftime("%m/%d/%y %I:%M %p") \ if bundle.obj.modified else '' bundle.data['metadata'] = { 'tags': TagResource().render_list(bundle.request, bundle.obj.tags_split()), 'body': bundle.obj.body.strip() if bundle.obj.body else '', 'primary_type': bundle.obj.asset.primary.label, 'modified': modified, 'timecode': bundle.obj.range_as_timecode(), 'title': bundle.obj.title } editable = (bundle.request.user.id == getattr(bundle.obj, 'author_id', -1)) citable = bundle.request.GET.get('citable', '') == 'true' # assumed: there is only one ProjectNote per annotation reference = ProjectNote.objects.filter( annotation__id=bundle.obj.id).first() if reference: # notes in a submitted response are not editable editable = editable and not reference.project.is_submitted() if citable: # this is a heavy operation. don't call it unless needed citable = reference.project.can_cite(bundle.request.course, bundle.request.user) bundle.data['editable'] = editable bundle.data['citable'] = citable termResource = TermResource() vocabulary = {} related = TermRelationship.objects.get_for_object( bundle.obj).prefetch_related('term__vocabulary') for rel in related: if rel.term.vocabulary.id not in vocabulary: vocabulary[rel.term.vocabulary.id] = { 'id': rel.term.vocabulary.id, 'display_name': rel.term.vocabulary.display_name, 'terms': [] } vocabulary[rel.term.vocabulary.id]['terms'].append( termResource.render_one(bundle.request, rel.term)) bundle.data['vocabulary'] = vocabulary.values() except Asset.DoesNotExist: bundle.data['asset_id'] = '' bundle.data['metadata'] = {'title': 'Item Deleted'} return bundle def render_one(self, request, selection, asset_key): # assumes user is allowed to see this note bundle = self.build_bundle(obj=selection, request=request) dehydrated = self.full_dehydrate(bundle) bundle.data['asset_key'] = '%s_%s' % (asset_key, bundle.data['asset_id']) return self._meta.serializer.to_simple(dehydrated, None) class DiscussionIndexResource(object): def render_list(self, request, indicies): collaborations = DiscussionIndex.with_permission(request, indicies) ctx = { 'references': [{ 'id': obj.collaboration.object_pk, 'title': obj.collaboration.title, 'type': obj.get_type_label(), 'url': obj.get_absolute_url(), 'modified': obj.modified.strftime("%m/%d/%y %I:%M %p")} for obj in collaborations]} return ctx
license: gpl-2.0 | hash: -7,704,276,080,529,471,000 | line_mean: 41.915094 | line_max: 79 | alpha_frac: 0.56386 | autogenerated: false | ratio: 4.275376 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: fdslight/fdslight | path: freenet/handlers/tundev.py | copies: 1 | size: 5566
#!/usr/bin/env python3 import os, sys import pywind.evtframework.handlers.handler as handler import freenet.lib.fn_utils as fn_utils import freenet.lib.simple_qos as simple_qos try: import fcntl except ImportError: pass class tun_base(handler.handler): __creator_fd = None # 要写入到tun的IP包 ___ip_packets_for_write = [] # 写入tun设备的最大IP数据包的个数 __MAX_WRITE_QUEUE_SIZE = 1024 # 当前需要写入tun设备的IP数据包的个数 __current_write_queue_n = 0 __BLOCK_SIZE = 16 * 1024 __qos = None def __create_tun_dev(self, name): """创建tun 设备 :param name: :return fd: """ tun_fd = fn_utils.tuntap_create(name, fn_utils.IFF_TUN | fn_utils.IFF_NO_PI) fn_utils.interface_up(name) if tun_fd < 0: raise SystemError("can not create tun device,please check your root") return tun_fd @property def creator(self): return self.__creator_fd def init_func(self, creator_fd, tun_dev_name, *args, **kwargs): """ :param creator_fd: :param tun_dev_name:tun 设备名称 :param subnet:如果是服务端则需要则个参数 """ tun_fd = self.__create_tun_dev(tun_dev_name) if tun_fd < 3: print("error:create tun device failed:%s" % tun_dev_name) sys.exit(-1) self.__creator_fd = creator_fd self.__qos = simple_qos.qos(simple_qos.QTYPE_DST) self.set_fileno(tun_fd) fcntl.fcntl(tun_fd, fcntl.F_SETFL, os.O_NONBLOCK) self.dev_init(tun_dev_name, *args, **kwargs) return tun_fd def dev_init(self, dev_name, *args, **kwargs): pass def evt_read(self): for i in range(32): try: ip_packet = os.read(self.fileno, self.__BLOCK_SIZE) except BlockingIOError: break self.__qos.add_to_queue(ip_packet) self.__qos_from_tundev() def task_loop(self): self.__qos_from_tundev() def __qos_from_tundev(self): results = self.__qos.get_queue() for ip_packet in results: self.handle_ip_packet_from_read(ip_packet) if not results: self.del_loop_task(self.fileno) else: self.add_to_loop_task(self.fileno) def evt_write(self): try: ip_packet = self.___ip_packets_for_write.pop(0) except IndexError: self.remove_evt_write(self.fileno) return self.__current_write_queue_n -= 1 try: os.write(self.fileno, ip_packet) except BlockingIOError: self.__current_write_queue_n += 1 self.___ip_packets_for_write.insert(0, ip_packet) return '''''' def handle_ip_packet_from_read(self, ip_packet): """处理读取过来的IP包,重写这个方法 :param ip_packet: :return None: """ pass def handle_ip_packet_for_write(self, ip_packet): """处理要写入的IP包,重写这个方法 :param ip_packet: :return new_ip_packet: """ pass def error(self): self.dev_error() def dev_error(self): """重写这个方法 :return: """ pass def timeout(self): self.dev_timeout() def dev_timeout(self): """重写这个方法 :return: """ pass def delete(self): self.dev_delete() def dev_delete(self): """重写这个方法 :return: """ pass def add_to_sent_queue(self, ip_packet): # 丢到超出规定的数据包,防止内存过度消耗 n_ip_message = self.handle_ip_packet_for_write(ip_packet) if not n_ip_message: return if self.__current_write_queue_n == self.__MAX_WRITE_QUEUE_SIZE: # 删除第一个包,防止队列过多 self.__current_write_queue_n -= 1 self.___ip_packets_for_write.pop(0) return self.__current_write_queue_n += 1 self.___ip_packets_for_write.append(n_ip_message) class tundevs(tun_base): """服务端的tun数据处理 """ def dev_init(self, dev_name): self.register(self.fileno) self.add_evt_read(self.fileno) def handle_ip_packet_from_read(self, ip_packet): self.dispatcher.send_msg_to_tunnel_from_tun(ip_packet) def handle_ip_packet_for_write(self, ip_packet): return ip_packet def dev_delete(self): self.unregister(self.fileno) os.close(self.fileno) def dev_error(self): self.delete_handler(self.fileno) def dev_timeout(self): pass def handle_msg_from_tunnel(self, message): self.add_to_sent_queue(message) 
self.add_evt_write(self.fileno) class tundevc(tun_base): def dev_init(self, dev_name): self.register(self.fileno) self.add_evt_read(self.fileno) def handle_ip_packet_from_read(self, ip_packet): self.dispatcher.handle_msg_from_tundev(ip_packet) def handle_ip_packet_for_write(self, ip_packet): return ip_packet def dev_delete(self): self.unregister(self.fileno) os.close(self.fileno) def dev_error(self): self.delete_handler(self.fileno) def dev_timeout(self): pass def msg_from_tunnel(self, message): self.add_to_sent_queue(message) self.add_evt_write(self.fileno)
license: bsd-2-clause | hash: 794,198,108,318,362,000 | line_mean: 23.490741 | line_max: 84 | alpha_frac: 0.567864 | autogenerated: false | ratio: 3.099004 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: vesellov/bitdust.devel | path: customer/data_sender.py | copies: 1 | size: 14665
#!/usr/bin/python # data_sender.py # # Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io # # This file (data_sender.py) is part of BitDust Software. # # BitDust is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # BitDust Software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with BitDust Software. If not, see <http://www.gnu.org/licenses/>. # # Please contact us if you have any questions at bitdust.io@gmail.com # # # # """ .. module:: data_sender. .. raw:: html <a href="https://bitdust.io/automats/data_sender/data_sender.png" target="_blank"> <img src="https://bitdust.io/automats/data_sender/data_sender.png" style="max-width:100%;"> </a> A state machine to manage data sending process, acts very simple: 1) when new local data is created it tries to send it to the correct supplier 2) wait while ``p2p.io_throttle`` is doing some data transmission to remote suppliers 3) calls ``p2p.backup_matrix.ScanBlocksToSend()`` to get a list of pieces needs to be send 4) this machine is restarted every minute to check if some more data needs to be send 5) also can be restarted at any time when it is needed EVENTS: * :red:`block-acked` * :red:`block-failed` * :red:`init` * :red:`new-data` * :red:`restart` * :red:`scan-done` * :red:`timer-1min` * :red:`timer-1sec` """ #------------------------------------------------------------------------------ from __future__ import absolute_import from io import open #------------------------------------------------------------------------------ _Debug = True _DebugLevel = 12 #------------------------------------------------------------------------------ import os import time #------------------------------------------------------------------------------ from logs import lg from automats import automat from automats import global_state from lib import misc from lib import packetid from contacts import contactsdb from userid import my_id from main import settings from p2p import contact_status from . import io_throttle #------------------------------------------------------------------------------ _DataSender = None _ShutdownFlag = False #------------------------------------------------------------------------------ def A(event=None, arg=None): """ Access method to interact with the state machine. """ global _DataSender if _DataSender is None: _DataSender = DataSender( name='data_sender', state='READY', debug_level=_DebugLevel, log_events=_Debug, log_transitions=_Debug, ) if event is not None: _DataSender.automat(event, arg) return _DataSender def Destroy(): """ Destroy the state machine and remove the instance from memory. """ global _DataSender if _DataSender is None: return _DataSender.destroy() del _DataSender _DataSender = None class DataSender(automat.Automat): """ A class to manage process of sending data packets to remote suppliers. 
""" timers = { 'timer-1min': (60, ['READY']), 'timer-1min': (60, ['READY']), 'timer-1sec': (1.0, ['SENDING']), } statistic = {} def state_changed(self, oldstate, newstate, event, arg): global_state.set_global_state('DATASEND ' + newstate) def A(self, event, arg): #---READY--- if self.state == 'READY': if event == 'new-data' or event == 'timer-1min' or event == 'restart': self.state = 'SCAN_BLOCKS' self.doScanAndQueue(arg) elif event == 'init': pass #---SCAN_BLOCKS--- elif self.state == 'SCAN_BLOCKS': if event == 'scan-done' and self.isQueueEmpty(arg): self.state = 'READY' self.doRemoveUnusedFiles(arg) elif event == 'scan-done' and not self.isQueueEmpty(arg): self.state = 'SENDING' #---SENDING--- elif self.state == 'SENDING': if event == 'restart' or ( ( event == 'timer-1sec' or event == 'block-acked' or event == 'block-failed' or event == 'new-data' ) and self.isQueueEmpty(arg) ): self.state = 'SCAN_BLOCKS' self.doScanAndQueue(arg) return None def isQueueEmpty(self, arg): if not arg: return io_throttle.IsSendingQueueEmpty() remoteID, _ = arg return io_throttle.OkToSend(remoteID) def doScanAndQueue(self, arg): global _ShutdownFlag if _Debug: lg.out(_DebugLevel, 'data_sender.doScanAndQueue _ShutdownFlag=%r' % _ShutdownFlag) if _Debug: log = open(os.path.join(settings.LogsDir(), 'data_sender.log'), 'w') log.write(u'doScanAndQueue %s\n' % time.asctime()) # .decode('utf-8') if _ShutdownFlag: if _Debug: log.write(u'doScanAndQueue _ShutdownFlag is True\n') self.automat('scan-done') if _Debug: log.flush() log.close() return for customer_idurl in contactsdb.known_customers(): if '' not in contactsdb.suppliers(customer_idurl): from storage import backup_matrix for backupID in misc.sorted_backup_ids( list(backup_matrix.local_files().keys()), True): this_customer_idurl = packetid.CustomerIDURL(backupID) if this_customer_idurl != customer_idurl: continue packetsBySupplier = backup_matrix.ScanBlocksToSend(backupID) if _Debug: log.write(u'%s\n' % packetsBySupplier) for supplierNum in packetsBySupplier.keys(): supplier_idurl = contactsdb.supplier(supplierNum, customer_idurl=customer_idurl) if not supplier_idurl: lg.warn('unknown supplier_idurl supplierNum=%s for %s, customer_idurl=%s' % ( supplierNum, backupID, customer_idurl)) continue for packetID in packetsBySupplier[supplierNum]: backupID_, _, supplierNum_, _ = packetid.BidBnSnDp(packetID) if backupID_ != backupID: lg.warn('unexpected backupID supplierNum=%s for %s, customer_idurl=%s' % ( packetID, backupID, customer_idurl)) continue if supplierNum_ != supplierNum: lg.warn('unexpected supplierNum %s for %s, customer_idurl=%s' % ( packetID, backupID, customer_idurl)) continue if io_throttle.HasPacketInSendQueue( supplier_idurl, packetID): if _Debug: log.write(u'%s already in sending queue for %s\n' % (packetID, supplier_idurl)) continue if not io_throttle.OkToSend(supplier_idurl): if _Debug: log.write(u'skip, not ok to send %s\n' % supplier_idurl) continue customerGlobalID, pathID = packetid.SplitPacketID(packetID) # tranByID = gate.transfers_out_by_idurl().get(supplier_idurl, []) # if len(tranByID) > 3: # log.write(u'transfers by %s: %d\n' % (supplier_idurl, len(tranByID))) # continue customerGlobalID, pathID = packetid.SplitPacketID(packetID) filename = os.path.join( settings.getLocalBackupsDir(), customerGlobalID, pathID, ) if not os.path.isfile(filename): if _Debug: log.write(u'%s is not a file\n' % filename) continue if io_throttle.QueueSendFile( filename, packetID, supplier_idurl, my_id.getLocalID(), self._packetAcked, self._packetFailed, ): if 
_Debug: log.write(u'io_throttle.QueueSendFile %s\n' % packetID) else: if _Debug: log.write(u'io_throttle.QueueSendFile FAILED %s\n' % packetID) # lg.out(6, ' %s for %s' % (packetID, backupID)) # DEBUG # break self.automat('scan-done') if _Debug: log.flush() log.close() # def doPrintStats(self, arg): # """ # """ # if lg.is_debug(18): # transfers = transport_control.current_transfers() # bytes_stats = transport_control.current_bytes_transferred() # s = '' # for info in transfers: # s += '%s ' % (diskspace.MakeStringFromBytes(bytes_stats[info.transfer_id]).replace(' ', '').replace('bytes', 'b')) # lg.out(0, 'transfers: ' + s[:120]) def doRemoveUnusedFiles(self, arg): # we want to remove files for this block # because we only need them during rebuilding if settings.getBackupsKeepLocalCopies() is True: # if user set this in settings - he want to keep the local files return # ... user do not want to keep local backups if settings.getGeneralWaitSuppliers() is True: from customer import fire_hire # but he want to be sure - all suppliers are green for a long time if len(contact_status.listOfflineSuppliers()) > 0 or time.time( ) - fire_hire.GetLastFireTime() < 24 * 60 * 60: # some people are not there or we do not have stable team yet # do not remove the files because we need it to rebuild return count = 0 from storage import backup_matrix from storage import restore_monitor from storage import backup_rebuilder if _Debug: lg.out(_DebugLevel, 'data_sender.doRemoveUnusedFiles') for backupID in misc.sorted_backup_ids( list(backup_matrix.local_files().keys())): if restore_monitor.IsWorking(backupID): if _Debug: lg.out( _DebugLevel, ' %s : SKIP, because restoring' % backupID) continue if backup_rebuilder.IsBackupNeedsWork(backupID): if _Debug: lg.out( _DebugLevel, ' %s : SKIP, because needs rebuilding' % backupID) continue if not backup_rebuilder.ReadStoppedFlag(): if backup_rebuilder.A().currentBackupID is not None: if backup_rebuilder.A().currentBackupID == backupID: if _Debug: lg.out( _DebugLevel, ' %s : SKIP, because rebuilding is in process' % backupID) continue packets = backup_matrix.ScanBlocksToRemove( backupID, settings.getGeneralWaitSuppliers()) for packetID in packets: customer, pathID = packetid.SplitPacketID(packetID) filename = os.path.join(settings.getLocalBackupsDir(), customer, pathID) if os.path.isfile(filename): try: os.remove(filename) # lg.out(6, ' ' + os.path.basename(filename)) except: lg.exc() continue count += 1 if _Debug: lg.out(_DebugLevel, ' %d files were removed' % count) backup_matrix.ReadLocalFiles() def _packetAcked(self, packet, ownerID, packetID): from storage import backup_matrix backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp(packetID) backup_matrix.RemoteFileReport( backupID, blockNum, supplierNum, dataORparity, True) if ownerID not in self.statistic: self.statistic[ownerID] = [0, 0] self.statistic[ownerID][0] += 1 self.automat('block-acked', (ownerID, packetID)) def _packetFailed(self, remoteID, packetID, why): from storage import backup_matrix backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp( packetID) backup_matrix.RemoteFileReport( backupID, blockNum, supplierNum, dataORparity, False) if remoteID not in self.statistic: self.statistic[remoteID] = [0, 0] self.statistic[remoteID][1] += 1 self.automat('block-failed', (remoteID, packetID)) def statistic(): """ The ``data_sender()`` keeps track of sending results with every supplier. This is used by ``fire_hire()`` to decide how reliable is given supplier. 
""" global _DataSender if _DataSender is None: return {} return _DataSender.statistic def SetShutdownFlag(): """ Set flag to indicate that no need to send anything anymore. """ global _ShutdownFlag _ShutdownFlag = True
license: agpl-3.0 | hash: 4,224,043,803,797,804,000 | line_mean: 38.422043 | line_max: 170 | alpha_frac: 0.512104 | autogenerated: false | ratio: 4.385467 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: agacek/camkes-tool | path: camkes/internal/version.py | copies: 1 | size: 1813
#
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#

'''Versioning functionality. This computes a version identifier based on the
current source code state. It was decided this was more reliable while the
tool is under active development. Note that any extraneous files in your
source directory that match the version filters will be accumulated in the
version computation.'''

from memoization import memoized
import hashlib, os, re

@memoized
def version():
    # Files to consider relevant. Each entry should be a pair of (path, filter)
    # where 'path' is relative to the directory of this file and 'filter' is a
    # regex describing which filenames to match under the given path.
    SOURCES = [
        ('../', r'^.*\.py$'),     # Python sources
        ('../templates', r'.*'),  # Templates
    ]

    my_path = os.path.dirname(os.path.abspath(__file__))
    sources = set()

    # Accumulate all relevant source files.
    for s in SOURCES:
        path = os.path.join(my_path, s[0])
        regex = re.compile(s[1])
        for root, _, files in os.walk(path):
            for f in files:
                if regex.match(f):
                    sources.add(os.path.abspath(os.path.join(root, f)))

    # Hash each file and hash a concatenation of these hashes. Note, hashing a
    # hash is not good practice for cryptography, but it's fine for this
    # purpose.
    hfinal = hashlib.sha1()  #pylint: disable=E1101
    for s in sources:
        with open(s, 'r') as f:
            h = hashlib.sha1(f.read()).hexdigest()  #pylint: disable=E1101
            hfinal.update('%s|' % h)  #pylint: disable=E1101
    return hfinal.hexdigest()
license: bsd-2-clause | hash: -41,992,440,657,990,110 | line_mean: 35.26 | line_max: 79 | alpha_frac: 0.656922 | autogenerated: false | ratio: 3.832981 | config_test: false | has_no_keywords: false | few_assignments: false
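
version() hashes the tool's own Python sources and templates into a single SHA-1, and @memoized caches the result. A tiny usage sketch; the import path follows the record's path field:

    from camkes.internal.version import version

    v1 = version()
    v2 = version()   # cached by @memoized: the directory walk and hashing run only once
    assert v1 == v2
    print(v1)        # hex digest identifying the current source state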

repo_name: ibrica/universe-server | path: play.py | copies: 1 | size: 1073
from multiprocessing import Process
import time

import gym
import universe
from universe.spaces.vnc_event import keycode

from envs import create_env


def start_game(model, env_name):
    """Regular Python process, not using torch."""
    p = Process(target=play_game, args=(model, env_name))
    p.start()
    # Don't wait with join, respond to user request


def play_game(model, env_name):
    """Play game with saved model; if there's no model, play random."""
    env = create_env(env_name, client_id="play1", remotes=1)  # Local docker container
    max_game_length = 10000
    state = env.reset()
    reward_sum = 0
    start_time = time.time()
    for step in range(max_game_length):
        state, reward, done, _ = env.step(
            ['up' for i in range(60)])  # no saved model for now, keep pressing up, 60 times a minute
        reward_sum += reward
        print("Time {}, game reward {}, game length {}".format(
            time.strftime("%Hh %Mm %Ss"),
            reward_sum,
            time.gmtime(time.time() - start_time)))
        if done:
            break
license: mit | hash: -6,093,060,618,181,687,000 | line_mean: 33.645161 | line_max: 130 | alpha_frac: 0.630941 | autogenerated: false | ratio: 3.764912 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: droundy/deft | path: talks/colloquium/figs/plot-walls.py | copies: 1 | size: 3242
#!/usr/bin/python # We need the following two lines in order for matplotlib to work # without access to an X server. from __future__ import division import matplotlib matplotlib.use('Agg') import pylab, numpy, sys xmax = 2.5 xmin = -0.4 def plotit(dftdata, mcdata): dft_len = len(dftdata[:,0]) dft_dr = dftdata[2,0] - dftdata[1,0] mcdata = numpy.insert(mcdata,0,0,0) mcdata[0,0]=-10 mcoffset = 10/2 offset = -3/2 n0 = dftdata[:,6] nA = dftdata[:,8] nAmc = mcdata[:,11] n0mc = mcdata[:,10] pylab.figure(figsize=(6, 6)) pylab.subplots_adjust(hspace=0.001) n_plt = pylab.subplot(3,1,3) n_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,1]*4*numpy.pi/3,"b-",label='$n$ Monte Carlo') n_plt.plot(dftdata[:,0]/2+offset,dftdata[:,1]*4*numpy.pi/3,"b--",label='$n$ DFT') n_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5) n_plt.yaxis.set_major_locator(pylab.MaxNLocator(6,steps=[1,5,10],prune='upper')) pylab.ylim(ymin=0) pylab.xlim(xmin, xmax) pylab.xlabel("$z/\sigma$") pylab.ylabel("$n(\mathbf{r})$") n_plt.axvline(x=0, color='k', linestyle=':') n = len(mcdata[:,0]) #pylab.twinx() dftr = dftdata[:,0]/2+offset thiswork = dftdata[:,5] gross = dftdata[:,7] stop_here = int(dft_len - 1/dft_dr) print stop_here start_here = int(2.5/dft_dr) off = 1 me = 40 A_plt = pylab.subplot(3,1,1) A_plt.axvline(x=0, color='k', linestyle=':') A_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,2+2*off]/nAmc,"r-",label="$g_\sigma^A$ Monte Carlo") A_plt.plot(dftr[dftr>=0],thiswork[dftr>=0],"ro",markevery=me*.8,label="$g_\sigma^A$ this work") A_plt.plot(dftr[dftr>=0],gross[dftr>=0],"rx",markevery=me,label="Gross", markerfacecolor='none',markeredgecolor='red', markeredgewidth=1) A_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5) A_plt.yaxis.set_major_locator(pylab.MaxNLocator(integer=True,prune='upper')) pylab.ylim(ymin=0) pylab.ylabel("$g_\sigma^A$") pylab.xlim(xmin, xmax) n0mc[0]=1 mcdata[0,10]=1 S_plt = pylab.subplot(3,1,2) S_plt.axvline(x=0, color='k', linestyle=':') S_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,3+2*off]/n0mc,"g-",label="$g_\sigma^S$ Monte Carlo") S_plt.plot(dftdata[:,0]/2+offset,dftdata[:,4],"gx",markevery=me/2,label="Yu and Wu") S_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5) #pylab.ylim(ymax=12) S_plt.yaxis.set_major_locator(pylab.MaxNLocator(5,integer=True,prune='upper')) pylab.xlim(xmin, xmax) pylab.ylim(ymin=0) pylab.ylabel("$g_\sigma^S$") xticklabels = A_plt.get_xticklabels() + S_plt.get_xticklabels() pylab.setp(xticklabels, visible=False) mcdata10 = numpy.loadtxt('../../papers/contact/figs/mc-walls-20-196.dat') dftdata10 = numpy.loadtxt('../../papers/contact/figs/wallsWB-0.10.dat') mcdata40 = numpy.loadtxt('../../papers/contact/figs/mc-walls-20-817.dat') dftdata40 = numpy.loadtxt('../../papers/contact/figs/wallsWB-0.40.dat') plotit(dftdata10, mcdata10) pylab.savefig('figs/walls-10.pdf', transparent=True) plotit(dftdata40, mcdata40) pylab.savefig('figs/walls-40.pdf', transparent=True)
license: gpl-2.0 | hash: -4,687,526,299,663,627,000 | line_mean: 33.489362 | line_max: 99 | alpha_frac: 0.637569 | autogenerated: false | ratio: 2.515128 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: AprilBrother/esptool | path: esptool.py | copies: 1 | size: 28432
#!/usr/bin/env python # # ESP8266 ROM Bootloader Utility # https://github.com/themadinventor/esptool # # Copyright (C) 2014 Fredrik Ahlberg # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 Franklin # Street, Fifth Floor, Boston, MA 02110-1301 USA. import sys import struct import serial import time import argparse import os import subprocess import tempfile class ESPROM: # These are the currently known commands supported by the ROM ESP_FLASH_BEGIN = 0x02 ESP_FLASH_DATA = 0x03 ESP_FLASH_END = 0x04 ESP_MEM_BEGIN = 0x05 ESP_MEM_END = 0x06 ESP_MEM_DATA = 0x07 ESP_SYNC = 0x08 ESP_WRITE_REG = 0x09 ESP_READ_REG = 0x0a # Maximum block sized for RAM and Flash writes, respectively. ESP_RAM_BLOCK = 0x1800 ESP_FLASH_BLOCK = 0x100 # Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want. ESP_ROM_BAUD = 115200 # First byte of the application image ESP_IMAGE_MAGIC = 0xe9 # Initial state for the checksum routine ESP_CHECKSUM_MAGIC = 0xef # OTP ROM addresses ESP_OTP_MAC0 = 0x3ff00050 ESP_OTP_MAC1 = 0x3ff00054 # Sflash stub: an assembly routine to read from spi flash and send to host SFLASH_STUB = "\x80\x3c\x00\x40\x1c\x4b\x00\x40\x21\x11\x00\x40\x00\x80" \ "\xfe\x3f\xc1\xfb\xff\xd1\xf8\xff\x2d\x0d\x31\xfd\xff\x41\xf7\xff\x4a" \ "\xdd\x51\xf9\xff\xc0\x05\x00\x21\xf9\xff\x31\xf3\xff\x41\xf5\xff\xc0" \ "\x04\x00\x0b\xcc\x56\xec\xfd\x06\xff\xff\x00\x00" def __init__(self, port=0, baud=ESP_ROM_BAUD): self._port = serial.Serial(port) # setting baud rate in a separate step is a workaround for # CH341 driver on some Linux versions (this opens at 9600 then # sets), shouldn't matter for other platforms/drivers. See # https://github.com/themadinventor/esptool/issues/44#issuecomment-107094446 self._port.baudrate = baud """ Read bytes from the serial port while performing SLIP unescaping """ def read(self, length=1): b = '' while len(b) < length: c = self._port.read(1) if c == '\xdb': c = self._port.read(1) if c == '\xdc': b = b + '\xc0' elif c == '\xdd': b = b + '\xdb' else: raise FatalError('Invalid SLIP escape') else: b = b + c return b """ Write bytes to the serial port while performing SLIP escaping """ def write(self, packet): buf = '\xc0' \ + (packet.replace('\xdb','\xdb\xdd').replace('\xc0','\xdb\xdc')) \ + '\xc0' self._port.write(buf) """ Calculate checksum of a blob, as it is defined by the ROM """ @staticmethod def checksum(data, state=ESP_CHECKSUM_MAGIC): for b in data: state ^= ord(b) return state """ Send a request and read the response """ def command(self, op=None, data=None, chk=0): if op: pkt = struct.pack('<BBHI', 0x00, op, len(data), chk) + data self.write(pkt) # tries to get a response until that response has the # same operation as the request or a retries limit has # exceeded. This is needed for some esp8266s that # reply with more sync responses than expected. 
retries = 100 while retries > 0: (op_ret, val, body) = self.receive_response() if op is None or op_ret == op: return val, body # valid response received retries = retries - 1 raise FatalError("Response doesn't match request") """ Receive a response to a command """ def receive_response(self): # Read header of response and parse if self._port.read(1) != '\xc0': raise FatalError('Invalid head of packet') hdr = self.read(8) (resp, op_ret, len_ret, val) = struct.unpack('<BBHI', hdr) if resp != 0x01: raise FatalError('Invalid response 0x%02x" to command' % resp) # The variable-length body body = self.read(len_ret) # Terminating byte if self._port.read(1) != chr(0xc0): raise FatalError('Invalid end of packet') return op_ret, val, body """ Perform a connection test """ def sync(self): self.command(ESPROM.ESP_SYNC, '\x07\x07\x12\x20' + 32 * '\x55') for i in xrange(7): self.command() """ Try connecting repeatedly until successful, or giving up """ def connect(self): print 'Connecting...' for _ in xrange(4): # worst-case latency timer should be 255ms (probably <20ms) self._port.timeout = 0.3 for _ in xrange(4): try: self._port.flushInput() self._port.flushOutput() self.sync() self._port.timeout = 5 return except: time.sleep(0.05) raise FatalError('Failed to connect to ESP8266') """ Read memory address in target """ def read_reg(self, addr): res = self.command(ESPROM.ESP_READ_REG, struct.pack('<I', addr)) if res[1] != "\0\0": raise FatalError('Failed to read target memory') return res[0] """ Write to memory address in target """ def write_reg(self, addr, value, mask, delay_us=0): if self.command(ESPROM.ESP_WRITE_REG, struct.pack('<IIII', addr, value, mask, delay_us))[1] != "\0\0": raise FatalError('Failed to write target memory') """ Start downloading an application image to RAM """ def mem_begin(self, size, blocks, blocksize, offset): if self.command(ESPROM.ESP_MEM_BEGIN, struct.pack('<IIII', size, blocks, blocksize, offset))[1] != "\0\0": raise FatalError('Failed to enter RAM download mode') """ Send a block of an image to RAM """ def mem_block(self, data, seq): if self.command(ESPROM.ESP_MEM_DATA, struct.pack('<IIII', len(data), seq, 0, 0) + data, ESPROM.checksum(data))[1] != "\0\0": raise FatalError('Failed to write to target RAM') """ Leave download mode and run the application """ def mem_finish(self, entrypoint=0): if self.command(ESPROM.ESP_MEM_END, struct.pack('<II', int(entrypoint == 0), entrypoint))[1] != "\0\0": raise FatalError('Failed to leave RAM download mode') """ Start downloading to Flash (performs an erase) """ def flash_begin(self, size, offset): old_tmo = self._port.timeout num_blocks = (size + ESPROM.ESP_FLASH_BLOCK - 1) / ESPROM.ESP_FLASH_BLOCK sectors_per_block = 16 sector_size = 4096 num_sectors = (size + sector_size - 1) / sector_size start_sector = offset / sector_size head_sectors = sectors_per_block - (start_sector % sectors_per_block) if num_sectors < head_sectors: head_sectors = num_sectors if num_sectors < 2 * head_sectors: erase_size = (num_sectors + 1) / 2 * sector_size else: erase_size = (num_sectors - head_sectors) * sector_size self._port.timeout = 10 result = self.command(ESPROM.ESP_FLASH_BEGIN, struct.pack('<IIII', erase_size, num_blocks, ESPROM.ESP_FLASH_BLOCK, offset))[1] if result != "\0\0": raise FatalError.WithResult('Failed to enter Flash download mode (result "%s")', result) self._port.timeout = old_tmo """ Write block to flash """ def flash_block(self, data, seq): result = self.command(ESPROM.ESP_FLASH_DATA, struct.pack('<IIII', len(data), seq, 0, 0) + 
data, ESPROM.checksum(data))[1] if result != "\0\0": raise FatalError.WithResult('Failed to write to target Flash after seq %d (got result %%s)' % seq, result) """ Leave flash mode and run/reboot """ def flash_finish(self, reboot=False): pkt = struct.pack('<I', int(not reboot)) if self.command(ESPROM.ESP_FLASH_END, pkt)[1] != "\0\0": raise FatalError('Failed to leave Flash mode') """ Run application code in flash """ def run(self, reboot=False): # Fake flash begin immediately followed by flash end self.flash_begin(0, 0) self.flash_finish(reboot) """ Read MAC from OTP ROM """ def read_mac(self): mac0 = self.read_reg(self.ESP_OTP_MAC0) mac1 = self.read_reg(self.ESP_OTP_MAC1) if ((mac1 >> 16) & 0xff) == 0: oui = (0x18, 0xfe, 0x34) elif ((mac1 >> 16) & 0xff) == 1: oui = (0xac, 0xd0, 0x74) else: raise FatalError("Unknown OUI") return oui + ((mac1 >> 8) & 0xff, mac1 & 0xff, (mac0 >> 24) & 0xff) """ Read SPI flash manufacturer and device id """ def flash_id(self): self.flash_begin(0, 0) self.write_reg(0x60000240, 0x0, 0xffffffff) self.write_reg(0x60000200, 0x10000000, 0xffffffff) flash_id = self.read_reg(0x60000240) self.flash_finish(False) return flash_id """ Read SPI flash """ def flash_read(self, offset, size, count=1): # Create a custom stub stub = struct.pack('<III', offset, size, count) + self.SFLASH_STUB # Trick ROM to initialize SFlash self.flash_begin(0, 0) # Download stub self.mem_begin(len(stub), 1, len(stub), 0x40100000) self.mem_block(stub, 0) self.mem_finish(0x4010001c) # Fetch the data data = '' for _ in xrange(count): if self._port.read(1) != '\xc0': raise FatalError('Invalid head of packet (sflash read)') data += self.read(size) if self._port.read(1) != chr(0xc0): raise FatalError('Invalid end of packet (sflash read)') return data """ Abuse the loader protocol to force flash to be left in write mode """ def flash_unlock_dio(self): # Enable flash write mode self.flash_begin(0, 0) # Reset the chip rather than call flash_finish(), which would have # write protected the chip again (why oh why does it do that?!) self.mem_begin(0,0,0,0x40100000) self.mem_finish(0x40000080) """ Perform a chip erase of SPI flash """ def flash_erase(self): # Trick ROM to initialize SFlash self.flash_begin(0, 0) # This is hacky: we don't have a custom stub, instead we trick # the bootloader to jump to the SPIEraseChip() routine and then halt/crash # when it tries to boot an unconfigured system. self.mem_begin(0,0,0,0x40100000) self.mem_finish(0x40004984) # Yup - there's no good way to detect if we succeeded. # It it on the other hand unlikely to fail. class ESPFirmwareImage: def __init__(self, filename=None): self.segments = [] self.entrypoint = 0 self.flash_mode = 0 self.flash_size_freq = 0 if filename is not None: f = file(filename, 'rb') (magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', f.read(8)) # some sanity check if magic != ESPROM.ESP_IMAGE_MAGIC or segments > 16: raise FatalError('Invalid firmware image') for i in xrange(segments): (offset, size) = struct.unpack('<II', f.read(8)) if offset > 0x40200000 or offset < 0x3ffe0000 or size > 65536: raise FatalError('Suspicious segment 0x%x, length %d' % (offset, size)) segment_data = f.read(size) if len(segment_data) < size: raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data))) self.segments.append((offset, size, segment_data)) # Skip the padding. The checksum is stored in the last byte so that the # file is a multiple of 16 bytes. 
align = 15 - (f.tell() % 16) f.seek(align, 1) self.checksum = ord(f.read(1)) def add_segment(self, addr, data): # Data should be aligned on word boundary l = len(data) if l % 4: data += b"\x00" * (4 - l % 4) if l > 0: self.segments.append((addr, len(data), data)) def save(self, filename): f = file(filename, 'wb') f.write(struct.pack('<BBBBI', ESPROM.ESP_IMAGE_MAGIC, len(self.segments), self.flash_mode, self.flash_size_freq, self.entrypoint)) checksum = ESPROM.ESP_CHECKSUM_MAGIC for (offset, size, data) in self.segments: f.write(struct.pack('<II', offset, size)) f.write(data) checksum = ESPROM.checksum(data, checksum) align = 15 - (f.tell() % 16) f.seek(align, 1) f.write(struct.pack('B', checksum)) class ELFFile: def __init__(self, name): self.name = name self.symbols = None def _fetch_symbols(self): if self.symbols is not None: return self.symbols = {} try: tool_nm = "xtensa-lx106-elf-nm" if os.getenv('XTENSA_CORE') == 'lx106': tool_nm = "xt-nm" proc = subprocess.Popen([tool_nm, self.name], stdout=subprocess.PIPE) except OSError: print "Error calling %s, do you have Xtensa toolchain in PATH?" % tool_nm sys.exit(1) for l in proc.stdout: fields = l.strip().split() try: if fields[0] == "U": print "Warning: ELF binary has undefined symbol %s" % fields[1] continue self.symbols[fields[2]] = int(fields[0], 16) except ValueError: raise FatalError("Failed to strip symbol output from nm: %s" % fields) def get_symbol_addr(self, sym): self._fetch_symbols() return self.symbols[sym] def get_entry_point(self): tool_readelf = "xtensa-lx106-elf-readelf" if os.getenv('XTENSA_CORE') == 'lx106': tool_readelf = "xt-readelf" try: proc = subprocess.Popen([tool_readelf, "-h", self.name], stdout=subprocess.PIPE) except OSError: print "Error calling %s, do you have Xtensa toolchain in PATH?" % tool_readelf sys.exit(1) for l in proc.stdout: fields = l.strip().split() if fields[0] == "Entry": return int(fields[3], 0) def load_section(self, section): tool_objcopy = "xtensa-lx106-elf-objcopy" if os.getenv('XTENSA_CORE') == 'lx106': tool_objcopy = "xt-objcopy" tmpsection = tempfile.mktemp(suffix=".section") try: subprocess.check_call([tool_objcopy, "--only-section", section, "-Obinary", self.name, tmpsection]) with open(tmpsection, "rb") as f: data = f.read() finally: os.remove(tmpsection) return data def arg_auto_int(x): return int(x, 0) def div_roundup(a, b): """ Return a/b rounded up to nearest integer, equivalent result to int(math.ceil(float(int(a)) / float(int(b))), only without possible floating point accuracy errors. """ return (int(a) + int(b) - 1) / int(b) class FatalError(RuntimeError): """ Wrapper class for runtime errors that aren't caused by internal bugs, but by ESP8266 responses or input content. """ def __init__(self, message): RuntimeError.__init__(self, message) @staticmethod def WithResult(message, result): """ Return a fatal error object that includes the hex values of 'result' as a string formatted argument. 
""" return FatalError(message % ", ".join(hex(ord(x)) for x in result)) def main(): parser = argparse.ArgumentParser(description='ESP8266 ROM Bootloader Utility', prog='esptool') parser.add_argument( '--port', '-p', help='Serial port device', default='/dev/ttyUSB0') parser.add_argument( '--baud', '-b', help='Serial port baud rate', type=arg_auto_int, default=ESPROM.ESP_ROM_BAUD) subparsers = parser.add_subparsers( dest='operation', help='Run esptool {command} -h for additional help') parser_load_ram = subparsers.add_parser( 'load_ram', help='Download an image to RAM and execute') parser_load_ram.add_argument('filename', help='Firmware image') parser_dump_mem = subparsers.add_parser( 'dump_mem', help='Dump arbitrary memory to disk') parser_dump_mem.add_argument('address', help='Base address', type=arg_auto_int) parser_dump_mem.add_argument('size', help='Size of region to dump', type=arg_auto_int) parser_dump_mem.add_argument('filename', help='Name of binary dump') parser_read_mem = subparsers.add_parser( 'read_mem', help='Read arbitrary memory location') parser_read_mem.add_argument('address', help='Address to read', type=arg_auto_int) parser_write_mem = subparsers.add_parser( 'write_mem', help='Read-modify-write to arbitrary memory location') parser_write_mem.add_argument('address', help='Address to write', type=arg_auto_int) parser_write_mem.add_argument('value', help='Value', type=arg_auto_int) parser_write_mem.add_argument('mask', help='Mask of bits to write', type=arg_auto_int) parser_write_flash = subparsers.add_parser( 'write_flash', help='Write a binary blob to flash') parser_write_flash.add_argument('addr_filename', nargs='+', help='Address and binary file to write there, separated by space') parser_write_flash.add_argument('--flash_freq', '-ff', help='SPI Flash frequency', choices=['40m', '26m', '20m', '80m'], default='40m') parser_write_flash.add_argument('--flash_mode', '-fm', help='SPI Flash mode', choices=['qio', 'qout', 'dio', 'dout'], default='qio') parser_write_flash.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit', choices=['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default='4m') subparsers.add_parser( 'run', help='Run application code in flash') parser_image_info = subparsers.add_parser( 'image_info', help='Dump headers from an application image') parser_image_info.add_argument('filename', help='Image file to parse') parser_make_image = subparsers.add_parser( 'make_image', help='Create an application image from binary files') parser_make_image.add_argument('output', help='Output image file') parser_make_image.add_argument('--segfile', '-f', action='append', help='Segment input file') parser_make_image.add_argument('--segaddr', '-a', action='append', help='Segment base address', type=arg_auto_int) parser_make_image.add_argument('--entrypoint', '-e', help='Address of entry point', type=arg_auto_int, default=0) parser_elf2image = subparsers.add_parser( 'elf2image', help='Create an application image from ELF file') parser_elf2image.add_argument('input', help='Input ELF file') parser_elf2image.add_argument('--output', '-o', help='Output filename prefix', type=str) parser_elf2image.add_argument('--flash_freq', '-ff', help='SPI Flash frequency', choices=['40m', '26m', '20m', '80m'], default='40m') parser_elf2image.add_argument('--flash_mode', '-fm', help='SPI Flash mode', choices=['qio', 'qout', 'dio', 'dout'], default='qio') parser_elf2image.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit', choices=['4m', '2m', '8m', '16m', 
'32m', '16m-c1', '32m-c1', '32m-c2'], default='4m') subparsers.add_parser( 'read_mac', help='Read MAC address from OTP ROM') subparsers.add_parser( 'flash_id', help='Read SPI flash manufacturer and device ID') parser_read_flash = subparsers.add_parser( 'read_flash', help='Read SPI flash content') parser_read_flash.add_argument('address', help='Start address', type=arg_auto_int) parser_read_flash.add_argument('size', help='Size of region to dump', type=arg_auto_int) parser_read_flash.add_argument('filename', help='Name of binary dump') subparsers.add_parser( 'erase_flash', help='Perform Chip Erase on SPI flash') args = parser.parse_args() # Create the ESPROM connection object, if needed esp = None if args.operation not in ('image_info','make_image','elf2image'): esp = ESPROM(args.port, args.baud) esp.connect() # Do the actual work. Should probably be split into separate functions. if args.operation == 'load_ram': image = ESPFirmwareImage(args.filename) print 'RAM boot...' for (offset, size, data) in image.segments: print 'Downloading %d bytes at %08x...' % (size, offset), sys.stdout.flush() esp.mem_begin(size, div_roundup(size, esp.ESP_RAM_BLOCK), esp.ESP_RAM_BLOCK, offset) seq = 0 while len(data) > 0: esp.mem_block(data[0:esp.ESP_RAM_BLOCK], seq) data = data[esp.ESP_RAM_BLOCK:] seq += 1 print 'done!' print 'All segments done, executing at %08x' % image.entrypoint esp.mem_finish(image.entrypoint) elif args.operation == 'read_mem': print '0x%08x = 0x%08x' % (args.address, esp.read_reg(args.address)) elif args.operation == 'write_mem': esp.write_reg(args.address, args.value, args.mask, 0) print 'Wrote %08x, mask %08x to %08x' % (args.value, args.mask, args.address) elif args.operation == 'dump_mem': f = file(args.filename, 'wb') for i in xrange(args.size / 4): d = esp.read_reg(args.address + (i * 4)) f.write(struct.pack('<I', d)) if f.tell() % 1024 == 0: print '\r%d bytes read... (%d %%)' % (f.tell(), f.tell() * 100 / args.size), sys.stdout.flush() print 'Done!' elif args.operation == 'write_flash': assert len(args.addr_filename) % 2 == 0 flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode] flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size] flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq] flash_info = struct.pack('BB', flash_mode, flash_size_freq) while args.addr_filename: address = int(args.addr_filename[0], 0) filename = args.addr_filename[1] args.addr_filename = args.addr_filename[2:] image = file(filename, 'rb').read() print 'Erasing flash...' blocks = div_roundup(len(image), esp.ESP_FLASH_BLOCK) esp.flash_begin(blocks * esp.ESP_FLASH_BLOCK, address) seq = 0 written = 0 t = time.time() while len(image) > 0: print '\rWriting at 0x%08x... (%d %%)' % (address + seq * esp.ESP_FLASH_BLOCK, 100 * (seq + 1) / blocks), sys.stdout.flush() block = image[0:esp.ESP_FLASH_BLOCK] # Fix sflash config data if address == 0 and seq == 0 and block[0] == '\xe9': block = block[0:2] + flash_info + block[4:] # Pad the last block block = block + '\xff' * (esp.ESP_FLASH_BLOCK - len(block)) esp.flash_block(block, seq) image = image[esp.ESP_FLASH_BLOCK:] seq += 1 written += len(block) t = time.time() - t print '\rWrote %d bytes at 0x%08x in %.1f seconds (%.1f kbit/s)...' % (written, address, t, written / t * 8 / 1000) print '\nLeaving...' 
if args.flash_mode == 'dio': esp.flash_unlock_dio() else: esp.flash_begin(0, 0) esp.flash_finish(False) elif args.operation == 'run': esp.run() elif args.operation == 'image_info': image = ESPFirmwareImage(args.filename) print ('Entry point: %08x' % image.entrypoint) if image.entrypoint != 0 else 'Entry point not set' print '%d segments' % len(image.segments) print checksum = ESPROM.ESP_CHECKSUM_MAGIC for (idx, (offset, size, data)) in enumerate(image.segments): print 'Segment %d: %5d bytes at %08x' % (idx + 1, size, offset) checksum = ESPROM.checksum(data, checksum) print print 'Checksum: %02x (%s)' % (image.checksum, 'valid' if image.checksum == checksum else 'invalid!') elif args.operation == 'make_image': image = ESPFirmwareImage() if len(args.segfile) == 0: raise FatalError('No segments specified') if len(args.segfile) != len(args.segaddr): raise FatalError('Number of specified files does not match number of specified addresses') for (seg, addr) in zip(args.segfile, args.segaddr): data = file(seg, 'rb').read() image.add_segment(addr, data) image.entrypoint = args.entrypoint image.save(args.output) elif args.operation == 'elf2image': if args.output is None: args.output = args.input + '-' e = ELFFile(args.input) image = ESPFirmwareImage() image.entrypoint = e.get_entry_point() for section, start in ((".text", "_text_start"), (".data", "_data_start"), (".rodata", "_rodata_start")): data = e.load_section(section) image.add_segment(e.get_symbol_addr(start), data) image.flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode] image.flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size] image.flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq] image.save(args.output + "0x00000.bin") data = e.load_section(".irom0.text") off = e.get_symbol_addr("_irom0_text_start") - 0x40200000 assert off >= 0 f = open(args.output + "0x%05x.bin" % off, "wb") f.write(data) f.close() elif args.operation == 'read_mac': mac = esp.read_mac() print 'MAC: %s' % ':'.join(map(lambda x: '%02x' % x, mac)) elif args.operation == 'flash_id': flash_id = esp.flash_id() print 'Manufacturer: %02x' % (flash_id & 0xff) print 'Device: %02x%02x' % ((flash_id >> 8) & 0xff, (flash_id >> 16) & 0xff) elif args.operation == 'read_flash': print 'Please wait...' file(args.filename, 'wb').write(esp.flash_read(args.address, 1024, div_roundup(args.size, 1024))[:args.size]) elif args.operation == 'erase_flash': esp.flash_erase() if __name__ == '__main__': try: main() except FatalError as e: print '\nA fatal error occurred: %s' % e sys.exit(2)
gpl-2.0
1,992,661,783,218,690,600
38.709497
152
0.573579
false
3.507958
false
false
false
eharney/cinder
cinder/api/v3/attachments.py
1
11362
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volumes attachments API.""" from oslo_log import log as logging import webob from cinder.api import common from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.v3.views import attachments as attachment_views from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import utils from cinder.volume import api as volume_api LOG = logging.getLogger(__name__) class AttachmentsController(wsgi.Controller): """The Attachments API controller for the OpenStack API.""" _view_builder_class = attachment_views.ViewBuilder allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'} def __init__(self, ext_mgr=None): """Initialize controller class.""" self.volume_api = volume_api.API() self.ext_mgr = ext_mgr super(AttachmentsController, self).__init__() @wsgi.Controller.api_version(mv.NEW_ATTACH) def show(self, req, id): """Return data about the given attachment.""" context = req.environ['cinder.context'] attachment = objects.VolumeAttachment.get_by_id(context, id) return attachment_views.ViewBuilder.detail(attachment) @wsgi.Controller.api_version(mv.NEW_ATTACH) def index(self, req): """Return a summary list of attachments.""" attachments = self._items(req) return attachment_views.ViewBuilder.list(attachments) @wsgi.Controller.api_version(mv.NEW_ATTACH) def detail(self, req): """Return a detailed list of attachments.""" attachments = self._items(req) return attachment_views.ViewBuilder.list(attachments, detail=True) @common.process_general_filtering('attachment') def _process_attachment_filtering(self, context=None, filters=None, req_version=None): utils.remove_invalid_filter_options(context, filters, self.allowed_filters) def _items(self, req): """Return a list of attachments, transformed through view builder.""" context = req.environ['cinder.context'] req_version = req.api_version_request # Pop out non search_opts and create local variables search_opts = req.GET.copy() sort_keys, sort_dirs = common.get_sort_params(search_opts) marker, limit, offset = common.get_pagination_params(search_opts) self._process_attachment_filtering(context=context, filters=search_opts, req_version=req_version) if search_opts.get('instance_id', None): search_opts['instance_uuid'] = search_opts.pop('instance_id', None) if context.is_admin and 'all_tenants' in search_opts: del search_opts['all_tenants'] return objects.VolumeAttachmentList.get_all( context, search_opts=search_opts, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs) else: return objects.VolumeAttachmentList.get_all_by_project( context, context.project_id, search_opts=search_opts, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs) @wsgi.Controller.api_version(mv.NEW_ATTACH) @wsgi.response(202) def create(self, req, body): """Create an attachment. 
This method can be used to create an empty attachment (reserve) or to create and initialize a volume attachment based on the provided input parameters. If the caller does not yet have the connector information but needs to reserve an attachment for the volume (ie Nova BootFromVolume) the create can be called with just the volume-uuid and the server identifier. This will reserve an attachment, mark the volume as reserved and prevent any new attachment_create calls from being made until the attachment is updated (completed). The alternative is that the connection can be reserved and initialized all at once with a single call if the caller has all of the required information (connector data) at the time of the call. NOTE: In Nova terms server == instance, the server_id parameter referenced below is the UUID of the Instance, for non-nova consumers this can be a server UUID or some other arbitrary unique identifier. Expected format of the input parameter 'body': .. code-block:: json { "attachment": { "volume_uuid": "volume-uuid", "instance_uuid": "nova-server-uuid", "connector": "null|<connector-object>" } } Example connector: .. code-block:: json { "connector": { "initiator": "iqn.1993-08.org.debian:01:cad181614cec", "ip":"192.168.1.20", "platform": "x86_64", "host": "tempest-1", "os_type": "linux2", "multipath": false, "mountpoint": "/dev/vdb", "mode": "null|rw|ro" } } NOTE all that's required for a reserve is volume_uuid and an instance_uuid. returns: A summary view of the attachment object """ context = req.environ['cinder.context'] instance_uuid = body['attachment'].get('instance_uuid', None) if not instance_uuid: raise webob.exc.HTTPBadRequest( explanation=_("Must specify 'instance_uuid' " "to create attachment.")) volume_uuid = body['attachment'].get('volume_uuid', None) if not volume_uuid: raise webob.exc.HTTPBadRequest( explanation=_("Must specify 'volume_uuid' " "to create attachment.")) volume_ref = objects.Volume.get_by_id( context, volume_uuid) connector = body['attachment'].get('connector', None) err_msg = None try: attachment_ref = ( self.volume_api.attachment_create(context, volume_ref, instance_uuid, connector=connector)) except exception.NotAuthorized: raise except exception.CinderException as ex: err_msg = _( "Unable to create attachment for volume (%s).") % ex.msg LOG.exception(err_msg) except Exception as ex: err_msg = _("Unable to create attachment for volume.") LOG.exception(err_msg) finally: if err_msg: raise webob.exc.HTTPInternalServerError(explanation=err_msg) return attachment_views.ViewBuilder.detail(attachment_ref) @wsgi.Controller.api_version(mv.NEW_ATTACH) def update(self, req, id, body): """Update an attachment record. Update a reserved attachment record with connector information and set up the appropriate connection_info from the driver. Expected format of the input parameter 'body': .. 
code:: json { "attachment": { "connector": { "initiator": "iqn.1993-08.org.debian:01:cad181614cec", "ip":"192.168.1.20", "platform": "x86_64", "host": "tempest-1", "os_type": "linux2", "multipath": False, "mountpoint": "/dev/vdb", "mode": None|"rw"|"ro", } } } """ context = req.environ['cinder.context'] attachment_ref = ( objects.VolumeAttachment.get_by_id(context, id)) connector = body['attachment'].get('connector', None) if not connector: raise webob.exc.HTTPBadRequest( explanation=_("Must specify 'connector' " "to update attachment.")) err_msg = None try: attachment_ref = ( self.volume_api.attachment_update(context, attachment_ref, connector)) except exception.NotAuthorized: raise except exception.CinderException as ex: err_msg = ( _("Unable to update attachment.(%s).") % ex.msg) LOG.exception(err_msg) except Exception: err_msg = _("Unable to update the attachment.") LOG.exception(err_msg) finally: if err_msg: raise webob.exc.HTTPInternalServerError(explanation=err_msg) # TODO(jdg): Test this out some more, do we want to return and object # or a dict? return attachment_views.ViewBuilder.detail(attachment_ref) @wsgi.Controller.api_version(mv.NEW_ATTACH) def delete(self, req, id): """Delete an attachment. Disconnects/Deletes the specified attachment, returns a list of any known shared attachment-id's for the effected backend device. returns: A summary list of any attachments sharing this connection """ context = req.environ['cinder.context'] attachment = objects.VolumeAttachment.get_by_id(context, id) attachments = self.volume_api.attachment_delete(context, attachment) return attachment_views.ViewBuilder.list(attachments) @wsgi.response(202) @wsgi.Controller.api_version(mv.NEW_ATTACH_COMPLETION) @wsgi.action('os-complete') def complete(self, req, id, body): """Mark a volume attachment process as completed (in-use).""" context = req.environ['cinder.context'] attachment_ref = ( objects.VolumeAttachment.get_by_id(context, id)) volume_ref = objects.Volume.get_by_id( context, attachment_ref.volume_id) attachment_ref.update({'attach_status': 'attached'}) attachment_ref.save() volume_ref.update({'status': 'in-use', 'attach_status': 'attached'}) volume_ref.save() def create_resource(ext_mgr): """Create the wsgi resource for this controller.""" return wsgi.Resource(AttachmentsController(ext_mgr))
apache-2.0
-5,867,306,877,933,996,000
38.451389
79
0.58106
false
4.6
false
false
false
turbokongen/home-assistant
homeassistant/components/plex/config_flow.py
1
15991
"""Config flow for Plex.""" import copy import logging from aiohttp import web_response import plexapi.exceptions from plexapi.gdm import GDM from plexauth import PlexAuth import requests.exceptions import voluptuous as vol from homeassistant import config_entries from homeassistant.components.http.view import HomeAssistantView from homeassistant.components.media_player import DOMAIN as MP_DOMAIN from homeassistant.const import ( CONF_CLIENT_ID, CONF_HOST, CONF_PORT, CONF_SOURCE, CONF_SSL, CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL, ) from homeassistant.core import callback from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.network import get_url from .const import ( # pylint: disable=unused-import AUTH_CALLBACK_NAME, AUTH_CALLBACK_PATH, AUTOMATIC_SETUP_STRING, CONF_IGNORE_NEW_SHARED_USERS, CONF_IGNORE_PLEX_WEB_CLIENTS, CONF_MONITORED_USERS, CONF_SERVER, CONF_SERVER_IDENTIFIER, CONF_USE_EPISODE_ART, DEFAULT_PORT, DEFAULT_SSL, DEFAULT_VERIFY_SSL, DOMAIN, MANUAL_SETUP_STRING, PLEX_SERVER_CONFIG, SERVERS, X_PLEX_DEVICE_NAME, X_PLEX_PLATFORM, X_PLEX_PRODUCT, X_PLEX_VERSION, ) from .errors import NoServersFound, ServerNotSpecified from .server import PlexServer _LOGGER = logging.getLogger(__package__) @callback def configured_servers(hass): """Return a set of the configured Plex servers.""" return { entry.data[CONF_SERVER_IDENTIFIER] for entry in hass.config_entries.async_entries(DOMAIN) } async def async_discover(hass): """Scan for available Plex servers.""" gdm = GDM() await hass.async_add_executor_job(gdm.scan) for server_data in gdm.entries: await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: config_entries.SOURCE_INTEGRATION_DISCOVERY}, data=server_data, ) class PlexFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Handle a Plex config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH @staticmethod @callback def async_get_options_flow(config_entry): """Get the options flow for this handler.""" return PlexOptionsFlowHandler(config_entry) def __init__(self): """Initialize the Plex flow.""" self.current_login = {} self.available_servers = None self.plexauth = None self.token = None self.client_id = None self._manual = False async def async_step_user( self, user_input=None, errors=None ): # pylint: disable=arguments-differ """Handle a flow initialized by the user.""" if user_input is not None: return await self.async_step_plex_website_auth() if self.show_advanced_options: return await self.async_step_user_advanced(errors=errors) return self.async_show_form(step_id="user", errors=errors) async def async_step_user_advanced(self, user_input=None, errors=None): """Handle an advanced mode flow initialized by the user.""" if user_input is not None: if user_input.get("setup_method") == MANUAL_SETUP_STRING: self._manual = True return await self.async_step_manual_setup() return await self.async_step_plex_website_auth() data_schema = vol.Schema( { vol.Required("setup_method", default=AUTOMATIC_SETUP_STRING): vol.In( [AUTOMATIC_SETUP_STRING, MANUAL_SETUP_STRING] ) } ) return self.async_show_form( step_id="user_advanced", data_schema=data_schema, errors=errors ) async def async_step_manual_setup(self, user_input=None, errors=None): """Begin manual configuration.""" if user_input is not None and errors is None: user_input.pop(CONF_URL, None) host = user_input.get(CONF_HOST) if host: port = user_input[CONF_PORT] prefix = "https" if user_input.get(CONF_SSL) 
else "http" user_input[CONF_URL] = f"{prefix}://{host}:{port}" elif CONF_TOKEN not in user_input: return await self.async_step_manual_setup( user_input=user_input, errors={"base": "host_or_token"} ) return await self.async_step_server_validate(user_input) previous_input = user_input or {} data_schema = vol.Schema( { vol.Optional( CONF_HOST, description={"suggested_value": previous_input.get(CONF_HOST)}, ): str, vol.Required( CONF_PORT, default=previous_input.get(CONF_PORT, DEFAULT_PORT) ): int, vol.Required( CONF_SSL, default=previous_input.get(CONF_SSL, DEFAULT_SSL) ): bool, vol.Required( CONF_VERIFY_SSL, default=previous_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL), ): bool, vol.Optional( CONF_TOKEN, description={"suggested_value": previous_input.get(CONF_TOKEN)}, ): str, } ) return self.async_show_form( step_id="manual_setup", data_schema=data_schema, errors=errors ) async def async_step_server_validate(self, server_config): """Validate a provided configuration.""" errors = {} self.current_login = server_config plex_server = PlexServer(self.hass, server_config) try: await self.hass.async_add_executor_job(plex_server.connect) except NoServersFound: _LOGGER.error("No servers linked to Plex account") errors["base"] = "no_servers" except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized): _LOGGER.error("Invalid credentials provided, config not created") errors[CONF_TOKEN] = "faulty_credentials" except requests.exceptions.SSLError as error: _LOGGER.error("SSL certificate error: [%s]", error) errors["base"] = "ssl_error" except (plexapi.exceptions.NotFound, requests.exceptions.ConnectionError): server_identifier = ( server_config.get(CONF_URL) or plex_server.server_choice or "Unknown" ) _LOGGER.error("Plex server could not be reached: %s", server_identifier) errors[CONF_HOST] = "not_found" except ServerNotSpecified as available_servers: self.available_servers = available_servers.args[0] return await self.async_step_select_server() except Exception as error: # pylint: disable=broad-except _LOGGER.exception("Unknown error connecting to Plex server: %s", error) return self.async_abort(reason="unknown") if errors: if self._manual: return await self.async_step_manual_setup( user_input=server_config, errors=errors ) return await self.async_step_user(errors=errors) server_id = plex_server.machine_identifier url = plex_server.url_in_use token = server_config.get(CONF_TOKEN) entry_config = {CONF_URL: url} if self.client_id: entry_config[CONF_CLIENT_ID] = self.client_id if token: entry_config[CONF_TOKEN] = token if url.startswith("https"): entry_config[CONF_VERIFY_SSL] = server_config.get( CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL ) data = { CONF_SERVER: plex_server.friendly_name, CONF_SERVER_IDENTIFIER: server_id, PLEX_SERVER_CONFIG: entry_config, } entry = await self.async_set_unique_id(server_id) if self.context[CONF_SOURCE] == config_entries.SOURCE_REAUTH: self.hass.config_entries.async_update_entry(entry, data=data) _LOGGER.debug("Updated config entry for %s", plex_server.friendly_name) await self.hass.config_entries.async_reload(entry.entry_id) return self.async_abort(reason="reauth_successful") self._abort_if_unique_id_configured() _LOGGER.debug("Valid config created for %s", plex_server.friendly_name) return self.async_create_entry(title=plex_server.friendly_name, data=data) async def async_step_select_server(self, user_input=None): """Use selected Plex server.""" config = dict(self.current_login) if user_input is not None: config[CONF_SERVER] = user_input[CONF_SERVER] return await 
self.async_step_server_validate(config) configured = configured_servers(self.hass) available_servers = [ name for (name, server_id) in self.available_servers if server_id not in configured ] if not available_servers: return self.async_abort(reason="all_configured") if len(available_servers) == 1: config[CONF_SERVER] = available_servers[0] return await self.async_step_server_validate(config) return self.async_show_form( step_id="select_server", data_schema=vol.Schema( {vol.Required(CONF_SERVER): vol.In(available_servers)} ), errors={}, ) async def async_step_integration_discovery(self, discovery_info): """Handle GDM discovery.""" machine_identifier = discovery_info["data"]["Resource-Identifier"] await self.async_set_unique_id(machine_identifier) self._abort_if_unique_id_configured() host = f"{discovery_info['from'][0]}:{discovery_info['data']['Port']}" name = discovery_info["data"]["Name"] self.context["title_placeholders"] = { "host": host, "name": name, } return await self.async_step_user() async def async_step_plex_website_auth(self): """Begin external auth flow on Plex website.""" self.hass.http.register_view(PlexAuthorizationCallbackView) hass_url = get_url(self.hass) headers = {"Origin": hass_url} payload = { "X-Plex-Device-Name": X_PLEX_DEVICE_NAME, "X-Plex-Version": X_PLEX_VERSION, "X-Plex-Product": X_PLEX_PRODUCT, "X-Plex-Device": self.hass.config.location_name, "X-Plex-Platform": X_PLEX_PLATFORM, "X-Plex-Model": "Plex OAuth", } session = async_get_clientsession(self.hass) self.plexauth = PlexAuth(payload, session, headers) await self.plexauth.initiate_auth() forward_url = f"{hass_url}{AUTH_CALLBACK_PATH}?flow_id={self.flow_id}" auth_url = self.plexauth.auth_url(forward_url) return self.async_external_step(step_id="obtain_token", url=auth_url) async def async_step_obtain_token(self, user_input=None): """Obtain token after external auth completed.""" token = await self.plexauth.token(10) if not token: return self.async_external_step_done(next_step_id="timed_out") self.token = token self.client_id = self.plexauth.client_identifier return self.async_external_step_done(next_step_id="use_external_token") async def async_step_timed_out(self, user_input=None): """Abort flow when time expires.""" return self.async_abort(reason="token_request_timeout") async def async_step_use_external_token(self, user_input=None): """Continue server validation with external token.""" server_config = {CONF_TOKEN: self.token} return await self.async_step_server_validate(server_config) async def async_step_reauth(self, data): """Handle a reauthorization flow request.""" self.current_login = dict(data) return await self.async_step_user() class PlexOptionsFlowHandler(config_entries.OptionsFlow): """Handle Plex options.""" def __init__(self, config_entry): """Initialize Plex options flow.""" self.options = copy.deepcopy(dict(config_entry.options)) self.server_id = config_entry.data[CONF_SERVER_IDENTIFIER] async def async_step_init(self, user_input=None): """Manage the Plex options.""" return await self.async_step_plex_mp_settings() async def async_step_plex_mp_settings(self, user_input=None): """Manage the Plex media_player options.""" plex_server = self.hass.data[DOMAIN][SERVERS][self.server_id] if user_input is not None: self.options[MP_DOMAIN][CONF_USE_EPISODE_ART] = user_input[ CONF_USE_EPISODE_ART ] self.options[MP_DOMAIN][CONF_IGNORE_NEW_SHARED_USERS] = user_input[ CONF_IGNORE_NEW_SHARED_USERS ] self.options[MP_DOMAIN][CONF_IGNORE_PLEX_WEB_CLIENTS] = user_input[ CONF_IGNORE_PLEX_WEB_CLIENTS ] account_data 
= { user: {"enabled": bool(user in user_input[CONF_MONITORED_USERS])} for user in plex_server.accounts } self.options[MP_DOMAIN][CONF_MONITORED_USERS] = account_data return self.async_create_entry(title="", data=self.options) available_accounts = {name: name for name in plex_server.accounts} available_accounts[plex_server.owner] += " [Owner]" default_accounts = plex_server.accounts known_accounts = set(plex_server.option_monitored_users) if known_accounts: default_accounts = { user for user in plex_server.option_monitored_users if plex_server.option_monitored_users[user]["enabled"] } for user in plex_server.accounts: if user not in known_accounts: available_accounts[user] += " [New]" if not plex_server.option_ignore_new_shared_users: for new_user in plex_server.accounts - known_accounts: default_accounts.add(new_user) return self.async_show_form( step_id="plex_mp_settings", data_schema=vol.Schema( { vol.Required( CONF_USE_EPISODE_ART, default=plex_server.option_use_episode_art, ): bool, vol.Optional( CONF_MONITORED_USERS, default=default_accounts ): cv.multi_select(available_accounts), vol.Required( CONF_IGNORE_NEW_SHARED_USERS, default=plex_server.option_ignore_new_shared_users, ): bool, vol.Required( CONF_IGNORE_PLEX_WEB_CLIENTS, default=plex_server.option_ignore_plexweb_clients, ): bool, } ), ) class PlexAuthorizationCallbackView(HomeAssistantView): """Handle callback from external auth.""" url = AUTH_CALLBACK_PATH name = AUTH_CALLBACK_NAME requires_auth = False async def get(self, request): """Receive authorization confirmation.""" hass = request.app["hass"] await hass.config_entries.flow.async_configure( flow_id=request.query["flow_id"], user_input=None ) return web_response.Response( headers={"content-type": "text/html"}, text="<script>window.close()</script>Success! This window can be closed", )
apache-2.0
1,489,924,755,053,045,500
36.36215
85
0.598837
false
4.022893
true
false
false
italomaia/turtle-linux
games/Dynamite/pgu/test.py
1
1624
import pygame
from pygame.locals import *
import gui

screen = pygame.display.set_mode( (640, 480), FULLSCREEN )
# try adding DOUBLEBUF | HWSURFACE
# pygame.mouse.set_visible(0)

app = gui.App()
c = gui.Container(width=640,height=480)

##
## dialog 1
##
t1 = gui.Table()
t1.tr()
t1.add(gui.Label("Gal Test"))

t2 = gui.Table()
t2.tr()
t2.add(gui.Label("Gui Widgets"))
t2.add(gui.Input())
t2.tr()
t2.add(gui.Label("Button"))
t2.add(gui.Button("Click Me!"))

d1 = gui.Dialog(t1, t2)
c.add(d1, 50, 150)

##
## dialog 2
##
t3 = gui.Table()
t3.tr()
t3.add(gui.Label("Another one"))

t4 = gui.Table()
t4.tr()
t4.add(gui.Label("Name"))
t4.add(gui.Input())
t4.tr()
t4.add(gui.Label("Ohh"))
b1 = gui.Button("OK")
t4.add(b1)

d2 = gui.Dialog(t3, t4)
c.add(d2, 50, 300)

##
## some labels
##
l1 = gui.Label("Suppose this is a menu", color=(255, 255, 255) )
c.add(l1, 50, 50)
l2 = gui.Label("Click <SPACE> to hide top dialog",
               color=(255, 255, 255) )
c.add(l2, 50, 75)
l3 = gui.Label("Opps... Did it happen?", color=(255, 255, 255) )

##
## app begins
##
app.init(widget=c,screen=screen)

FRAME_EVT = USEREVENT + 1
pygame.event.Event(FRAME_EVT)
pygame.time.set_timer(FRAME_EVT, 30)

_quit = 0
while _quit == 0:
    event = pygame.event.wait()
    if event.type == FRAME_EVT:
        pygame.display.flip()
        continue
    if event.type == KEYDOWN:
        if event.key == K_ESCAPE:
            _quit = 1
            continue
        elif event.key == K_SPACE:
            d1.close()
            c.add(l3, 100, 100)
    app._event(event)
    screen.fill((0,0,0))
    app.paint(screen)
gpl-3.0
4,685,452,212,621,695,000
14.037037
68
0.589286
false
2.47561
false
false
false
wfx/epack
epack/libarchive/ffi.py
1
7623
# This file is part of a program licensed under the terms of the GNU Lesser # General Public License version 2 (or at your option any later version) # as published by the Free Software Foundation: http://www.gnu.org/licenses/ from __future__ import division, print_function, unicode_literals from ctypes import ( c_char_p, c_int, c_uint, c_longlong, c_size_t, c_void_p, c_wchar_p, CFUNCTYPE, POINTER, ) try: from ctypes import c_ssize_t except ImportError: from ctypes import c_longlong as c_ssize_t import ctypes from ctypes.util import find_library import logging import mmap import os from .exception import ArchiveError logger = logging.getLogger('libarchive') page_size = mmap.PAGESIZE libarchive_path = os.environ.get('LIBARCHIVE') or \ find_library('archive') or \ find_library('libarchive') or \ 'libarchive.so' libarchive = ctypes.cdll.LoadLibrary(libarchive_path) # Constants ARCHIVE_EOF = 1 # Found end of archive. ARCHIVE_OK = 0 # Operation was successful. ARCHIVE_RETRY = -10 # Retry might succeed. ARCHIVE_WARN = -20 # Partial success. ARCHIVE_FAILED = -25 # Current operation cannot complete. ARCHIVE_FATAL = -30 # No more operations are possible. AE_IFMT = 0o170000 AE_IFREG = 0o100000 AE_IFLNK = 0o120000 AE_IFSOCK = 0o140000 AE_IFCHR = 0o020000 AE_IFBLK = 0o060000 AE_IFDIR = 0o040000 AE_IFIFO = 0o010000 # Callback types WRITE_CALLBACK = CFUNCTYPE( c_ssize_t, c_void_p, c_void_p, POINTER(c_void_p), c_size_t ) OPEN_CALLBACK = CFUNCTYPE(c_int, c_void_p, c_void_p) CLOSE_CALLBACK = CFUNCTYPE(c_int, c_void_p, c_void_p) VOID_CB = lambda *_: ARCHIVE_OK # Type aliases, for readability c_archive_p = c_void_p c_archive_entry_p = c_void_p # Helper functions def _error_string(archive_p): msg = error_string(archive_p) if msg is None: return try: return msg.decode('ascii') except UnicodeDecodeError: return msg def archive_error(archive_p, retcode): msg = _error_string(archive_p) raise ArchiveError(msg, errno(archive_p), retcode, archive_p) def check_null(ret, func, args): if ret is None: raise ArchiveError(func.__name__+' returned NULL') return ret def check_int(retcode, func, args): if retcode >= 0: return retcode elif retcode == ARCHIVE_WARN: logger.warning(_error_string(args[0])) return retcode else: raise archive_error(args[0], retcode) def ffi(name, argtypes, restype, errcheck=None): f = getattr(libarchive, 'archive_'+name) f.argtypes = argtypes f.restype = restype if errcheck: f.errcheck = errcheck globals()[name] = f return f # FFI declarations # archive_util errno = ffi('errno', [c_archive_p], c_int) error_string = ffi('error_string', [c_archive_p], c_char_p) # archive_entry ffi('entry_new', [], c_archive_entry_p, check_null) ffi('entry_filetype', [c_archive_entry_p], c_int) ffi('entry_mtime', [c_archive_entry_p], c_int) ffi('entry_perm', [c_archive_entry_p], c_int) ffi('entry_pathname_w', [c_archive_entry_p], c_wchar_p) ffi('entry_sourcepath', [c_archive_entry_p], c_char_p) ffi('entry_size', [c_archive_entry_p], c_longlong) ffi('entry_size_is_set', [c_archive_entry_p], c_int) ffi('entry_update_pathname_utf8', [c_archive_entry_p, c_char_p], None) ffi('entry_clear', [c_archive_entry_p], c_archive_entry_p) ffi('entry_free', [c_archive_entry_p], None) # archive_read ffi('read_new', [], c_archive_p, check_null) READ_FORMATS = set(( '7zip', 'all', 'ar', 'cab', 'cpio', 'empty', 'iso9660', 'lha', 'mtree', 'rar', 'raw', 'tar', 'xar', 'zip' )) for f_name in list(READ_FORMATS): try: ffi('read_support_format_'+f_name, [c_archive_p], c_int, check_int) except AttributeError: # pragma: no cover 
logger.warning('read format "%s" is not supported' % f_name) READ_FORMATS.remove(f_name) READ_FILTERS = set(( 'all', 'bzip2', 'compress', 'grzip', 'gzip', 'lrzip', 'lzip', 'lzma', 'lzop', 'none', 'rpm', 'uu', 'xz' )) for f_name in list(READ_FILTERS): try: ffi('read_support_filter_'+f_name, [c_archive_p], c_int, check_int) except AttributeError: # pragma: no cover logger.warning('read filter "%s" is not supported' % f_name) READ_FILTERS.remove(f_name) ffi('read_open_fd', [c_archive_p, c_int, c_size_t], c_int, check_int) ffi('read_open_filename_w', [c_archive_p, c_wchar_p, c_size_t], c_int, check_int) ffi('read_open_memory', [c_archive_p, c_void_p, c_size_t], c_int, check_int) ffi('read_next_header', [c_archive_p, POINTER(c_void_p)], c_int, check_int) ffi('read_next_header2', [c_archive_p, c_void_p], c_int, check_int) ffi('read_close', [c_archive_p], c_int, check_int) ffi('read_free', [c_archive_p], c_int, check_int) # archive_read_disk ffi('read_disk_new', [], c_archive_p, check_null) ffi('read_disk_set_standard_lookup', [c_archive_p], c_int, check_int) ffi('read_disk_open', [c_archive_p, c_char_p], c_int, check_int) ffi('read_disk_open_w', [c_archive_p, c_wchar_p], c_int, check_int) ffi('read_disk_descend', [c_archive_p], c_int, check_int) # archive_read_data ffi('read_data_block', [c_archive_p, POINTER(c_void_p), POINTER(c_size_t), POINTER(c_longlong)], c_int, check_int) ffi('read_data', [c_archive_p, c_void_p, c_size_t], c_ssize_t, check_int) ffi('read_data_skip', [c_archive_p], c_int, check_int) # archive_write ffi('write_new', [], c_archive_p, check_null) ffi('write_disk_new', [], c_archive_p, check_null) ffi('write_disk_set_options', [c_archive_p, c_int], c_int, check_int) WRITE_FORMATS = set(( '7zip', 'ar_bsd', 'ar_svr4', 'cpio', 'cpio_newc', 'gnutar', 'iso9660', 'mtree', 'mtree_classic', 'pax', 'pax_restricted', 'shar', 'shar_dump', 'ustar', 'v7tar', 'xar', 'zip' )) for f_name in list(WRITE_FORMATS): try: ffi('write_set_format_'+f_name, [c_archive_p], c_int, check_int) except AttributeError: # pragma: no cover logger.warning('write format "%s" is not supported' % f_name) WRITE_FORMATS.remove(f_name) WRITE_FILTERS = set(( 'b64encode', 'bzip2', 'compress', 'grzip', 'gzip', 'lrzip', 'lzip', 'lzma', 'lzop', 'uuencode', 'xz' )) for f_name in list(WRITE_FILTERS): try: ffi('write_add_filter_'+f_name, [c_archive_p], c_int, check_int) except AttributeError: # pragma: no cover logger.warning('write filter "%s" is not supported' % f_name) WRITE_FILTERS.remove(f_name) ffi('write_open', [c_archive_p, c_void_p, OPEN_CALLBACK, WRITE_CALLBACK, CLOSE_CALLBACK], c_int, check_int) ffi('write_open_fd', [c_archive_p, c_int], c_int, check_int) ffi('write_open_filename', [c_archive_p, c_char_p], c_int, check_int) ffi('write_open_filename_w', [c_archive_p, c_wchar_p], c_int, check_int) ffi('write_open_memory', [c_archive_p, c_void_p, c_size_t, POINTER(c_size_t)], c_int, check_int) ffi('write_get_bytes_in_last_block', [c_archive_p], c_int, check_int) ffi('write_get_bytes_per_block', [c_archive_p], c_int, check_int) ffi('write_set_bytes_in_last_block', [c_archive_p, c_int], c_int, check_int) ffi('write_set_bytes_per_block', [c_archive_p, c_int], c_int, check_int) ffi('write_header', [c_archive_p, c_void_p], c_int, check_int) ffi('write_data', [c_archive_p, c_void_p, c_size_t], c_ssize_t, check_int) ffi('write_data_block', [c_archive_p, c_void_p, c_size_t, c_longlong], c_int, check_int) ffi('write_finish_entry', [c_archive_p], c_int, check_int) ffi('write_close', [c_archive_p], c_int, check_int) 
ffi('write_free', [c_archive_p], c_int, check_int)
gpl-3.0
-946,142,099,526,545,800
30.114286
79
0.647908
false
2.710882
false
false
false
chintak/scikit-image
skimage/feature/util.py
1
4726
import numpy as np from skimage.util import img_as_float class FeatureDetector(object): def __init__(self): self.keypoints_ = np.array([]) def detect(self, image): """Detect keypoints in image. Parameters ---------- image : 2D array Input image. """ raise NotImplementedError() class DescriptorExtractor(object): def __init__(self): self.descriptors_ = np.array([]) def extract(self, image, keypoints): """Extract feature descriptors in image for given keypoints. Parameters ---------- image : 2D array Input image. keypoints : (N, 2) array Keypoint locations as ``(row, col)``. """ raise NotImplementedError() def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches, keypoints_color='k', matches_color=None, only_matches=False): """Plot matched features. Parameters ---------- ax : matplotlib.axes.Axes Matches and image are drawn in this ax. image1 : (N, M [, 3]) array First grayscale or color image. image2 : (N, M [, 3]) array Second grayscale or color image. keypoints1 : (K1, 2) array First keypoint coordinates as ``(row, col)``. keypoints2 : (K2, 2) array Second keypoint coordinates as ``(row, col)``. matches : (Q, 2) array Indices of corresponding matches in first and second set of descriptors, where ``matches[:, 0]`` denote the indices in the first and ``matches[:, 1]`` the indices in the second set of descriptors. keypoints_color : matplotlib color, optional Color for keypoint locations. matches_color : matplotlib color, optional Color for lines which connect keypoint matches. By default the color is chosen randomly. only_matches : bool, optional Whether to only plot matches and not plot the keypoint locations. """ image1 = img_as_float(image1) image2 = img_as_float(image2) new_shape1 = list(image1.shape) new_shape2 = list(image2.shape) if image1.shape[0] < image2.shape[0]: new_shape1[0] = image2.shape[0] elif image1.shape[0] > image2.shape[0]: new_shape2[0] = image1.shape[0] if image1.shape[1] < image2.shape[1]: new_shape1[1] = image2.shape[1] elif image1.shape[1] > image2.shape[1]: new_shape2[1] = image1.shape[1] if new_shape1 != image1.shape: new_image1 = np.zeros(new_shape1, dtype=image1.dtype) new_image1[:image1.shape[0], :image1.shape[1]] = image1 image1 = new_image1 if new_shape2 != image2.shape: new_image2 = np.zeros(new_shape2, dtype=image2.dtype) new_image2[:image2.shape[0], :image2.shape[1]] = image2 image2 = new_image2 image = np.concatenate([image1, image2], axis=1) offset = image1.shape if not only_matches: ax.scatter(keypoints1[:, 1], keypoints1[:, 0], facecolors='none', edgecolors=keypoints_color) ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0], facecolors='none', edgecolors=keypoints_color) ax.imshow(image) ax.axis((0, 2 * offset[1], offset[0], 0)) for i in range(matches.shape[0]): idx1 = matches[i, 0] idx2 = matches[i, 1] if matches_color is None: color = np.random.rand(3, 1) else: color = matches_color ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]), (keypoints1[idx1, 0], keypoints2[idx2, 0]), '-', color=color) def _prepare_grayscale_input_2D(image): image = np.squeeze(image) if image.ndim != 2: raise ValueError("Only 2-D gray-scale images supported.") return img_as_float(image) def _mask_border_keypoints(image_shape, keypoints, distance): """Mask coordinates that are within certain distance from the image border. Parameters ---------- image_shape : (2, ) array_like Shape of the image as ``(rows, cols)``. keypoints : (N, 2) array Keypoint coordinates as ``(rows, cols)``. distance : int Image border distance. 
Returns ------- mask : (N, ) bool array Mask indicating if pixels are within the image (``True``) or in the border region of the image (``False``). """ rows = image_shape[0] cols = image_shape[1] mask = (((distance - 1) < keypoints[:, 0]) & (keypoints[:, 0] < (rows - distance + 1)) & ((distance - 1) < keypoints[:, 1]) & (keypoints[:, 1] < (cols - distance + 1))) return mask
bsd-3-clause
973,310,001,294,730,000
28.354037
79
0.585485
false
3.71249
false
false
false
pavlov99/jsonapi
jsonapi/utils.py
1
2220
""" JSON:API utils.""" class _classproperty(property): """ Implement property behaviour for classes. class A(): @_classproperty @classmethod def name(cls): return cls.__name__ """ def __get__(self, obj, type_): return self.fget.__get__(None, type_)() def _cached(f): """ Decorator that makes a method cached.""" attr_name = '_cached_' + f.__name__ def wrapper(obj, *args, **kwargs): if not hasattr(obj, attr_name): setattr(obj, attr_name, f(obj, *args, **kwargs)) return getattr(obj, attr_name) return wrapper classproperty = lambda f: _classproperty(classmethod(f)) cached_property = lambda f: property(_cached(f)) cached_classproperty = lambda f: classproperty(_cached(f)) class Choices(object): """ Choices.""" def __init__(self, *choices): self._choices = [] self._choice_dict = {} for choice in choices: if isinstance(choice, (list, tuple)): if len(choice) == 2: choice = (choice[0], choice[1], choice[1]) elif len(choice) != 3: raise ValueError( "Choices can't handle a list/tuple of length {0}, only\ 2 or 3".format(choice)) else: choice = (choice, choice, choice) self._choices.append((choice[0], choice[2])) self._choice_dict[choice[1]] = choice[0] def __getattr__(self, attname): try: return self._choice_dict[attname] except KeyError: raise AttributeError(attname) def __iter__(self): return iter(self._choices) def __getitem__(self, index): return self._choices[index] def __delitem__(self, index): del self._choices[index] def __setitem__(self, index, value): self._choices[index] = value def __repr__(self): return "{0}({1})".format( self.__class__.__name__, self._choices ) def __len__(self): return len(self._choices) def __contains__(self, element): return element in self._choice_dict.values()
mit
-3,830,283,769,636,155,400
23.94382
79
0.530631
false
4.157303
false
false
false
NicWayand/xray
xarray/plot/utils.py
1
6442
import pkg_resources import numpy as np import pandas as pd from ..core.pycompat import basestring def _load_default_cmap(fname='default_colormap.csv'): """ Returns viridis color map """ from matplotlib.colors import LinearSegmentedColormap # Not sure what the first arg here should be f = pkg_resources.resource_stream(__name__, fname) cm_data = pd.read_csv(f, header=None).values return LinearSegmentedColormap.from_list('viridis', cm_data) def _determine_extend(calc_data, vmin, vmax): extend_min = calc_data.min() < vmin extend_max = calc_data.max() > vmax if extend_min and extend_max: extend = 'both' elif extend_min: extend = 'min' elif extend_max: extend = 'max' else: extend = 'neither' return extend def _build_discrete_cmap(cmap, levels, extend, filled): """ Build a discrete colormap and normalization of the data. """ import matplotlib as mpl if not filled: # non-filled contour plots extend = 'max' if extend == 'both': ext_n = 2 elif extend in ['min', 'max']: ext_n = 1 else: ext_n = 0 n_colors = len(levels) + ext_n - 1 pal = _color_palette(cmap, n_colors) new_cmap, cnorm = mpl.colors.from_levels_and_colors( levels, pal, extend=extend) # copy the old cmap name, for easier testing new_cmap.name = getattr(cmap, 'name', cmap) return new_cmap, cnorm def _color_palette(cmap, n_colors): import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap colors_i = np.linspace(0, 1., n_colors) if isinstance(cmap, (list, tuple)): # we have a list of colors try: # first try to turn it into a palette with seaborn from seaborn.apionly import color_palette pal = color_palette(cmap, n_colors=n_colors) except ImportError: # if that fails, use matplotlib # in this case, is there any difference between mpl and seaborn? cmap = ListedColormap(cmap, N=n_colors) pal = cmap(colors_i) elif isinstance(cmap, basestring): # we have some sort of named palette try: # first try to turn it into a palette with seaborn from seaborn.apionly import color_palette pal = color_palette(cmap, n_colors=n_colors) except (ImportError, ValueError): # ValueError is raised when seaborn doesn't like a colormap # (e.g. jet). If that fails, use matplotlib try: # is this a matplotlib cmap? cmap = plt.get_cmap(cmap) except ValueError: # or maybe we just got a single color as a string cmap = ListedColormap([cmap], N=n_colors) pal = cmap(colors_i) else: # cmap better be a LinearSegmentedColormap (e.g. viridis) pal = cmap(colors_i) return pal def _determine_cmap_params(plot_data, vmin=None, vmax=None, cmap=None, center=None, robust=False, extend=None, levels=None, filled=True, cnorm=None): """ Use some heuristics to set good defaults for colorbar and range. 
Adapted from Seaborn: https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158 Parameters ========== plot_data: Numpy array Doesn't handle xarray objects Returns ======= cmap_params : dict Use depends on the type of the plotting function """ ROBUST_PERCENTILE = 2.0 import matplotlib as mpl calc_data = np.ravel(plot_data[~pd.isnull(plot_data)]) # Setting center=False prevents a divergent cmap possibly_divergent = center is not False # Set center to 0 so math below makes sense but remember its state center_is_none = False if center is None: center = 0 center_is_none = True # Setting both vmin and vmax prevents a divergent cmap if (vmin is not None) and (vmax is not None): possibly_divergent = False # vlim might be computed below vlim = None if vmin is None: if robust: vmin = np.percentile(calc_data, ROBUST_PERCENTILE) else: vmin = calc_data.min() elif possibly_divergent: vlim = abs(vmin - center) if vmax is None: if robust: vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE) else: vmax = calc_data.max() elif possibly_divergent: vlim = abs(vmax - center) if possibly_divergent: # kwargs not specific about divergent or not: infer defaults from data divergent = ((vmin < 0) and (vmax > 0)) or not center_is_none else: divergent = False # A divergent map should be symmetric around the center value if divergent: if vlim is None: vlim = max(abs(vmin - center), abs(vmax - center)) vmin, vmax = -vlim, vlim # Now add in the centering value and set the limits vmin += center vmax += center # Choose default colormaps if not provided if cmap is None: if divergent: cmap = "RdBu_r" else: cmap = "viridis" # Allow viridis before matplotlib 1.5 if cmap == "viridis": cmap = _load_default_cmap() # Handle discrete levels if levels is not None: if isinstance(levels, int): ticker = mpl.ticker.MaxNLocator(levels) levels = ticker.tick_values(vmin, vmax) vmin, vmax = levels[0], levels[-1] if extend is None: extend = _determine_extend(calc_data, vmin, vmax) if levels is not None: cmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled) return dict(vmin=vmin, vmax=vmax, cmap=cmap, extend=extend, levels=levels, norm=cnorm) def _infer_xy_labels(darray, x, y): """ Determine x and y labels. For use in _plot2d darray must be a 2 dimensional data array. """ if x is None and y is None: if darray.ndim != 2: raise ValueError('DataArray must be 2d') y, x = darray.dims elif x is None or y is None: raise ValueError('cannot supply only one of x and y') elif any(k not in darray.coords for k in (x, y)): raise ValueError('x and y must be coordinate variables') return x, y
apache-2.0
5,193,884,461,763,979,000
28.686636
78
0.603695
false
3.789412
false
false
false
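A minimal sketch of the robust-limits heuristic used by `_determine_cmap_params` above, assuming plain NumPy input; the 2nd/98th percentile clipping and the symmetric expansion around `center` are the parts being illustrated, and the helper name `robust_limits` is invented for this example.

import numpy as np

def robust_limits(data, center=None, robust=True, percentile=2.0):
    # Drop NaNs before computing limits, as the xarray helper does.
    calc = np.asarray(data, dtype=float)
    calc = calc[~np.isnan(calc)]
    if robust:
        vmin = np.percentile(calc, percentile)
        vmax = np.percentile(calc, 100 - percentile)
    else:
        vmin, vmax = calc.min(), calc.max()
    if center is not None:
        # Divergent data: make the limits symmetric around the center.
        vlim = max(abs(vmin - center), abs(vmax - center))
        vmin, vmax = center - vlim, center + vlim
    return vmin, vmax

# Example: anomalies around zero get symmetric limits for a divergent cmap.
print(robust_limits(np.random.randn(1000) * 3 + 0.5, center=0))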
xozzo/pyfootball
setup.py
1
1257
from setuptools import setup, find_packages
import os

if os.path.exists('README.rst'):
    readme_path = 'README.rst'
else:
    readme_path = 'README.md'

setup(
    name='pyfootball',
    version='1.0.1',
    description='A client library for the football-data.org REST API',
    long_description=open(readme_path).read(),
    url='https://github.com/xozzo/pyfootball',

    author='Timothy Ng',
    author_email='hello@timothyng.xyz',
    license='MIT',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5'
    ],

    keywords='api wrapper client library football data',
    packages=find_packages(exclude=['contrib', 'docs', 'tests', 'venv']),
    install_requires=['requests'],
    test_suite='tests',

    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev]
    extras_require={
        'dev': ['sphinx', 'sphinx-autobuild']
    }
)
mit
-1,856,567,441,525,745,200
27.568182
73
0.6428
false
3.797583
false
false
false
frankk00/realtor
oauth_provider/oauth.py
1
23473
""" The MIT License Copyright (c) 2007 Leah Culver Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import logging logger = logging.getLogger() logger.setLevel(logging.DEBUG) import cgi import urllib import time import random import urlparse import hmac import binascii VERSION = '1.0' # Hi Blaine! HTTP_METHOD = 'GET' SIGNATURE_METHOD = 'PLAINTEXT' class OAuthError(RuntimeError): """Generic exception class.""" def __init__(self, message='OAuth error occured.'): self.message = message def build_authenticate_header(realm=''): """Optional WWW-Authenticate header (401 error)""" return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} def escape(s): """Escape a URL including any /.""" return urllib.quote(s, safe='~') def _utf8_str(s): """Convert unicode to utf-8.""" if isinstance(s, unicode): return s.encode("utf-8") else: return str(s) def generate_timestamp(): """Get seconds since epoch (UTC).""" return int(time.time()) def generate_nonce(length=8): """Generate pseudorandom number.""" return ''.join([str(random.randint(0, 9)) for i in range(length)]) def generate_verifier(length=8): """Generate pseudorandom number.""" return ''.join([str(random.randint(0, 9)) for i in range(length)]) class OAuthConsumer(object): """Consumer of OAuth authentication. OAuthConsumer is a data type that represents the identity of the Consumer via its shared secret with the Service Provider. """ key = None secret = None def __init__(self, key, secret): self.key = key self.secret = secret class OAuthToken(object): """OAuthToken is a data type that represents an End User via either an access or request token. key -- the token secret -- the token secret """ key = None secret = None callback = None callback_confirmed = None verifier = None def __init__(self, key, secret): self.key = key self.secret = secret def set_callback(self, callback): self.callback = callback self.callback_confirmed = 'true' def set_verifier(self, verifier=None): if verifier is not None: self.verifier = verifier else: self.verifier = generate_verifier() def get_callback_url(self): if self.callback and self.verifier: # Append the oauth_verifier. 
parts = urlparse.urlparse(self.callback) scheme, netloc, path, params, query, fragment = parts[:6] if query: query = '%s&oauth_verifier=%s' % (query, self.verifier) else: query = 'oauth_verifier=%s' % self.verifier return urlparse.urlunparse((scheme, netloc, path, params, query, fragment)) return self.callback def to_string(self): data = { 'oauth_token': self.key, 'oauth_token_secret': self.secret, } if self.callback_confirmed is not None: data['oauth_callback_confirmed'] = self.callback_confirmed return urllib.urlencode(data) def from_string(s): """ Returns a token from something like: oauth_token_secret=xxx&oauth_token=xxx """ params = cgi.parse_qs(s, keep_blank_values=False) key = params['oauth_token'][0] secret = params['oauth_token_secret'][0] token = OAuthToken(key, secret) try: token.callback_confirmed = params['oauth_callback_confirmed'][0] except KeyError: pass # 1.0, no callback confirmed. return token from_string = staticmethod(from_string) def __str__(self): return self.to_string() class OAuthRequest(object): """OAuthRequest represents the request and can be serialized. OAuth parameters: - oauth_consumer_key - oauth_token - oauth_signature_method - oauth_signature - oauth_timestamp - oauth_nonce - oauth_version - oauth_verifier ... any additional parameters, as defined by the Service Provider. """ parameters = None # OAuth parameters. http_method = HTTP_METHOD http_url = None version = VERSION def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None): self.http_method = http_method self.http_url = http_url self.parameters = parameters or {} def set_parameter(self, parameter, value): self.parameters[parameter] = value def get_parameter(self, parameter): try: return self.parameters[parameter] except: raise OAuthError('Parameter not found: %s' % parameter) def _get_timestamp_nonce(self): return self.get_parameter('oauth_timestamp'), self.get_parameter( 'oauth_nonce') def get_nonoauth_parameters(self): """Get any non-OAuth parameters.""" parameters = {} for k, v in self.parameters.iteritems(): # Ignore oauth parameters. if k.find('oauth_') < 0: parameters[k] = v return parameters def to_header(self, realm=''): """Serialize as a header for an HTTPAuth request.""" auth_header = 'OAuth realm="%s"' % realm # Add the oauth parameters. if self.parameters: for k, v in self.parameters.iteritems(): if k[:6] == 'oauth_': auth_header += ', %s="%s"' % (k, escape(str(v))) return {'Authorization': auth_header} def to_postdata(self): """Serialize as post data for a POST request.""" return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \ for k, v in self.parameters.iteritems()]) def to_url(self): """Serialize as a URL for a GET request.""" return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata()) def get_normalized_parameters(self): """Return a string that contains the parameters that must be signed.""" params = self.parameters try: # Exclude the signature if it exists. del params['oauth_signature'] except: pass # Escape key values before sorting. key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \ for k,v in params.items()] # Sort lexicographically, first after key, then after value. key_values.sort() # Combine key value pairs into a string. 
return '&'.join(['%s=%s' % (k, v) for k, v in key_values]) def get_normalized_http_method(self): """Uppercases the http method.""" return self.http_method.upper() def get_normalized_http_url(self): """Parses the URL and rebuilds it to be scheme://host/path.""" parts = urlparse.urlparse(self.http_url) scheme, netloc, path = parts[:3] # Exclude default port numbers. if scheme == 'http' and netloc[-3:] == ':80': netloc = netloc[:-3] elif scheme == 'https' and netloc[-4:] == ':443': netloc = netloc[:-4] return '%s://%s%s' % (scheme, netloc, path) def sign_request(self, signature_method, consumer, token): """Set the signature parameter to the result of build_signature.""" # Set the signature method. self.set_parameter('oauth_signature_method', signature_method.get_name()) # Set the signature. self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token)) def build_signature(self, signature_method, consumer, token): """Calls the build signature method within the signature method.""" return signature_method.build_signature(self, consumer, token) def from_request(http_method, http_url, headers=None, parameters=None, query_string=None): """Combines multiple parameter sources.""" if parameters is None: parameters = {} # Headers if headers and 'Authorization' in headers: auth_header = headers['Authorization'] # Check that the authorization header is OAuth. if auth_header[:6] == 'OAuth ': auth_header = auth_header[6:] try: # Get the parameters from the header. header_params = OAuthRequest._split_header(auth_header) parameters.update(header_params) except: raise OAuthError('Unable to parse OAuth parameters from ' 'Authorization header.') # GET or POST query string. if query_string: query_params = OAuthRequest._split_url_string(query_string) parameters.update(query_params) # URL parameters. param_str = urlparse.urlparse(http_url)[4] # query url_params = OAuthRequest._split_url_string(param_str) parameters.update(url_params) if parameters: return OAuthRequest(http_method, http_url, parameters) return None from_request = staticmethod(from_request) def from_consumer_and_token(oauth_consumer, token=None, callback=None, verifier=None, http_method=HTTP_METHOD, http_url=None, parameters=None): if not parameters: parameters = {} defaults = { 'oauth_consumer_key': oauth_consumer.key, 'oauth_timestamp': generate_timestamp(), 'oauth_nonce': generate_nonce(), 'oauth_version': OAuthRequest.version, } defaults.update(parameters) parameters = defaults if token: parameters['oauth_token'] = token.key if token.callback: parameters['oauth_callback'] = token.callback # 1.0a support for verifier. if verifier: parameters['oauth_verifier'] = verifier elif callback: # 1.0a support for callback in the request token request. parameters['oauth_callback'] = callback return OAuthRequest(http_method, http_url, parameters) from_consumer_and_token = staticmethod(from_consumer_and_token) def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None): if not parameters: parameters = {} parameters['oauth_token'] = token.key if callback: parameters['oauth_callback'] = callback return OAuthRequest(http_method, http_url, parameters) from_token_and_callback = staticmethod(from_token_and_callback) def _split_header(header): """Turn Authorization: header into parameters.""" params = {} parts = header.split(',') for param in parts: # Ignore realm parameter. if param.find('realm') > -1: continue # Remove whitespace. param = param.strip() # Split key-value. 
param_parts = param.split('=', 1) # Remove quotes and unescape the value. params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"')) return params _split_header = staticmethod(_split_header) def _split_url_string(param_str): """Turn URL string into parameters.""" parameters = cgi.parse_qs(param_str, keep_blank_values=False) for k, v in parameters.iteritems(): parameters[k] = urllib.unquote(v[0]) return parameters _split_url_string = staticmethod(_split_url_string) class OAuthServer(object): """A worker to check the validity of a request against a data store.""" timestamp_threshold = 300 # In seconds, five minutes. version = VERSION signature_methods = None data_store = None def __init__(self, data_store=None, signature_methods=None): self.data_store = data_store self.signature_methods = signature_methods or {} def set_data_store(self, data_store): self.data_store = data_store def get_data_store(self): return self.data_store def add_signature_method(self, signature_method): self.signature_methods[signature_method.get_name()] = signature_method return self.signature_methods def fetch_request_token(self, oauth_request): """Processes a request_token request and returns the request token on success. """ try: # Get the request token for authorization. token = self._get_token(oauth_request, 'request') except OAuthError: # No token required for the initial token request. version = self._get_version(oauth_request) consumer = self._get_consumer(oauth_request) try: callback = self.get_callback(oauth_request) except OAuthError: callback = None # 1.0, no callback specified. self._check_signature(oauth_request, consumer, None) # Fetch a new token. token = self.data_store.fetch_request_token(consumer, callback) return token def fetch_access_token(self, oauth_request): logger.warning("!!! IN OAuthServer.fetch_access_token OAuth Params: %s"%oauth_request.parameters) """Processes an access_token request and returns the access token on success. """ version = self._get_version(oauth_request) consumer = self._get_consumer(oauth_request) try: verifier = self._get_verifier(oauth_request) except OAuthError: verifier = None # Get the request token. token = self._get_token(oauth_request, 'request') self._check_signature(oauth_request, consumer, token) new_token = self.data_store.fetch_access_token(consumer, token, verifier) return new_token def verify_request(self, oauth_request): """Verifies an api call and checks all the parameters.""" # -> consumer and token version = self._get_version(oauth_request) consumer = self._get_consumer(oauth_request) # Get the access token. token = self._get_token(oauth_request, 'access') self._check_signature(oauth_request, consumer, token) parameters = oauth_request.get_nonoauth_parameters() return consumer, token, parameters def authorize_token(self, token, user): """Authorize a request token.""" return self.data_store.authorize_request_token(token, user) def get_callback(self, oauth_request): """Get the callback URL.""" return oauth_request.get_parameter('oauth_callback') def build_authenticate_header(self, realm=''): """Optional support for the authenticate header.""" return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} def _get_version(self, oauth_request): """Verify the correct version request for this server.""" try: version = oauth_request.get_parameter('oauth_version') except: version = VERSION if version and version != self.version: raise OAuthError('OAuth version %s not supported.' 
% str(version)) return version def _get_signature_method(self, oauth_request): """Figure out the signature with some defaults.""" try: signature_method = oauth_request.get_parameter( 'oauth_signature_method') except: signature_method = SIGNATURE_METHOD try: # Get the signature method object. signature_method = self.signature_methods[signature_method] except: signature_method_names = ', '.join(self.signature_methods.keys()) raise OAuthError('Signature method %s not supported try one of the ' 'following: %s' % (signature_method, signature_method_names)) return signature_method def _get_consumer(self, oauth_request): consumer_key = oauth_request.get_parameter('oauth_consumer_key') consumer = self.data_store.lookup_consumer(consumer_key) if not consumer: raise OAuthError('Invalid consumer.') return consumer def _get_token(self, oauth_request, token_type='access'): """Try to find the token for the provided request token key.""" token_field = oauth_request.get_parameter('oauth_token') token = self.data_store.lookup_token(token_type, token_field) if not token: raise OAuthError('Invalid %s token: %s' % (token_type, token_field)) return token def _get_verifier(self, oauth_request): return oauth_request.get_parameter('oauth_verifier') def _check_signature(self, oauth_request, consumer, token): timestamp, nonce = oauth_request._get_timestamp_nonce() self._check_timestamp(timestamp) self._check_nonce(consumer, token, nonce) signature_method = self._get_signature_method(oauth_request) try: signature = oauth_request.get_parameter('oauth_signature') except: raise OAuthError('Missing signature.') # Validate the signature. valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature) if not valid_sig: key, base = signature_method.build_signature_base_string( oauth_request, consumer, token) logging.error("key: %s",key) logging.error("base: %s",base) raise OAuthError('Invalid signature. 
Expected signature base ' 'string: %s' % base) built = signature_method.build_signature(oauth_request, consumer, token) def _check_timestamp(self, timestamp): """Verify that timestamp is recentish.""" timestamp = int(timestamp) now = int(time.time()) lapsed = abs(now - timestamp) if lapsed > self.timestamp_threshold: raise OAuthError('Expired timestamp: given %d and now %s has a ' 'greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold)) def _check_nonce(self, consumer, token, nonce): """Verify that the nonce is uniqueish.""" nonce = self.data_store.lookup_nonce(consumer, token, nonce) if nonce: raise OAuthError('Nonce already used: %s' % str(nonce)) class OAuthClient(object): """OAuthClient is a worker to attempt to execute a request.""" consumer = None token = None def __init__(self, oauth_consumer, oauth_token): self.consumer = oauth_consumer self.token = oauth_token def get_consumer(self): return self.consumer def get_token(self): return self.token def fetch_request_token(self, oauth_request): """-> OAuthToken.""" raise NotImplementedError def fetch_access_token(self, oauth_request): """-> OAuthToken.""" raise NotImplementedError def access_resource(self, oauth_request): """-> Some protected resource.""" raise NotImplementedError class OAuthDataStore(object): """A database abstraction used to lookup consumers and tokens.""" def lookup_consumer(self, key): """-> OAuthConsumer.""" raise NotImplementedError def lookup_token(self, oauth_consumer, token_type, token_token): """-> OAuthToken.""" raise NotImplementedError def lookup_nonce(self, oauth_consumer, oauth_token, nonce): """-> OAuthToken.""" raise NotImplementedError def fetch_request_token(self, oauth_consumer, oauth_callback): """-> OAuthToken.""" raise NotImplementedError def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier): """-> OAuthToken.""" raise NotImplementedError def authorize_request_token(self, oauth_token, user): """-> OAuthToken.""" raise NotImplementedError class OAuthSignatureMethod(object): """A strategy class that implements a signature method.""" def get_name(self): """-> str.""" raise NotImplementedError def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token): """-> str key, str raw.""" raise NotImplementedError def build_signature(self, oauth_request, oauth_consumer, oauth_token): """-> str.""" raise NotImplementedError def check_signature(self, oauth_request, consumer, token, signature): built = self.build_signature(oauth_request, consumer, token) logging.info("Built signature: %s"%(built)) return built == signature class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod): def get_name(self): return 'HMAC-SHA1' def build_signature_base_string(self, oauth_request, consumer, token): sig = ( escape(oauth_request.get_normalized_http_method()), escape(oauth_request.get_normalized_http_url()), escape(oauth_request.get_normalized_parameters()), ) key = '%s&' % escape(consumer.secret) if token: key += escape(token.secret) raw = '&'.join(sig) return key, raw def build_signature(self, oauth_request, consumer, token): """Builds the base signature string.""" key, raw = self.build_signature_base_string(oauth_request, consumer, token) # HMAC object. try: import hashlib # 2.5 hashed = hmac.new(key, raw, hashlib.sha1) except: import sha # Deprecated hashed = hmac.new(key, raw, sha) # Calculate the digest base 64. 
return binascii.b2a_base64(hashed.digest())[:-1] class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod): def get_name(self): return 'PLAINTEXT' def build_signature_base_string(self, oauth_request, consumer, token): """Concatenates the consumer key and secret.""" sig = '%s&' % escape(consumer.secret) if token: sig = sig + escape(token.secret) return sig, sig def build_signature(self, oauth_request, consumer, token): key, raw = self.build_signature_base_string(oauth_request, consumer, token) return key
bsd-3-clause
-6,451,250,116,917,315,000
34.35241
105
0.615388
false
4.342831
false
false
false
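The HMAC-SHA1 signing in `OAuthSignatureMethod_HMAC_SHA1` above reduces to hashing the signature base string with the escaped consumer and token secrets as the key. Below is a Python 3 sketch of just that step, using only the standard library; the parameter values in the usage line are placeholders.

import base64
import hashlib
import hmac
from urllib.parse import quote

def sign_hmac_sha1(method, url, normalized_params, consumer_secret, token_secret=""):
    # key = escaped consumer secret '&' escaped token secret
    key = "%s&%s" % (quote(consumer_secret, safe="~"), quote(token_secret, safe="~"))
    # raw = METHOD & url & normalized params, each part percent-escaped
    raw = "&".join(quote(part, safe="~")
                   for part in (method.upper(), url, normalized_params))
    digest = hmac.new(key.encode("utf-8"), raw.encode("utf-8"), hashlib.sha1).digest()
    return base64.b64encode(digest).decode("ascii")

# Placeholder values, just to show the call shape.
print(sign_hmac_sha1("GET", "http://example.com/photos",
                     "oauth_consumer_key=abc&oauth_nonce=123", "consumer-secret"))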
foursquare/pants
contrib/go/src/python/pants/contrib/go/tasks/go_test.py
1
2117
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import absolute_import, division, print_function, unicode_literals

from builtins import filter

from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel

from pants.contrib.go.tasks.go_workspace_task import GoWorkspaceTask


class GoTest(GoWorkspaceTask):
  """Runs `go test` on Go packages.

  To run a library's tests, GoTest only requires a Go workspace to be initialized
  (see GoWorkspaceTask) with links to necessary source files. It does not require
  GoCompile to first compile the library to be tested -- in fact, GoTest will
  ignore any binaries in "$GOPATH/pkg/", because Go test files (which live in the
  package they are testing) are ignored in normal compilation, so Go test must
  compile everything from scratch.
  """

  @classmethod
  def register_options(cls, register):
    super(GoTest, cls).register_options(register)
    register('--build-and-test-flags', default='',
             fingerprint=True,
             help='Flags to pass in to `go test` tool.')

  @classmethod
  def supports_passthru_args(cls):
    return True

  def execute(self):
    # Only executes the tests from the package specified by the target roots, so
    # we don't run the tests for _all_ dependencies of said package.
    targets = filter(self.is_local_src, self.context.target_roots)
    for target in targets:
      self.ensure_workspace(target)
      self._go_test(target)

  def _go_test(self, target):
    args = (self.get_options().build_and_test_flags.split()
            + [target.import_path]
            + self.get_passthru_args())
    result, go_cmd = self.go_dist.execute_go_cmd('test', gopath=self.get_gopath(target),
                                                 args=args,
                                                 workunit_factory=self.context.new_workunit,
                                                 workunit_labels=[WorkUnitLabel.TEST])
    if result != 0:
      raise TaskError('{} failed with exit code {}'.format(go_cmd, result))
apache-2.0
-9,153,807,366,505,908,000
38.943396
99
0.683042
false
4.00189
true
false
false
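Outside of Pants, the core of this task (run `go test` for one import path with extra flags and fail on a non-zero exit code) can be sketched with the standard library. GOPATH handling is simplified here and the paths in the usage comment are placeholders.

import os
import subprocess

def run_go_test(import_path, gopath, extra_flags=()):
    # Run `go test <flags> <import_path>` with GOPATH pointed at the workspace.
    env = dict(os.environ, GOPATH=gopath)
    cmd = ["go", "test"] + list(extra_flags) + [import_path]
    result = subprocess.run(cmd, env=env)
    if result.returncode != 0:
        raise RuntimeError("%s failed with exit code %d"
                           % (" ".join(cmd), result.returncode))

# e.g. run_go_test("github.com/example/pkg", "/tmp/gopath", ["-v", "-run", "TestFoo"])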
bodylabs/blmath
blmath/geometry/transform/correspondence.py
1
2095
# FIXME -- move back to core


def apply_correspondence(correspondence_src, correspondence_dst, vertices):
    """
    Apply a correspondence defined between two vertex sets to a new set.

    Identifies a correspondence between `correspondence_src` and
    `correspondence_dst` then applies that correspondence to `vertices`.
    That is, `correspondence_src` is to `correspondence_dst` as `vertices`
    is to [ return value ].

    `correspondence_src` and `vertices` must have the same topology. The
    return value will have the same topology as `correspondence_dst`.
    Arguments can be passed as `chumpy` or `numpy` arrays.

    The most common usecase here is establishing a relationship between an
    alignment and a pointcloud or set of landmarks. The pointcloud or
    landmarks can then be moved automatically as the alignment is adjusted
    (e.g. fit to a different mesh, reposed, etc).

    Args:
        correspondence_src: The source vertices for the correspondence
        correspondence_dst: The destination vertices for the correspondence
        vertices: The vertices to map using the defined correspondence

    Returns:
        the mapped version of `vertices`

    Example usage
    -------------

    >>> transformed_scan_vertices = apply_correspondence(
    ...     correspondence_src=alignment.v,
    ...     correspondence_dst=scan.v,
    ...     vertices=reposed_alignment.v
    ... )
    >>> transformed_scan = Mesh(v=transformed_scan_vertices, vc=scan.vc)

    """
    import chumpy as ch
    from bodylabs.mesh.landmarking.transformed_lm import TransformedCoeffs
    from bodylabs.mesh.landmarking.transformed_lm import TransformedLms

    ch_desired = any([
        isinstance(correspondence_src, ch.Ch),
        isinstance(correspondence_dst, ch.Ch),
        isinstance(vertices, ch.Ch),
    ])

    coeffs = TransformedCoeffs(
        src_v=correspondence_src, dst_v=correspondence_dst)

    transformed_vertices = TransformedLms(
        transformed_coeffs=coeffs, src_v=vertices)

    return transformed_vertices if ch_desired else transformed_vertices.r
bsd-2-clause
-4,415,321,806,514,047,000
36.410714
79
0.705967
false
4.052224
false
false
false
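The bodylabs helpers used above (`TransformedCoeffs`, `TransformedLms`) are not public, so the following is only a rough stand-in that illustrates the idea "src is to dst as vertices is to the result" with a global least-squares affine fit in plain NumPy; it is not the library's per-vertex method.

import numpy as np

def apply_affine_correspondence(src, dst, vertices):
    # Fit an affine map A (4x3) such that [src | 1] @ A ~= dst in the
    # least-squares sense, then apply the same map to `vertices`.
    ones = np.ones((len(src), 1))
    A, *_ = np.linalg.lstsq(np.hstack([src, ones]), dst, rcond=None)
    return np.hstack([vertices, np.ones((len(vertices), 1))]) @ A

src = np.random.rand(100, 3)
dst = src * 2.0 + np.array([1.0, 0.0, -1.0])   # a known affine change
new = np.random.rand(10, 3)
print(np.allclose(apply_affine_correspondence(src, dst, new),
                  new * 2.0 + [1.0, 0.0, -1.0]))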
lepinkainen/pyfibot
pyfibot/modules/module_geoip.py
1
1389
from __future__ import unicode_literals, print_function, division
import pygeoip
import os.path
import sys
import socket

try:
    from modules.module_usertrack import get_table
    user_track_available = True
except ImportError:
    user_track_available = False

# http://dev.maxmind.com/geoip/legacy/geolite/
DATAFILE = os.path.join(sys.path[0], "GeoIP.dat")

# STANDARD = reload from disk
# MEMORY_CACHE = load to memory
# MMAP_CACHE = memory using mmap
gi4 = pygeoip.GeoIP(DATAFILE, pygeoip.MEMORY_CACHE)


def command_geoip(bot, user, channel, args):
    """Determine the user's country based on host or nick, if module_usertrack is used."""
    if not args:
        return bot.say(channel, "usage: .geoip HOST/NICK")

    host = args
    nick = None

    if user_track_available:
        table = get_table(bot, channel)
        user = table.find_one(nick=args)
        if user:
            nick = user["nick"]
            host = user["host"]

    try:
        country = gi4.country_name_by_name(host)
    except socket.gaierror:
        country = None

    if country:
        if nick:
            return bot.say(channel, "%s (%s) is in %s" % (nick, host, country))
        return bot.say(channel, "%s is in %s" % (host, country))

    if nick:
        return bot.say(channel, "Host not found for %s (%s)" % (nick, host))
    return bot.say(channel, "Host not found for %s" % host)
bsd-3-clause
-4,394,593,471,870,656,500
26.78
90
0.636429
false
3.412776
false
false
false
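A standalone sketch of the lookup path above: build the legacy GeoIP reader and ask it for a country name, treating an unresolvable host as a miss. It reuses only the pygeoip calls that already appear in the module and assumes a local GeoIP.dat file.

import socket
import pygeoip

def lookup_country(host, datafile="GeoIP.dat"):
    gi = pygeoip.GeoIP(datafile, pygeoip.MEMORY_CACHE)
    try:
        # country_name_by_name resolves the hostname internally;
        # unresolvable hosts raise socket.gaierror.
        return gi.country_name_by_name(host)
    except socket.gaierror:
        return None

# print(lookup_country("example.com"))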
llou/panopticon
panopticon/core/database.py
1
7145
# database.py is part of Panopticon. # Panopticon is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # Panopticon is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with Panopticon. If not, see <http://www.gnu.org/licenses/>. from contextlib import contextmanager from paramiko import RSAKey as pRSAKey, DSSKey from sqlalchemy import create_engine, Column, DateTime, String, Integer, Text, Boolean from sqlalchemy.orm import sessionmaker, relationship, backref from sqlalchemy.sql import not_ from sqlalchemy.schema import ForeignKey from sqlalchemy.pool import NullPool from sqlalchemy.ext.declarative import declarative_base from panopticon.core.util.database import key_value_property Base = declarative_base() class Value(Base): __tablename__ = "values" id = Column(Integer(), primary_key=True) name = Column(String(1000)) value = Column(String(1000), nullable=True) parent_id = Column(Integer, ForeignKey("values.id"), nullable=True) values = relationship("Value", backref=backref('parent', remote_side=[id], cascade="all")) type = Column(String(20)) def __init__(self, name, _type, value="", parent_id=None): self.name = name self.type = _type self.value = value self.parent_id = parent_id @property def root(self): return self.id == self.parent class Service(Base): __tablename__ = "services" name = Column(String(50), primary_key=True) class Computer(Base): __tablename__ = "computers" __table_args__ = {'sqlite_autoincrement':True} name = Column(String(255), primary_key=True) key_name = Column(String(100), ForeignKey('keys.name', onupdate="CASCADE")) active = Column(Boolean(), default=True) key = relationship("Key", backref=backref('computers')) logs = relationship("Log", backref="computer", order_by="Log.time") def __init__(self, name, key_name="", active=True): self.name = name self.active = active self.key_name = key_name class Log(Base): __tablename__ = "logs" id = Column('id', Integer, primary_key=True) time = Column(DateTime()) level = Column(String(10)) message = Column(Text()) computer_name = Column(String(255), ForeignKey('computers.name', ondelete="CASCADE", onupdate="CASCADE"), index=True) service_name = Column(String(255), ForeignKey('services.name', ondelete="CASCADE", onupdate="CASCADE"), index=True) role_name = Column(String(255), index=True) action_name = Column(String(255), index=True) def __init__(self, time, level, message, computer_name="", service_name="", role_name="", action_name=""): self.time = time self.level = level self.message = message self.computer_name = computer_name class FileTrack(Base): __tablename__ = "filetracks" uid = Column("uid", String(32), primary_key=True) _computer_name = Column("computer_name", String(255),ForeignKey('computers.name')) _path = Column("path", Text()) modification_time = Column("modification_time", DateTime()) md5 = Column("md5", String(32)) def __init__(self, computer_name, path, modification_time, md5=""): self.computer_name = computer_name self.path = path self.modification_time = modification_time self.md5 = md5 self.update_uid() @property def computer_name(self): return self._computer_name @computer_name.setter def 
computer_name(self, value): self._computer_name = value self.update_uid() @property def path(self): return self._path @path.setter def path(self, value): self._path = value self.update_uid() def update_uid(self): if self.computer_name and self.path: self.uid = "%s:%s" % (self.computer_name, self.path) else: self.uid = "" class Key(Base): __tablename__ = "keys" name = Column(String(100), primary_key=True) algorithm = Column(String(20)) v1 = Column(String(2048)) v2 = Column(String(2048)) v3 = Column(String(2048)) v4 = Column(String(2048)) key_class = None key_vals = [] __mapper_args__ = {'polymorphic_on' : algorithm} @classmethod def build_from_paramiko_key(cls, name, p_key): if isinstance(p_key, pRSAKey): return RSAKey(name, p_key.e, p_key.n) elif isinstance(p_key, DSSKey): return DSAKey(name, p_key.p, p_key.q, p_key.g, p_key.y) else: raise Exception("Not valid key") def __init__(self, name, algorithm, v1, v2, v3, v4): self.name = name self.algorithm = algorithm self.v1 = v1 self.v2 = v2 self.v3 = v3 self.v4 = v4 def get_paramiko_key(self): vals = [ getattr(self, x) for x in self.key_vals ] return self.key_class(vals=vals) class RSAKey(Key): __mapper_args__ = {'polymorphic_identity':'rsa'} key_class = pRSAKey key_vals = [ 'e', 'n' ] def __init__(self, name, e, n): self.name = name self.algorithm = "rsa" self.e = e self.n = n e = key_value_property("v1") n = key_value_property("v2") class DSAKey(Key): __mapper_args__ = {'polymorphic_identity':'dsa'} key_class = DSSKey key_vals = [ 'p', 'q', 'g', 'y' ] def __init__(self, name, p, q, g, y): self.name = name self.algorithm = "dsa" self.p = p self.q = q self.g = g self.y = y p = key_value_property("v1") q = key_value_property("v2") g = key_value_property("v3") y = key_value_property("v4") class PanopticonDB(object): def __init__(self, panopticon, engine=None): self.panopticon = panopticon self.engine = engine if engine is not None else create_engine(panopticon.db_url, poolclass=NullPool) Base.metadata.create_all(self.engine) self.Session = sessionmaker(bind=self.engine) self.sync() @contextmanager def get_session(self): session = self.Session() yield session session.commit() session.close() def purge(self,sure=False): if sure: Base.metadata.drop_all(self.engine) Base.metadata.create_all(self.engine) def sync(self): computer_names = [ x[0] for x in self.panopticon.computers ] with self.get_session() as session: session.execute(Computer.__table__.update().where(Computer.name.in_(computer_names)).values(active=True)) session.execute(Computer.__table__.update().where(not_(Computer.name.in_(computer_names))).values(active=True))
gpl-3.0
3,336,946,915,647,172,000
31.775229
123
0.626312
false
3.581454
false
false
false
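A minimal, self-contained sketch of the declarative-model-plus-session pattern this module is built on, using an in-memory SQLite engine instead of Panopticon's configured db_url; the `Host` model here is illustrative only, not one of Panopticon's tables.

from contextlib import contextmanager

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Host(Base):                      # illustrative model, not Panopticon's
    __tablename__ = "hosts"
    id = Column(Integer, primary_key=True)
    name = Column(String(255))

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)

@contextmanager
def get_session():
    # Commit-and-close wrapper, mirroring the context-manager style used above.
    session = Session()
    yield session
    session.commit()
    session.close()

with get_session() as s:
    s.add(Host(name="web01"))
with get_session() as s:
    print([h.name for h in s.query(Host).all()])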
i-namekawa/TopSideMonitor
plotting.py
1
37323
import os, sys, time from glob import glob import cv2 from pylab import * from mpl_toolkits.mplot3d import Axes3D from matplotlib.backends.backend_pdf import PdfPages matplotlib.rcParams['figure.facecolor'] = 'w' from scipy.signal import argrelextrema import scipy.stats as stats import scipy.io as sio from scipy import signal from xlwt import Workbook # specify these in mm to match your behavior chamber. CHMAMBER_LENGTH=235 WATER_HIGHT=40 # quick plot should also show xy_within and location_one_third etc # summary PDF: handle exception when a pickle file missing some fish in other pickle file ## these three taken from http://stackoverflow.com/a/18420730/566035 def strided_sliding_std_dev(data, radius=5): windowed = rolling_window(data, (2*radius, 2*radius)) shape = windowed.shape windowed = windowed.reshape(shape[0], shape[1], -1) return windowed.std(axis=-1) def rolling_window(a, window): """Takes a numpy array *a* and a sequence of (or single) *window* lengths and returns a view of *a* that represents a moving window.""" if not hasattr(window, '__iter__'): return rolling_window_lastaxis(a, window) for i, win in enumerate(window): if win > 1: a = a.swapaxes(i, -1) a = rolling_window_lastaxis(a, win) a = a.swapaxes(-2, i) return a def rolling_window_lastaxis(a, window): """Directly taken from Erik Rigtorp's post to numpy-discussion. <http://www.mail-archive.com/numpy-discussion@scipy.org/msg29450.html>""" if window < 1: raise ValueError, "`window` must be at least 1." if window > a.shape[-1]: raise ValueError, "`window` is too long." shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) ## stealing ends here... // def filterheadxy(headx,heady,thrs_denom=10): b, a = signal.butter(8, 0.125) dhy = np.abs(np.hstack((0, np.diff(heady,1)))) thrs = np.nanstd(dhy)/thrs_denom ind2remove = dhy>thrs headx[ind2remove] = np.nan heady[ind2remove] = np.nan headx = interp_nan(headx) heady = interp_nan(heady) headx = signal.filtfilt(b, a, headx, padlen=150) heady = signal.filtfilt(b, a, heady, padlen=150) return headx,heady def smoothRad(theta, thrs=np.pi/4*3): jumps = (np.diff(theta) > thrs).nonzero()[0] print 'jumps.size', jumps.size while jumps.size: # print '%d/%d' % (jumps[0], theta.size) theta[jumps+1] -= np.pi jumps = (np.diff(theta) > thrs).nonzero()[0] return theta def datadct2array(data, key1, key2): # put these in a MATLAB CELL trialN = len(data[key1][key2]) matchedUSnameP = np.zeros((trialN,), dtype=np.object) fnameP = np.zeros((trialN,), dtype=np.object) # others to append to a list eventsP = [] speed3DP = [] movingSTDP = [] d2inflowP = [] xP, yP, zP = [], [], [] XP, YP, ZP = [], [], [] ringpixelsP = [] peaks_withinP = [] swimdir_withinP = [] xy_withinP = [] location_one_thirdP = [] dtheta_shapeP = [] dtheta_velP = [] turns_shapeP = [] turns_velP = [] for n, dct in enumerate(data[key1][key2]): # MATLAB CELL matchedUSnameP[n] = dct['matchedUSname'] fnameP[n] = dct['fname'] # 2D array eventsP.append([ele if type(ele) is not list else ele[0] for ele in dct['events']]) speed3DP.append(dct['speed3D']) movingSTDP.append(dct['movingSTD']) d2inflowP.append(dct['d2inflow']) xP.append(dct['x']) yP.append(dct['y']) zP.append(dct['z']) XP.append(dct['X']) YP.append(dct['Y']) ZP.append(dct['Z']) ringpixelsP.append(dct['ringpixels']) peaks_withinP.append(dct['peaks_within']) swimdir_withinP.append(dct['swimdir_within']) xy_withinP.append(dct['xy_within']) 
location_one_thirdP.append(dct['location_one_third']) dtheta_shapeP.append(dct['dtheta_shape']) dtheta_velP.append(dct['dtheta_vel']) turns_shapeP.append(dct['turns_shape']) turns_velP.append(dct['turns_vel']) TVroi = np.array(dct['TVroi']) SVroi = np.array(dct['SVroi']) return matchedUSnameP, fnameP, np.array(eventsP), np.array(speed3DP), np.array(d2inflowP), \ np.array(xP), np.array(yP), np.array(zP), np.array(XP), np.array(YP), np.array(ZP), \ np.array(ringpixelsP), np.array(peaks_withinP), np.array(swimdir_withinP), \ np.array(xy_withinP), np.array(dtheta_shapeP), np.array(dtheta_velP), \ np.array(turns_shapeP), np.array(turns_velP), TVroi, SVroi def pickle2mat(fp, data=None): # fp : full path to pickle file # data : option to provide data to skip np.load(fp) if not data: data = np.load(fp) for key1 in data.keys(): for key2 in data[key1].keys(): matchedUSname, fname, events, speed3D, d2inflow, x, y, z, X, Y, Z, \ ringpixels, peaks_within, swimdir_within, xy_within, dtheta_shape, dtheta_vel, \ turns_shape, turns_vel, TVroi, SVroi = datadct2array(data, key1, key2) datadict = { 'matchedUSname' : matchedUSname, 'fname' : fname, 'events' : events, 'speed3D' : speed3D, 'd2inflow' : d2inflow, 'x' : x, 'y' : y, 'z' : z, 'X' : X, 'Y' : Y, 'Z' : Z, 'ringpixels' : ringpixels, 'peaks_within' : peaks_within, 'swimdir_within' : swimdir_within, 'xy_within' : xy_within, 'dtheta_shape' : dtheta_shape, 'dtheta_vel' : dtheta_vel, 'turns_shape' : turns_shape, 'turns_vel' : turns_vel, 'TVroi' : TVroi, 'SVroi' : SVroi, } outfp = '%s_%s_%s.mat' % (fp[:-7],key1,key2) sio.savemat(outfp, datadict, oned_as='row', do_compression=True) def interp_nan(x): ''' Replace nan by interporation http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array ''' ok = -np.isnan(x) if (ok == False).all(): return x else: xp = ok.ravel().nonzero()[0] fp = x[ok] _x = np.isnan(x).ravel().nonzero()[0] x[-ok] = np.interp(_x, xp, fp) return x def polytest(x,y,rx,ry,rw,rh,rang): points=cv2.ellipse2Poly( (rx,ry), axes=(rw/2,rh/2), angle=rang, arcStart=0, arcEnd=360, delta=3 ) return cv2.pointPolygonTest(np.array(points), (x,y), measureDist=1) def depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3): z0 = z - SVy1 x0 = x - TVx1 mid = (SVy2-SVy1)/2 adj = (z0 - mid) / (SVy2-SVy1) * (SVy2-SVy3) * (1-(x0)/float(TVx2-TVx1)) return z0 + adj + SVy1 # back to abs coord def putNp2xls(array, ws): for r, row in enumerate(array): for c, val in enumerate(row): ws.write(r, c, val) def drawLines(mi, ma, events, fps=30.0): CS, USs, preRange = events plot([CS-preRange, CS-preRange], [mi,ma], '--c') # 2 min prior odor plot([CS , CS ], [mi,ma], '--g', linewidth=2) # CS onset if USs: if len(USs) > 3: colors = 'r' * len(USs) else: colors = [_ for _ in ['r','b','c'][:len(USs)]] for c,us in zip(colors, USs): plot([us, us],[mi,ma], linestyle='--', color=c, linewidth=2) # US onset plot([USs[0]+preRange/2,USs[0]+preRange/2], [mi,ma], linestyle='--', color=c, linewidth=2) # end of US window xtck = np.arange(0, max(CS+preRange, max(USs)), 0.5*60*fps) # every 0.5 min tick else: xtck = np.arange(0, CS+preRange, 0.5*60*fps) # every 0.5 min tick xticks(xtck, xtck/fps/60) gca().xaxis.set_minor_locator(MultipleLocator(5*fps)) # 5 s minor ticks def approachevents(x,y,z, ringpolyTVArray, ringpolySVArray, fishlength=134, thrs=None): ''' fishlength: some old scrits may call this with fishlength thrs: multitrack GUI provides this by ringAppearochLevel spin control. 
can be an numpy array (to track water level change etc) ''' smoothedz = np.convolve(np.hanning(10)/np.hanning(10).sum(), z, 'same') peaks = argrelextrema(smoothedz, np.less)[0] # less because 0 is top in image. # now filter peaks by height. ringLevel = ringpolySVArray[:,1] if thrs is None: thrs = ringLevel+fishlength/2 if type(thrs) == int: # can be numpy array or int thrs = ringLevel.mean() + thrs peaks = peaks[ z[peaks] < thrs ] else: # numpy array should be ready to use peaks = peaks[ z[peaks] < thrs[peaks] ] # now filter out by TVringCenter peaks_within = get_withinring(ringpolyTVArray, peaks, x, y) return smoothedz, peaks_within def get_withinring(ringpolyTVArray, timepoints, x, y): rx = ringpolyTVArray[:,0].astype(np.int) ry = ringpolyTVArray[:,1].astype(np.int) rw = ringpolyTVArray[:,2].astype(np.int) rh = ringpolyTVArray[:,3].astype(np.int) rang = ringpolyTVArray[:,4].astype(np.int) # poly test peaks_within = [] for p in timepoints: points=cv2.ellipse2Poly( (rx[p],ry[p]), axes=(rw[p]/2,rh[p]/2), angle=rang[p], arcStart=0, arcEnd=360, delta=3 ) inout = cv2.pointPolygonTest(np.array(points), (x[p],y[p]), measureDist=1) if inout > 0: peaks_within.append(p) return peaks_within def location_ring(x,y,ringpolyTVArray): rx = ringpolyTVArray[:,0].astype(np.int) ry = ringpolyTVArray[:,1].astype(np.int) rw = ringpolyTVArray[:,2].astype(np.int) rh = ringpolyTVArray[:,3].astype(np.int) d2ringcenter = np.sqrt((x-rx)**2 + (y-ry)**2) # filter by radius 20% buffer in case the ring moves around indices = (d2ringcenter < 1.2*max(rw.max(), rh.max())).nonzero()[0] xy_within = get_withinring(ringpolyTVArray, indices, x, y) return xy_within def swimdir_analysis(x,y,z,ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps=30.0): # smoothing # z = np.convolve(np.hanning(16)/np.hanning(16).sum(), z, 'same') # two cameras have different zoom settings. So, distance per pixel is different. But, for # swim direction, it does not matter how much x,y are compressed relative to z. # ring z level from SV rz = ringpolySVArray[:,1].astype(np.int) # ring all other params from TV rx = ringpolyTVArray[:,0].astype(np.int) ry = ringpolyTVArray[:,1].astype(np.int) rw = ringpolyTVArray[:,2].astype(np.int) rh = ringpolyTVArray[:,3].astype(np.int) rang = ringpolyTVArray[:,4].astype(np.int) speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 ) speed3D = np.hstack(([0], speed3D)) # line in 3D http://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfLines.aspx # x-x0 y-y0 z-z0 # ---- = ---- = ---- # a b c # solve them for z = rz. 
x0,y0,z0 are tvx, tvy, svy # x = (a * (rz-z)) / c + x0 dt = 3 # define slope as diff between current and dt frame before a = np.hstack( (np.ones(dt), x[dt:]-x[:-dt]) ) b = np.hstack( (np.ones(dt), y[dt:]-y[:-dt]) ) c = np.hstack( (np.ones(dt), z[dt:]-z[:-dt]) ) c[c==0] = np.nan # avoid zero division water_x = (a * (rz-z) / c) + x water_y = (b * (rz-z) / c) + y upwards = c<-2/30.0*fps # not accurate when c is small or negative xok = (TVx1 < water_x) & (water_x < TVx2) yok = (TVy1 < water_y) & (water_y < TVy2) filtered = upwards & xok & yok# & -np.isinf(water_x) & -np.isinf(water_y) water_x[-filtered] = np.nan water_y[-filtered] = np.nan # figure() # ax = subplot(111) # ax.imshow(npData['TVbg'], cmap=cm.gray) # clip out from TVx1,TVy1 # ax.plot(x-TVx1, y-TVy1, 'c') # ax.plot(water_x-TVx1, water_y-TVy1, 'r.') # xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0]) # draw(); show() SwimDir = [] for n in filtered.nonzero()[0]: inout = polytest(water_x[n],water_y[n],rx[n],ry[n],rw[n],rh[n],rang[n]) SwimDir.append((n, inout, speed3D[n])) # inout>0 are inside return SwimDir, water_x, water_y def plot_eachTr(events, x, y, z, inflowpos, ringpixels, peaks_within, swimdir_within=None, pp=None, _title=None, fps=30.0, inmm=False): CS, USs, preRange = events # preRange = 3600 2 min prior and 1 min after CS. +900 for 0.5 min if USs: xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps else: xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps fig = figure(figsize=(12,8), facecolor='w') subplot(511) # Swimming speed speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 ) drawLines(np.nanmin(speed3D), np.nanmax(speed3D), events, fps) # go behind plot(speed3D) movingSTD = np.append( np.zeros(fps*10), strided_sliding_std_dev(speed3D, fps*10) ) plot(movingSTD, linewidth=2) plot(np.ones_like(speed3D) * speed3D.std()*6, '-.', color='gray') ylim([-5, speed3D[xmin:xmax].max()]) xlim([xmin,xmax]); title(_title) if inmm: ylabel('Speed 3D (mm),\n6SD thr'); else: ylabel('Speed 3D, 6SD thr'); ax = subplot(512) # z level drawLines(z.min(), z.max(), events) plot(z, 'b') pkx = peaks_within.nonzero()[0] if inmm: plot(pkx, peaks_within[pkx]*z[xmin:xmax].max()*0.97, 'mo') if swimdir_within is not None: ___x = swimdir_within.nonzero()[0] plot(___x, swimdir_within[___x]*z[xmin:xmax].max()*0.96, 'g+') ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()]) xlim([xmin,xmax]); ylabel('Z (mm)') else: plot(pkx, peaks_within[pkx]*z[xmin:xmax].min()*0.97, 'mo') if swimdir_within is not None: ___x = swimdir_within.nonzero()[0] plot(___x, swimdir_within[___x]*z[xmin:xmax].min()*0.96, 'g+') ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()]) ax.invert_yaxis(); xlim([xmin,xmax]); ylabel('z') subplot(513) # x drawLines(x.min(), x.max(), events) plot(x, 'b') plot(y, 'g') xlim([xmin,xmax]); ylabel('x,y') subplot(514) # Distance to the inflow tube xin, yin, zin = inflowpos d2inflow = np.sqrt((x-xin) ** 2 + (y-yin) ** 2 + (z-zin) ** 2 ) drawLines(d2inflow.min(), d2inflow.max(), events) plot(d2inflow) ylim([d2inflow[xmin:xmax].min(), d2inflow[xmin:xmax].max()]) xlim([xmin,xmax]); ylabel('distance to\ninflow tube') subplot(515) # ringpixels: it seems i never considered TV x,y for this rpmax, rpmin = np.nanmax(ringpixels[xmin:xmax]), np.nanmin(ringpixels[xmin:xmax]) drawLines(rpmin, rpmax, events) plot(ringpixels) plot(pkx, peaks_within[pkx]*rpmax*1.06, 'mo') if swimdir_within is not None: plot(___x, swimdir_within[___x]*rpmax*1.15, 'g+') ylim([-100, rpmax*1.2]) xlim([xmin,xmax]); ylabel('ringpixels') tight_layout() if pp: fig.savefig(pp, 
format='pdf') rng = np.arange(CS-preRange, CS+preRange, dtype=np.int) return speed3D[rng], movingSTD[rng], d2inflow[rng], ringpixels[rng] def plot_turnrates(events, dthetasum_shape,dthetasum_vel,turns_shape,turns_vel, pp=None, _title=None, thrs=np.pi/4*(133.33333333333334/120), fps=30.0): CS, USs, preRange = events # preRange = 3600 2 min prior and 1 min after CS. +900 for 0.5 min if USs: xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps else: xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps fig = figure(figsize=(12,8), facecolor='w') subplot(211) drawLines(dthetasum_shape.min(), dthetasum_shape.max(), events) plot(np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--') plot(-np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--') plot(dthetasum_shape) dmax = dthetasum_shape[xmin:xmax].max() plot(turns_shape, (0.5+dmax)*np.ones_like(turns_shape), 'o') temp = np.zeros_like(dthetasum_shape) temp[turns_shape] = 1 shape_cumsum = np.cumsum(temp) shape_cumsum -= shape_cumsum[xmin] plot( shape_cumsum / shape_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min()) xlim([xmin,xmax]); ylabel('Shape based'); title('Orientation change per 4 frames: ' + _title) ylim([dthetasum_shape[xmin:xmax].min()-1, dmax+1]) subplot(212) drawLines(dthetasum_vel.min(), dthetasum_vel.max(), events) plot(np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--') plot(-np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--') plot(dthetasum_vel) dmax = dthetasum_vel[xmin:xmax].max() plot(turns_vel, (0.5+dmax)*np.ones_like(turns_vel), 'o') temp = np.zeros_like(dthetasum_vel) temp[turns_vel] = 1 vel_cumsum = np.cumsum(temp) vel_cumsum -= vel_cumsum[xmin] plot( vel_cumsum / vel_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min()) ylim([dthetasum_vel[xmin:xmax].min()-1, dmax+1]) xlim([xmin,xmax]); ylabel('Velocity based') tight_layout() if pp: fig.savefig(pp, format='pdf') def trajectory(x, y, z, rng, ax, _xlim=[0,640], _ylim=[480,480+300], _zlim=[150,340], color='b', fps=30.0, ringpolygon=None): ax.plot(x[rng],y[rng],z[rng], color=color) ax.view_init(azim=-75, elev=-180+15) if ringpolygon: rx, ry, rz = ringpolygon ax.plot(rx, ry, rz, color='gray') ax.set_xlim(_xlim[0],_xlim[1]) ax.set_ylim(_ylim[0],_ylim[1]) ax.set_zlim(_zlim[0],_zlim[1]) title(("(%2.1f min to %2.1f min)" % (rng[0]/fps/60.0,(rng[-1]+1)/60.0/fps))) draw() def plotTrajectory(x, y, z, events, _xlim=None, _ylim=None, _zlim=None, fps=30.0, pp=None, ringpolygon=None): CS, USs, preRange = events rng1 = np.arange(CS-preRange, CS-preRange/2, dtype=int) rng2 = np.arange(CS-preRange/2, CS, dtype=int) if USs: rng3 = np.arange(CS, min(USs), dtype=int) rng4 = np.arange(min(USs), min(USs)+preRange/2, dtype=int) combined = np.hstack((rng1,rng2,rng3,rng4)) else: combined = np.hstack((rng1,rng2)) if _xlim is None: _xlim = map( int, ( x[combined].min(), x[combined].max() ) ) if _ylim is None: _ylim = map( int, ( y[combined].min(), y[combined].max() ) ) if _zlim is None: _zlim = map( int, ( z[combined].min(), z[combined].max() ) ) if ringpolygon: _zlim[0] = min( _zlim[0], int(ringpolygon[2][0]) ) fig3D = plt.figure(figsize=(12,8), facecolor='w') ax = fig3D.add_subplot(221, projection='3d'); trajectory(x,y,z,rng1,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon) ax = fig3D.add_subplot(222, projection='3d'); trajectory(x,y,z,rng2,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon) if USs: ax = fig3D.add_subplot(223, projection='3d'); trajectory(x,y,z,rng3,ax,_xlim,_ylim,_zlim,'g',fps,ringpolygon) ax = fig3D.add_subplot(224, projection='3d'); 
trajectory(x,y,z,rng4,ax,_xlim,_ylim,_zlim,'r',fps,ringpolygon) tight_layout() if pp: fig3D.savefig(pp, format='pdf') def add2DataAndPlot(fp, fish, data, createPDF): if createPDF: pp = PdfPages(fp[:-7]+'_'+fish+'.pdf') else: pp = None params = np.load(fp) fname = os.path.basename(fp).split('.')[0] + '.avi' dirname = os.path.dirname(fp) preRange = params[(fname, 'mog')]['preRange'] fps = params[(fname, 'mog')]['fps'] TVx1 = params[(fname, fish)]['TVx1'] TVy1 = params[(fname, fish)]['TVy1'] TVx2 = params[(fname, fish)]['TVx2'] TVy2 = params[(fname, fish)]['TVy2'] SVx1 = params[(fname, fish)]['SVx1'] SVx2 = params[(fname, fish)]['SVx2'] SVx3 = params[(fname, fish)]['SVx3'] SVy1 = params[(fname, fish)]['SVy1'] SVy2 = params[(fname, fish)]['SVy2'] SVy3 = params[(fname, fish)]['SVy3'] ringAppearochLevel = params[(fname, fish)]['ringAppearochLevel'] _npz = os.path.join(dirname, os.path.join('%s_%s.npz' % (fname[:-4], fish))) # if os.path.exists(_npz): npData = np.load(_npz) tvx = npData['TVtracking'][:,0] # x with nan tvy = npData['TVtracking'][:,1] # y headx = npData['TVtracking'][:,3] # headx heady = npData['TVtracking'][:,4] # heady svy = npData['SVtracking'][:,1] # z InflowTubeTVArray = npData['InflowTubeTVArray'] InflowTubeSVArray = npData['InflowTubeSVArray'] inflowpos = InflowTubeTVArray[:,0], InflowTubeTVArray[:,1], InflowTubeSVArray[:,1] ringpixels = npData['ringpixel'] ringpolyTVArray = npData['ringpolyTVArray'] ringpolySVArray = npData['ringpolySVArray'] TVbg = npData['TVbg'] print os.path.basename(_npz), 'loaded.' x,y,z = map(interp_nan, [tvx,tvy,svy]) # z level correction by depth (x) z = depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3) smoothedz, peaks_within = approachevents(x, y, z, ringpolyTVArray, ringpolySVArray, thrs=ringAppearochLevel) # convert to numpy array from list temp = np.zeros_like(x) temp[peaks_within] = 1 peaks_within = temp # normalize to mm longaxis = float(max((TVx2-TVx1), (TVy2-TVy1))) # before rotation H is applied they are orthogonal waterlevel = float(SVy2-SVy1) X = (x-TVx1) / longaxis * CHMAMBER_LENGTH Y = (TVy2-y) / longaxis * CHMAMBER_LENGTH Z = (SVy2-z) / waterlevel * WATER_HIGHT # bottom of chamber = 0, higher more positive inflowpos_mm = ((inflowpos[0]-TVx1) / longaxis * CHMAMBER_LENGTH, (TVy2-inflowpos[1]) / longaxis * CHMAMBER_LENGTH, (SVy2-inflowpos[2]) / waterlevel * WATER_HIGHT ) # do the swim direction analysis here swimdir, water_x, water_y = swimdir_analysis(x,y,z, ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps) # all of swimdir are within ROI (frame#, inout, speed) but not necessary within ring sdir = np.array(swimdir) withinRing = sdir[:,1]>0 # inout>0 are inside ring temp = np.zeros_like(x) temp[ sdir[withinRing,0].astype(int) ] = 1 swimdir_within = temp # location_ring xy_within = location_ring(x,y, ringpolyTVArray) temp = np.zeros_like(x) temp[xy_within] = 1 xy_within = temp # location_one_third if (TVx2-TVx1) > (TVy2-TVy1): if np.abs(np.arange(TVx1, longaxis+TVx1, longaxis/3) + longaxis/6 - inflowpos[0].mean()).argmin() == 2: location_one_third = x-TVx1 > longaxis/3*2 else: location_one_third = x < longaxis/3 else: if np.abs(np.arange(TVy1, longaxis+TVy1, longaxis/3) + longaxis/6 - inflowpos[1].mean()).argmin() == 2: location_one_third = y-TVy1 > longaxis/3*2 else: location_one_third = y < longaxis/3 # turn rate analysis (shape based) heady, headx = map(interp_nan, [heady, headx]) headx, heady = filterheadxy(headx, heady) dy = heady - y dx = headx - x theta_shape = np.arctan2(dy, dx) # velocity based cx, cy = filterheadxy(x.copy(), 
y.copy()) # centroid x,y vx = np.append(0, np.diff(cx)) vy = np.append(0, np.diff(cy)) theta_vel = np.arctan2(vy, vx) # prepare ringpolygon for trajectory plot rx, ry, rw, rh, rang = ringpolyTVArray.mean(axis=0).astype(int) # use mm ver above rz = ringpolySVArray.mean(axis=0)[1].astype(int) RX = (rx-TVx1) / longaxis * CHMAMBER_LENGTH RY = (TVy2-ry) / longaxis * CHMAMBER_LENGTH RW = rw / longaxis * CHMAMBER_LENGTH / 2 RH = rh / longaxis * CHMAMBER_LENGTH / 2 RZ = (SVy2-rz) / waterlevel * WATER_HIGHT points = cv2.ellipse2Poly( (RX.astype(int),RY.astype(int)), axes=(RW.astype(int),RH.astype(int)), angle=rang, arcStart=0, arcEnd=360, delta=3 ) ringpolygon = [points[:,0], points[:,1], np.ones(points.shape[0]) * RZ] eventTypeKeys = params[(fname, fish)]['EventData'].keys() CSs = [_ for _ in eventTypeKeys if _.startswith('CS')] USs = [_ for _ in eventTypeKeys if _.startswith('US')] # print CSs, USs # events for CS in CSs: CS_Timings = params[(fname, fish)]['EventData'][CS] CS_Timings.sort() # initialize when needed if CS not in data[fish].keys(): data[fish][CS] = [] # now look around for US after it within preRange for t in CS_Timings: tr = len(data[fish][CS])+1 rng = np.arange(t-preRange, t+preRange, dtype=np.int) matchedUSname = None for us in USs: us_Timings = params[(fname, fish)]['EventData'][us] matched = [_ for _ in us_Timings if t-preRange < _ < t+preRange] if matched: events = [t, matched, preRange] # ex. CS+ matchedUSname = us break else: continue _title = '(%s, %s) trial#%02d %s (%s)' % (CS, matchedUSname[0], tr, fname, fish) print _title, events _speed3D, _movingSTD, _d2inflow, _ringpixels = plot_eachTr(events, X, Y, Z, inflowpos_mm, ringpixels, peaks_within, swimdir_within, pp, _title, fps, inmm=True) # 3d trajectory _xlim = (0, CHMAMBER_LENGTH) _zlim = (RZ.max(),0) plotTrajectory(X, Y, Z, events, _xlim=_xlim, _zlim=_zlim, fps=fps, pp=pp, ringpolygon=ringpolygon) # turn rate analysis # shape based theta_shape[rng] = smoothRad(theta_shape[rng].copy(), thrs=np.pi/2) dtheta_shape = np.append(0, np.diff(theta_shape)) # full length kernel = np.ones(4) dthetasum_shape = np.convolve(dtheta_shape, kernel, 'same') # 4 frames = 1000/30.0*4 = 133.3 ms thrs = (np.pi / 2) * (133.33333333333334/120) # Braubach et al 2009 90 degree in 120 ms peaks_shape = argrelextrema(abs(dthetasum_shape), np.greater)[0] turns_shape = peaks_shape[ (abs(dthetasum_shape[peaks_shape]) > thrs).nonzero()[0] ] # velocity based theta_vel[rng] = smoothRad(theta_vel[rng].copy(), thrs=np.pi/2) dtheta_vel = np.append(0, np.diff(theta_vel)) dthetasum_vel = np.convolve(dtheta_vel, kernel, 'same') peaks_vel = argrelextrema(abs(dthetasum_vel), np.greater)[0] turns_vel = peaks_vel[ (abs(dthetasum_vel[peaks_vel]) > thrs).nonzero()[0] ] plot_turnrates(events, dthetasum_shape, dthetasum_vel, turns_shape, turns_vel, pp, _title, fps=fps) _temp = np.zeros_like(dtheta_shape) _temp[turns_shape] = 1 turns_shape_array = _temp _temp = np.zeros_like(dtheta_vel) _temp[turns_vel] = 1 turns_vel_array = _temp # plot swim direction analysis fig = figure(figsize=(12,8), facecolor='w') ax1 = subplot(211) ax1.imshow(TVbg, cmap=cm.gray) # TVbg is clip out of ROI ax1.plot(x[rng]-TVx1, y[rng]-TVy1, 'gray') ax1.plot(water_x[t-preRange:t]-TVx1, water_y[t-preRange:t]-TVy1, 'c.') if matched: ax1.plot( water_x[t:matched[0]]-TVx1, water_y[t:matched[0]]-TVy1, 'g.') ax1.plot( water_x[matched[0]:matched[0]+preRange/4]-TVx1, water_y[matched[0]:matched[0]+preRange/4]-TVy1, 'r.') xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0]) title(_title) ax2 = subplot(212) ax2.plot( 
swimdir_within ) ax2.plot( peaks_within*1.15-0.1, 'mo' ) if matched: xmin, xmax = t-preRange-10*fps, matched[0]+preRange/4 else: xmin, xmax = t-preRange-10*fps, t+preRange/2+10*fps gzcs = np.cumsum(swimdir_within) gzcs -= gzcs[xmin] ax2.plot( gzcs/gzcs[xmax] ) drawLines(0,1.2, events) ylim([0,1.2]) xlim([xmin, xmax]) ylabel('|: SwimDirection\no: approach events') data[fish][CS].append( { 'fname' : fname, 'x': x[rng], 'y': y[rng], 'z': z[rng], 'X': X[rng], 'Y': Y[rng], 'Z': Z[rng], # calibrate space (mm) 'speed3D': _speed3D, # calibrate space (mm) 'movingSTD' : _movingSTD, # calibrate space (mm) 'd2inflow': _d2inflow, # calibrate space (mm) 'ringpixels': _ringpixels, 'peaks_within': peaks_within[rng], 'xy_within': xy_within[rng], 'location_one_third' : location_one_third[rng], 'swimdir_within' : swimdir_within[rng], 'dtheta_shape': dtheta_shape[rng], 'dtheta_vel': dtheta_vel[rng], 'turns_shape': turns_shape_array[rng], # already +/- preRange 'turns_vel': turns_vel_array[rng], 'events' : events, 'matchedUSname' : matchedUSname, 'TVroi' : (TVx1,TVy1,TVx2,TVy2), 'SVroi' : (SVx1,SVy1,SVx2,SVy2), } ) if pp: fig.savefig(pp, format='pdf') close('all') # release memory ASAP! if pp: pp.close() def getPDFs(pickle_files, fishnames=None, createPDF=True): # type checking args if type(pickle_files) is str: pickle_files = [pickle_files] # convert to a list or set of fish names if type(fishnames) is str: fishnames = [fishnames] elif not fishnames: fishnames = set() # re-organize trials into a dict "data" data = {} # figure out trial number (sometime many trials in one files) for each fish # go through all pickle_files and use timestamps of file to sort events. timestamps = [] for fp in pickle_files: # collect ctime of pickled files fname = os.path.basename(fp).split('.')[0] + '.avi' timestamps.append( time.strptime(fname, "%b-%d-%Y_%H_%M_%S.avi") ) # look into the pickle and collect fish analyzed params = np.load(fp) # loading pickled file! if type(fishnames) is set: for fish in [fs for fl,fs in params.keys() if fl == fname and fs != 'mog']: fishnames.add(fish) timestamps = sorted(range(len(timestamps)), key=timestamps.__getitem__) # For each fish, go thru all pickled files for fish in fishnames: data[fish] = {} # now go thru the sorted for ind in timestamps: fp = pickle_files[ind] print 'processing #%d\n%s' % (ind, fp) add2DataAndPlot(fp, fish, data, createPDF) return data def plotTrials(data, fish, CSname, key, step, offset=0, pp=None): fig = figure(figsize=(12,8), facecolor='w') ax1 = fig.add_subplot(121) # raw trace ax2 = fig.add_subplot(222) # learning curve ax3 = fig.add_subplot(224) # bar plot preP, postP, postP2 = [], [], [] longestUS = 0 for n, measurement in enumerate(data[fish][CSname]): tr = n+1 CS, USs, preRange = measurement['events'] subplot(ax1) mi = -step*(tr-1) ma = mi + step drawLines(mi, ma, (preRange, [preRange+(USs[0]-CS)], preRange)) longestUS = max([us-CS+preRange*3/2 for us in USs]+[longestUS]) # 'measurement[key]': vector around the CS timing (+/-) preRange. i.e., preRange is the center ax1.plot(measurement[key]-step*(tr-1)+offset) title(CSname+': '+key) # cf. 
preRange = 3600 frames pre = measurement[key][:preRange].mean()+offset # 2 min window post = measurement[key][preRange:preRange+(USs[0]-CS)].mean()+offset # 23 s window post2 = measurement[key][preRange+(USs[0]-CS):preRange*3/2+(USs[0]-CS)].mean()+offset # 1 min window after US preP.append(pre) postP.append(post) postP2.append(post2) ax3.plot([1, 2, 3], [pre, post, post2],'o-') ax1.set_xlim([0,longestUS]) ax1.axis('off') subplot(ax2) x = range(1, tr+1) y = np.diff((preP,postP), axis=0).ravel() ax2.plot( x, y, 'ko-', linewidth=2 ) ax2.plot( x, np.zeros_like(x), '-.', linewidth=1, color='gray' ) # grid() slope, intercept, rvalue, pval, stderr = stats.stats.linregress(x,y) title('slope = zero? p-value = %f' % pval) ax2.set_xlabel("Trial#") ax2.set_xlim([0.5,tr+0.5]) ax2.set_ylabel('CS - pre') subplot(ax3) ax3.bar([0.6, 1.6, 2.6], [np.nanmean(preP), np.nanmean(postP), np.nanmean(postP2)], facecolor='none') t, pval = stats.ttest_rel(postP, preP) title('paired t p-value = %f' % pval) ax3.set_xticks([1,2,3]) ax3.set_xticklabels(['pre', CSname, measurement['matchedUSname']]) ax3.set_xlim([0.5,3.5]) ax3.set_ylabel('Raw mean values') tight_layout(2, h_pad=1, w_pad=1) if pp: fig.savefig(pp, format='pdf') close('all') return np.vstack((preP, postP, postP2)) def getSummary(data, dirname=None): for fish in data.keys(): for CSname in data[fish].keys(): if dirname: pp = PdfPages(os.path.join(dirname, '%s_for_%s.pdf' % (CSname,fish))) print 'generating %s_for_%s.pdf' % (CSname,fish) book = Workbook() sheet1 = book.add_sheet('speed3D') avgs = plotTrials(data, fish, CSname, 'speed3D', 30, pp=pp) putNp2xls(avgs, sheet1) sheet2 = book.add_sheet('d2inflow') avgs = plotTrials(data, fish, CSname, 'd2inflow', 200, pp=pp) putNp2xls(avgs, sheet2) # sheet3 = book.add_sheet('smoothedz') sheet3 = book.add_sheet('Z') # avgs = plotTrials(data, fish, CSname, 'smoothedz', 100, pp=pp) avgs = plotTrials(data, fish, CSname, 'Z', 30, pp=pp) putNp2xls(avgs, sheet3) sheet4 = book.add_sheet('ringpixels') avgs = plotTrials(data, fish, CSname, 'ringpixels', 1200, pp=pp) putNp2xls(avgs, sheet4) sheet5 = book.add_sheet('peaks_within') avgs = plotTrials(data, fish, CSname, 'peaks_within', 1.5, pp=pp) putNp2xls(avgs, sheet5) sheet6 = book.add_sheet('swimdir_within') avgs = plotTrials(data, fish, CSname, 'swimdir_within', 1.5, pp=pp) putNp2xls(avgs, sheet6) sheet7 = book.add_sheet('xy_within') avgs = plotTrials(data, fish, CSname, 'xy_within', 1.5, pp=pp) putNp2xls(avgs, sheet7) sheet8 = book.add_sheet('turns_shape') avgs = plotTrials(data, fish, CSname, 'turns_shape', 1.5, pp=pp) putNp2xls(avgs, sheet8) sheet9 = book.add_sheet('turns_vel') avgs = plotTrials(data, fish, CSname, 'turns_vel', 1.5, pp=pp) putNp2xls(avgs, sheet9) if dirname: pp.close() book.save(os.path.join(dirname, '%s_for_%s.xls' % (CSname,fish))) close('all') else: show() def add2Pickles(dirname, pickle_files): # dirname : folder to look for pickle files # pickle_files : output, a list to be concatenated. 
pattern = os.path.join(dirname, '*.pickle') temp = [_ for _ in glob(pattern) if not _.endswith('- Copy.pickle') and not os.path.basename(_).startswith('Summary')] pickle_files += temp if __name__ == '__main__': pickle_files = [] # small test data # add2Pickles('R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test', pickle_files) # outputdir = 'R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test' # show me what you got for pf in pickle_files: print pf fp = os.path.join(outputdir, 'Summary.pickle') createPDF = True # useful when plotting etc code updated if 1: # refresh analysis data = getPDFs(pickle_files, createPDF=createPDF) import cPickle as pickle with open(os.path.join(outputdir, 'Summary.pickle'), 'wb') as f: pickle.dump(data, f) else: # or reuse previous data = np.load(fp) getSummary(data, outputdir) pickle2mat(fp, data)
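The turn-rate analysis in the script above reduces to a small, reusable computation; the sketch below restates it in isolation so the thresholding logic is easier to follow. The function and argument names (detect_turns, theta, fps, window) are illustrative and not part of the original file; SciPy is assumed to be available, as it already is for argrelextrema above.

# Minimal sketch of the turn-detection step used above: heading angles are
# differentiated frame-to-frame, summed over a 4-frame window, and local
# extrema above ~90 degrees per 133 ms (cf. Braubach et al. 2009) are kept.
import numpy as np
from scipy.signal import argrelextrema

def detect_turns(theta, fps=30.0, window=4):
    # theta: heading angle per frame (radians), assumed already smoothed/unwrapped
    dtheta = np.append(0, np.diff(theta))
    dthetasum = np.convolve(dtheta, np.ones(window), 'same')
    # 90 degrees in 120 ms, rescaled to the summed window of window/fps seconds
    thrs = (np.pi / 2) * ((window / fps * 1000.0) / 120.0)
    peaks = argrelextrema(np.abs(dthetasum), np.greater)[0]
    return peaks[np.abs(dthetasum[peaks]) > thrs]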
bsd-3-clause
4,991,329,245,887,673,000
36.435306
124
0.567291
false
2.972286
false
false
false
jkunimune15/Map-Projections
src/zupplemental/compose_maps.py
1
5115
#compose_maps.py
#make ALL the maps

import math

from generate_borders import generate_borders
from generate_graticule import generate_graticule, generate_backdrop
from generate_indicatrices import generate_indicatrices
from generate_orthodromes import generate_orthodromes
from generate_shape import plot_shapes
from generate_labels import generate_topographical_labels, label_shapes, label_points


def compose_landmasses():
    print('\t<g transform="matrix(1,0,0,-1,180,90)">')
    print('\t\t<g class="land">')
    plot_shapes('ne_50m_land', trim_antarctica=True)
    print('\t\t</g>')
    print('\t\t<g class="water">')
    plot_shapes('ne_50m_lakes', max_rank=4)
    print('\t\t</g>')
    print('\t</g>')

def compose_graticule():
    print('\t<g transform="matrix(1,0,0,-1,180,90)">')
    print('\t\t<g class="graticule">')
    generate_graticule(5, 1, include_tropics=True, adjust_poles=True)
    print('\t\t</g>')
    print('\t</g>')

def compose_graticule2():
    print('\t<g transform="matrix(1,0,0,-1,180,90)">')
    print('\t\t<g class="graticule">')
    generate_graticule(15, .25, include_tropics=True, adjust_poles=True, double_dateline=True)
    print('\t\t</g>')
    print('\t</g>')

def compose_compound():
    print('\t<g transform="matrix(1,0,0,-1,180,90)">')
    print('\t\t<g class="land">')
    plot_shapes('ne_50m_land', trim_antarctica=True)
    print('\t\t</g>')
    print('\t\t<g class="river">')
    plot_shapes('ne_50m_rivers_lake_centerlines', max_rank=4)
    print('\t\t</g>')
    print('\t\t<g class="lakes">')
    plot_shapes('ne_50m_lakes', max_rank=4)
    print('\t\t</g>')
    print('\t\t<g class="graticule">')
    generate_graticule(15, 1, include_tropics=True, adjust_poles=True)
    print('\t\t</g>')
    print('\t</g>')

def compose_indicatrices():
    print('\t<g transform="matrix(1,0,0,-1,180,90)">')
    print('\t\t<g class="land">')
    plot_shapes('ne_50m_land', trim_antarctica=True)
    print('\t\t</g>')
    print('\t\t<g class="lakes">')
    plot_shapes('ne_50m_lakes', max_rank=4)
    print('\t\t</g>')
    print('\t\t<g class="tissot">')
    generate_indicatrices(15, math.radians(3.75), resolution=180, adjust_poles=True)
    print('\t\t</g>')
    print('\t</g>')

def compose_indicatrices2(ctr_meridian):
    print('\t<g transform="matrix(1,0,0,-1,180,90)">')
    print('\t\t<g class="water">')
    generate_backdrop(.5, ctr_meridian=ctr_meridian)
    print('\t\t</g>')
    print('\t\t<g class="land">')
    plot_shapes('ne_110m_land', flesh_out_antarctica=True)
    print('\t\t</g>')
    print('\t\t<g class="lakes">')
    plot_shapes('ne_110m_lakes')
    print('\t\t</g>')
    print('\t\t<g class="graticule">')
    generate_graticule(10, .5, double_dateline=(ctr_meridian==0))
    print('\t\t</g>')
    print('\t\t<g class="tissot">')
    generate_indicatrices(30, 500/6371, ctr_meridian=ctr_meridian, adjust_poles=True, resolution=120, side_res=5, pole_res=120)
    print('\t\t</g>')
    print('\t</g>')

def compose_political():
    print('\t<g transform="matrix(1,0,0,-1,180,90)">')
    print('\t\t<g class="country">')
    generate_borders('ne_50m', trim_antarctica=True)
    print('\t\t</g>')
    print('\t\t<g class="lakes">')
    plot_shapes('ne_50m_lakes', max_rank=4)
    print('\t\t</g>')
    print('\t</g>')
    label_shapes('ne_50m_admin_0_countries', "pol")

def compose_orthodromes():
    print('\t<g transform="matrix(1,0,0,-1,180,90)">')
    print('\t\t<g class="lines">')
    generate_orthodromes()
    print('\t\t</g>')
    print('\t</g>')

def compose_everything():
    print('\t<g transform="matrix(1,0,0,-1,180,90)">')
    print('\t\t<g class="country">')
    generate_borders('ne_10m', trim_antarctica=True, borders_only=False)
    print('\t\t<g class="border">')
    generate_borders('ne_10m', trim_antarctica=True, borders_only=True)
    print('\t\t</g>')
    print('\t\t</g>')
    print('\t\t<g class="sovereign">')
    plot_shapes('ne_10m_admin_0_map_units')
    print('\t\t</g>')
    print('\t\t<g class="admin">')
    plot_shapes('ne_10m_admin_1_states_provinces_lines', filter_field='adm0_a3', filter_vals=['RUS','CAN','CHN','USA','BRA','AUS','IND','ARG','KAZ'])
    print('\t\t</g>')
    print('\t\t<g class="dispute">')
    plot_shapes('ne_10m_admin_0_boundary_lines_disputed_areas')
    print('\t\t</g>')
    print('\t\t<g class="coastline">')
    plot_shapes('ne_10m_coastline', trim_antarctica=True)
    print('\t\t</g>')
    print('\t\t<g class="river">')
    plot_shapes('ne_10m_rivers_lake_centerlines', max_rank=5)
    print('\t\t</g>')
    print('\t\t<g class="lake">')
    plot_shapes('ne_10m_lakes', max_rank=4)
    print('\t\t</g>')
    print('\t\t<g class="graticule">')
    generate_graticule(5, 1, include_tropics=True, adjust_poles=True)
    plot_shapes('ne_10m_geographic_lines', clazz="dateline", filter_field='name', filter_vals=["International Date Line"])
    print('\t\t</g>')
    print('\t</g>')
    generate_topographical_labels('ne_50m', max_rank=2, text_size=4)
    label_shapes('ne_10m_lakes', "sea", max_rank=1, text_size=1)
    label_shapes('ne_10m_admin_0_countries', "pol", text_size=4)
    label_points('cities_capital', "cap", text_size=1)
    label_points('cities_other', "cit", text_size=0)


if __name__ == '__main__':
    # compose_landmasses()
    # compose_graticule()
    # compose_compound()
    # compose_indicatrices()
    # compose_indicatrices2(-0)
    # compose_political()
    # compose_orthodromes()
    compose_everything()
mit
-1,186,010,880,348,605,000
32.874172
124
0.657869
false
2.37355
false
false
false
francisco-dlp/hyperspy
hyperspy/drawing/utils.py
1
57321
# -*- coding: utf-8 -*- # Copyright 2007-2016 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. import copy import itertools import textwrap from traits import trait_base import matplotlib.pyplot as plt import matplotlib as mpl from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.backend_bases import key_press_handler import warnings import numpy as np from distutils.version import LooseVersion import logging import hyperspy as hs _logger = logging.getLogger(__name__) def contrast_stretching(data, saturated_pixels): """Calculate bounds that leaves out a given percentage of the data. Parameters ---------- data: numpy array saturated_pixels: scalar, None The percentage of pixels that are left out of the bounds. For example, the low and high bounds of a value of 1 are the 0.5% and 99.5% percentiles. It must be in the [0, 100] range. If None, set the value to 0. Returns ------- vmin, vmax: scalar The low and high bounds Raises ------ ValueError if the value of `saturated_pixels` is out of the valid range. """ # Sanity check if saturated_pixels is None: saturated_pixels = 0 if not 0 <= saturated_pixels <= 100: raise ValueError( "saturated_pixels must be a scalar in the range[0, 100]") vmin = np.nanpercentile(data, saturated_pixels / 2.) vmax = np.nanpercentile(data, 100 - saturated_pixels / 2.) return vmin, vmax MPL_DIVERGING_COLORMAPS = [ "BrBG", "bwr", "coolwarm", "PiYG", "PRGn", "PuOr", "RdBu", "RdGy", "RdYIBu", "RdYIGn", "seismic", "Spectral", ] # Add reversed colormaps MPL_DIVERGING_COLORMAPS += [cmap + "_r" for cmap in MPL_DIVERGING_COLORMAPS] def centre_colormap_values(vmin, vmax): """Calculate vmin and vmax to set the colormap midpoint to zero. Parameters ---------- vmin, vmax : scalar The range of data to display. Returns ------- cvmin, cvmax : scalar The values to obtain a centre colormap. """ absmax = max(abs(vmin), abs(vmax)) return -absmax, absmax def create_figure(window_title=None, _on_figure_window_close=None, disable_xyscale_keys=False, **kwargs): """Create a matplotlib figure. This function adds the possibility to execute another function when the figure is closed and to easily set the window title. Any keyword argument is passed to the plt.figure function Parameters ---------- window_title : string _on_figure_window_close : function disable_xyscale_keys : bool, disable the `k`, `l` and `L` shortcuts which toggle the x or y axis between linear and log scale. 
Returns ------- fig : plt.figure """ fig = plt.figure(**kwargs) if window_title is not None: # remove non-alphanumeric characters to prevent file saving problems # This is a workaround for: # https://github.com/matplotlib/matplotlib/issues/9056 reserved_characters = r'<>"/\|?*' for c in reserved_characters: window_title = window_title.replace(c, '') window_title = window_title.replace('\n', ' ') window_title = window_title.replace(':', ' -') fig.canvas.set_window_title(window_title) if disable_xyscale_keys and hasattr(fig.canvas, 'toolbar'): # hack the `key_press_handler` to disable the `k`, `l`, `L` shortcuts manager = fig.canvas.manager fig.canvas.mpl_disconnect(manager.key_press_handler_id) manager.key_press_handler_id = manager.canvas.mpl_connect( 'key_press_event', lambda event: key_press_handler_custom(event, manager.canvas)) if _on_figure_window_close is not None: on_figure_window_close(fig, _on_figure_window_close) return fig def key_press_handler_custom(event, canvas): if event.key not in ['k', 'l', 'L']: key_press_handler(event, canvas, canvas.manager.toolbar) def on_figure_window_close(figure, function): """Connects a close figure signal to a given function. Parameters ---------- figure : mpl figure instance function : function """ def function_wrapper(evt): function() figure.canvas.mpl_connect('close_event', function_wrapper) def plot_RGB_map(im_list, normalization='single', dont_plot=False): """Plot 2 or 3 maps in RGB. Parameters ---------- im_list : list of Signal2D instances normalization : {'single', 'global'} dont_plot : bool Returns ------- array: RGB matrix """ # from widgets import cursors height, width = im_list[0].data.shape[:2] rgb = np.zeros((height, width, 3)) rgb[:, :, 0] = im_list[0].data.squeeze() rgb[:, :, 1] = im_list[1].data.squeeze() if len(im_list) == 3: rgb[:, :, 2] = im_list[2].data.squeeze() if normalization == 'single': for i in range(len(im_list)): rgb[:, :, i] /= rgb[:, :, i].max() elif normalization == 'global': rgb /= rgb.max() rgb = rgb.clip(0, rgb.max()) if not dont_plot: figure = plt.figure() ax = figure.add_subplot(111) ax.frameon = False ax.set_axis_off() ax.imshow(rgb, interpolation='nearest') # cursors.set_mpl_ax(ax) figure.canvas.draw_idle() else: return rgb def subplot_parameters(fig): """Returns a list of the subplot parameters of a mpl figure. Parameters ---------- fig : mpl figure Returns ------- tuple : (left, bottom, right, top, wspace, hspace) """ wspace = fig.subplotpars.wspace hspace = fig.subplotpars.hspace left = fig.subplotpars.left right = fig.subplotpars.right top = fig.subplotpars.top bottom = fig.subplotpars.bottom return left, bottom, right, top, wspace, hspace class ColorCycle: _color_cycle = [mpl.colors.colorConverter.to_rgba(color) for color in ('b', 'g', 'r', 'c', 'm', 'y', 'k')] def __init__(self): self.color_cycle = copy.copy(self._color_cycle) def __call__(self): if not self.color_cycle: self.color_cycle = copy.copy(self._color_cycle) return self.color_cycle.pop(0) def plot_signals(signal_list, sync=True, navigator="auto", navigator_list=None, **kwargs): """Plot several signals at the same time. Parameters ---------- signal_list : list of BaseSignal instances If sync is set to True, the signals must have the same navigation shape, but not necessarily the same signal shape. sync : True or False, default "True" If True: the signals will share navigation, all the signals must have the same navigation shape for this to work, but not necessarily the same signal shape. 
navigator : {"auto", None, "spectrum", "slider", BaseSignal}, default "auto" See signal.plot docstring for full description navigator_list : {List of navigator arguments, None}, default None Set different navigator options for the signals. Must use valid navigator arguments: "auto", None, "spectrum", "slider", or a hyperspy Signal. The list must have the same size as signal_list. If None, the argument specified in navigator will be used. **kwargs Any extra keyword arguments are passed to each signal `plot` method. Example ------- >>> s_cl = hs.load("coreloss.dm3") >>> s_ll = hs.load("lowloss.dm3") >>> hs.plot.plot_signals([s_cl, s_ll]) Specifying the navigator: >>> s_cl = hs.load("coreloss.dm3") >>> s_ll = hs.load("lowloss.dm3") >>> hs.plot.plot_signals([s_cl, s_ll], navigator="slider") Specifying the navigator for each signal: >>> s_cl = hs.load("coreloss.dm3") >>> s_ll = hs.load("lowloss.dm3") >>> s_edx = hs.load("edx.dm3") >>> s_adf = hs.load("adf.dm3") >>> hs.plot.plot_signals( [s_cl, s_ll, s_edx], navigator_list=["slider",None,s_adf]) """ import hyperspy.signal if navigator_list: if not (len(signal_list) == len(navigator_list)): raise ValueError( "signal_list and navigator_list must" " have the same size") if sync: axes_manager_list = [] for signal in signal_list: axes_manager_list.append(signal.axes_manager) if not navigator_list: navigator_list = [] if navigator is None: navigator_list.extend([None] * len(signal_list)) elif isinstance(navigator, hyperspy.signal.BaseSignal): navigator_list.append(navigator) navigator_list.extend([None] * (len(signal_list) - 1)) elif navigator == "slider": navigator_list.append("slider") navigator_list.extend([None] * (len(signal_list) - 1)) elif navigator == "spectrum": navigator_list.extend(["spectrum"] * len(signal_list)) elif navigator == "auto": navigator_list.extend(["auto"] * len(signal_list)) else: raise ValueError( "navigator must be one of \"spectrum\",\"auto\"," " \"slider\", None, a Signal instance") # Check to see if the spectra have the same navigational shapes temp_shape_first = axes_manager_list[0].navigation_shape for i, axes_manager in enumerate(axes_manager_list): temp_shape = axes_manager.navigation_shape if not (temp_shape_first == temp_shape): raise ValueError( "The spectra does not have the same navigation shape") axes_manager_list[i] = axes_manager.deepcopy() if i > 0: for axis0, axisn in zip(axes_manager_list[0].navigation_axes, axes_manager_list[i].navigation_axes): axes_manager_list[i]._axes[axisn.index_in_array] = axis0 del axes_manager for signal, navigator, axes_manager in zip(signal_list, navigator_list, axes_manager_list): signal.plot(axes_manager=axes_manager, navigator=navigator, **kwargs) # If sync is False else: if not navigator_list: navigator_list = [] navigator_list.extend([navigator] * len(signal_list)) for signal, navigator in zip(signal_list, navigator_list): signal.plot(navigator=navigator, **kwargs) def _make_heatmap_subplot(spectra): from hyperspy._signals.signal2d import Signal2D im = Signal2D(spectra.data, axes=spectra.axes_manager._get_axes_dicts()) im.metadata.General.title = spectra.metadata.General.title im.plot() return im._plot.signal_plot.ax def set_xaxis_lims(mpl_ax, hs_axis): """ Set the matplotlib axis limits to match that of a HyperSpy axis Parameters ---------- mpl_ax : :class:`matplotlib.axis.Axis` The ``matplotlib`` axis to change hs_axis : :class:`~hyperspy.axes.DataAxis` The data axis that contains the values that control the scaling """ x_axis_lower_lim = hs_axis.axis[0] x_axis_upper_lim 
= hs_axis.axis[-1] mpl_ax.set_xlim(x_axis_lower_lim, x_axis_upper_lim) def _make_overlap_plot(spectra, ax, color="blue", line_style='-'): if isinstance(color, str): color = [color] * len(spectra) if isinstance(line_style, str): line_style = [line_style] * len(spectra) for spectrum_index, (spectrum, color, line_style) in enumerate( zip(spectra, color, line_style)): x_axis = spectrum.axes_manager.signal_axes[0] spectrum = _transpose_if_required(spectrum, 1) ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style) set_xaxis_lims(ax, x_axis) _set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal) else spectra[-1], ax) ax.set_ylabel('Intensity') ax.autoscale(tight=True) def _make_cascade_subplot( spectra, ax, color="blue", line_style='-', padding=1): max_value = 0 for spectrum in spectra: spectrum_yrange = (np.nanmax(spectrum.data) - np.nanmin(spectrum.data)) if spectrum_yrange > max_value: max_value = spectrum_yrange if isinstance(color, str): color = [color] * len(spectra) if isinstance(line_style, str): line_style = [line_style] * len(spectra) for spectrum_index, (spectrum, color, line_style) in enumerate( zip(spectra, color, line_style)): x_axis = spectrum.axes_manager.signal_axes[0] spectrum = _transpose_if_required(spectrum, 1) data_to_plot = ((spectrum.data - spectrum.data.min()) / float(max_value) + spectrum_index * padding) ax.plot(x_axis.axis, data_to_plot, color=color, ls=line_style) set_xaxis_lims(ax, x_axis) _set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal) else spectra[-1], ax) ax.set_yticks([]) ax.autoscale(tight=True) def _plot_spectrum(spectrum, ax, color="blue", line_style='-'): x_axis = spectrum.axes_manager.signal_axes[0] ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style) set_xaxis_lims(ax, x_axis) def _set_spectrum_xlabel(spectrum, ax): x_axis = spectrum.axes_manager.signal_axes[0] ax.set_xlabel("%s (%s)" % (x_axis.name, x_axis.units)) def _transpose_if_required(signal, expected_dimension): # EDS profiles or maps have signal dimension = 0 and navigation dimension # 1 or 2. For convenience transpose the signal if possible if (signal.axes_manager.signal_dimension == 0 and signal.axes_manager.navigation_dimension == expected_dimension): return signal.T else: return signal def plot_images(images, cmap=None, no_nans=False, per_row=3, label='auto', labelwrap=30, suptitle=None, suptitle_fontsize=18, colorbar='multi', centre_colormap="auto", saturated_pixels=0, scalebar=None, scalebar_color='white', axes_decor='all', padding=None, tight_layout=False, aspect='auto', min_asp=0.1, namefrac_thresh=0.4, fig=None, vmin=None, vmax=None, *args, **kwargs): """Plot multiple images as sub-images in one figure. Extra keyword arguments are passed to `matplotlib.figure`. Parameters ---------- images : list of Signal2D or BaseSignal `images` should be a list of Signals to plot. For `BaseSignal` with navigation dimensions 2 and signal dimension 0, the signal will be tranposed to form a `Signal2D`. Multi-dimensional images will have each plane plotted as a separate image. If any signal shape is not suitable, a ValueError will be raised. cmap : matplotlib colormap, list, or ``'mpl_colors'``, *optional* The colormap used for the images, by default read from ``pyplot``. A list of colormaps can also be provided, and the images will cycle through them. 
Optionally, the value ``'mpl_colors'`` will cause the cmap to loop through the default ``matplotlib`` colors (to match with the default output of the :py:func:`~.drawing.utils.plot_spectra` method. Note: if using more than one colormap, using the ``'single'`` option for ``colorbar`` is disallowed. no_nans : bool, optional If True, set nans to zero for plotting. per_row : int, optional The number of plots in each row label : None, str, or list of str, optional Control the title labeling of the plotted images. If None, no titles will be shown. If 'auto' (default), function will try to determine suitable titles using Signal2D titles, falling back to the 'titles' option if no good short titles are detected. Works best if all images to be plotted have the same beginning to their titles. If 'titles', the title from each image's metadata.General.title will be used. If any other single str, images will be labeled in sequence using that str as a prefix. If a list of str, the list elements will be used to determine the labels (repeated, if necessary). labelwrap : int, optional integer specifying the number of characters that will be used on one line If the function returns an unexpected blank figure, lower this value to reduce overlap of the labels between each figure suptitle : str, optional Title to use at the top of the figure. If called with label='auto', this parameter will override the automatically determined title. suptitle_fontsize : int, optional Font size to use for super title at top of figure colorbar : {'multi', None, 'single'} Controls the type of colorbars that are plotted. If None, no colorbar is plotted. If 'multi' (default), individual colorbars are plotted for each (non-RGB) image If 'single', all (non-RGB) images are plotted on the same scale, and one colorbar is shown for all centre_colormap : {"auto", True, False} If True the centre of the color scheme is set to zero. This is specially useful when using diverging color schemes. If "auto" (default), diverging color schemes are automatically centred. saturated_pixels: None, scalar or list of scalar, optional, default: 0 If list of scalar, the length should match the number of images to show. If provide in the list, set the value to 0. The percentage of pixels that are left out of the bounds. For example, the low and high bounds of a value of 1 are the 0.5% and 99.5% percentiles. It must be in the [0, 100] range. scalebar : {None, 'all', list of ints}, optional If None (or False), no scalebars will be added to the images. If 'all', scalebars will be added to all images. If list of ints, scalebars will be added to each image specified. scalebar_color : str, optional A valid MPL color string; will be used as the scalebar color axes_decor : {'all', 'ticks', 'off', None}, optional Controls how the axes are displayed on each image; default is 'all' If 'all', both ticks and axis labels will be shown If 'ticks', no axis labels will be shown, but ticks/labels will If 'off', all decorations and frame will be disabled If None, no axis decorations will be shown, but ticks/frame will padding : None or dict, optional This parameter controls the spacing between images. 
If None, default options will be used Otherwise, supply a dictionary with the spacing options as keywords and desired values as values Values should be supplied as used in pyplot.subplots_adjust(), and can be: 'left', 'bottom', 'right', 'top', 'wspace' (width), and 'hspace' (height) tight_layout : bool, optional If true, hyperspy will attempt to improve image placement in figure using matplotlib's tight_layout If false, repositioning images inside the figure will be left as an exercise for the user. aspect : str or numeric, optional If 'auto', aspect ratio is auto determined, subject to min_asp. If 'square', image will be forced onto square display. If 'equal', aspect ratio of 1 will be enforced. If float (or int/long), given value will be used. min_asp : float, optional Minimum aspect ratio to be used when plotting images namefrac_thresh : float, optional Threshold to use for auto-labeling. This parameter controls how much of the titles must be the same for the auto-shortening of labels to activate. Can vary from 0 to 1. Smaller values encourage shortening of titles by auto-labeling, while larger values will require more overlap in titles before activing the auto-label code. fig : mpl figure, optional If set, the images will be plotted to an existing MPL figure vmin, vmax : scalar or list of scalar, optional, default: None If list of scalar, the length should match the number of images to show. A list of scalar is not compatible with a single colorbar. See vmin, vmax of matplotlib.imshow() for more details. *args, **kwargs, optional Additional arguments passed to matplotlib.imshow() Returns ------- axes_list : list a list of subplot axes that hold the images See Also -------- plot_spectra : Plotting of multiple spectra plot_signals : Plotting of multiple signals plot_histograms : Compare signal histograms Notes ----- `interpolation` is a useful parameter to provide as a keyword argument to control how the space between pixels is interpolated. A value of ``'nearest'`` will cause no interpolation between pixels. `tight_layout` is known to be quite brittle, so an option is provided to disable it. Turn this option off if output is not as expected, or try adjusting `label`, `labelwrap`, or `per_row` """ def __check_single_colorbar(cbar): if cbar == 'single': raise ValueError('Cannot use a single colorbar with multiple ' 'colormaps. Please check for compatible ' 'arguments.') from hyperspy.drawing.widgets import ScaleBar from hyperspy.misc import rgb_tools from hyperspy.signal import BaseSignal # Check that we have a hyperspy signal im = [images] if not isinstance(images, (list, tuple)) else images for image in im: if not isinstance(image, BaseSignal): raise ValueError("`images` must be a list of image signals or a " "multi-dimensional signal." " " + repr(type(images)) + " was given.") # For list of EDS maps, transpose the BaseSignal if isinstance(images, (list, tuple)): images = [_transpose_if_required(image, 2) for image in images] # If input is >= 1D signal (e.g. for multi-dimensional plotting), # copy it and put it in a list so labeling works out as (x,y) when plotting if isinstance(images, BaseSignal) and images.axes_manager.navigation_dimension > 0: images = [images._deepcopy_with_new_data(images.data)] n = 0 for i, sig in enumerate(images): if sig.axes_manager.signal_dimension != 2: raise ValueError("This method only plots signals that are images. " "The signal dimension must be equal to 2. 
" "The signal at position " + repr(i) + " was " + repr(sig) + ".") # increment n by the navigation size, or by 1 if the navigation size is # <= 0 n += (sig.axes_manager.navigation_size if sig.axes_manager.navigation_size > 0 else 1) # If no cmap given, get default colormap from pyplot: if cmap is None: cmap = [plt.get_cmap().name] elif cmap == 'mpl_colors': for n_color, c in enumerate(mpl.rcParams['axes.prop_cycle']): make_cmap(colors=['#000000', c['color']], name='mpl{}'.format(n_color)) cmap = ['mpl{}'.format(i) for i in range(len(mpl.rcParams['axes.prop_cycle']))] __check_single_colorbar(colorbar) # cmap is list, tuple, or something else iterable (but not string): elif hasattr(cmap, '__iter__') and not isinstance(cmap, str): try: cmap = [c.name for c in cmap] # convert colormap to string except AttributeError: cmap = [c for c in cmap] # c should be string if not colormap __check_single_colorbar(colorbar) elif isinstance(cmap, mpl.colors.Colormap): cmap = [cmap.name] # convert single colormap to list with string elif isinstance(cmap, str): cmap = [cmap] # cmap is single string, so make it a list else: # Didn't understand cmap input, so raise error raise ValueError('The provided cmap value was not understood. Please ' 'check input values.') # If any of the cmaps given are diverging, and auto-centering, set the # appropriate flag: if centre_colormap == "auto": centre_colormaps = [] for c in cmap: if c in MPL_DIVERGING_COLORMAPS: centre_colormaps.append(True) else: centre_colormaps.append(False) # if it was True, just convert to list elif centre_colormap: centre_colormaps = [True] # likewise for false elif not centre_colormap: centre_colormaps = [False] # finally, convert lists to cycle generators for adaptive length: centre_colormaps = itertools.cycle(centre_colormaps) cmap = itertools.cycle(cmap) def _check_arg(arg, default_value, arg_name): if isinstance(arg, list): if len(arg) != n: _logger.warning('The provided {} values are ignored because the ' 'length of the list does not match the number of ' 'images'.format(arg_name)) arg = [default_value] * n else: arg = [arg] * n return arg vmin = _check_arg(vmin, None, 'vmin') vmax = _check_arg(vmax, None, 'vmax') saturated_pixels = _check_arg(saturated_pixels, 0, 'saturated_pixels') # Sort out the labeling: div_num = 0 all_match = False shared_titles = False user_labels = False if label is None: pass elif label == 'auto': # Use some heuristics to try to get base string of similar titles label_list = [x.metadata.General.title for x in images] # Find the shortest common string between the image titles # and pull that out as the base title for the sequence of images # array in which to store arrays res = np.zeros((len(label_list), len(label_list[0]) + 1)) res[:, 0] = 1 # j iterates the strings for j in range(len(label_list)): # i iterates length of substring test for i in range(1, len(label_list[0]) + 1): # stores whether or not characters in title match res[j, i] = label_list[0][:i] in label_list[j] # sum up the results (1 is True, 0 is False) and create # a substring based on the minimum value (this will be # the "smallest common string" between all the titles if res.all(): basename = label_list[0] div_num = len(label_list[0]) all_match = True else: div_num = int(min(np.sum(res, 1))) basename = label_list[0][:div_num - 1] all_match = False # trim off any '(' or ' ' characters at end of basename if div_num > 1: while True: if basename[len(basename) - 1] == '(': basename = basename[:-1] elif basename[len(basename) - 1] == ' ': basename = 
basename[:-1] else: break # namefrac is ratio of length of basename to the image name # if it is high (e.g. over 0.5), we can assume that all images # share the same base if len(label_list[0]) > 0: namefrac = float(len(basename)) / len(label_list[0]) else: # If label_list[0] is empty, it means there was probably no # title set originally, so nothing to share namefrac = 0 if namefrac > namefrac_thresh: # there was a significant overlap of label beginnings shared_titles = True # only use new suptitle if one isn't specified already if suptitle is None: suptitle = basename else: # there was not much overlap, so default back to 'titles' mode shared_titles = False label = 'titles' div_num = 0 elif label == 'titles': # Set label_list to each image's pre-defined title label_list = [x.metadata.General.title for x in images] elif isinstance(label, str): # Set label_list to an indexed list, based off of label label_list = [label + " " + repr(num) for num in range(n)] elif isinstance(label, list) and all( isinstance(x, str) for x in label): label_list = label user_labels = True # If list of labels is longer than the number of images, just use the # first n elements if len(label_list) > n: del label_list[n:] if len(label_list) < n: label_list *= (n // len(label_list)) + 1 del label_list[n:] else: raise ValueError("Did not understand input of labels.") # Determine appropriate number of images per row rows = int(np.ceil(n / float(per_row))) if n < per_row: per_row = n # Set overall figure size and define figure (if not pre-existing) if fig is None: k = max(plt.rcParams['figure.figsize']) / max(per_row, rows) f = plt.figure(figsize=(tuple(k * i for i in (per_row, rows)))) else: f = fig # Initialize list to hold subplot axes axes_list = [] # Initialize list of rgb tags isrgb = [False] * len(images) # Check to see if there are any rgb images in list # and tag them using the isrgb list for i, img in enumerate(images): if rgb_tools.is_rgbx(img.data): isrgb[i] = True # Determine how many non-rgb Images there are non_rgb = list(itertools.compress(images, [not j for j in isrgb])) if len(non_rgb) == 0 and colorbar is not None: colorbar = None warnings.warn("Sorry, colorbar is not implemented for RGB images.") # Find global min and max values of all the non-rgb images for use with # 'single' scalebar if colorbar == 'single': # get a g_saturated_pixels from saturated_pixels if isinstance(saturated_pixels, list): g_saturated_pixels = min(np.array([v for v in saturated_pixels])) else: g_saturated_pixels = saturated_pixels # estimate a g_vmin and g_max from saturated_pixels g_vmin, g_vmax = contrast_stretching(np.concatenate( [i.data.flatten() for i in non_rgb]), g_saturated_pixels) # if vmin and vmax are provided, override g_min and g_max if isinstance(vmin, list): _logger.warning('vmin have to be a scalar to be compatible with a ' 'single colorbar') else: g_vmin = vmin if vmin is not None else g_vmin if isinstance(vmax, list): _logger.warning('vmax have to be a scalar to be compatible with a ' 'single colorbar') else: g_vmax = vmax if vmax is not None else g_vmax if next(centre_colormaps): g_vmin, g_vmax = centre_colormap_values(g_vmin, g_vmax) # Check if we need to add a scalebar for some of the images if isinstance(scalebar, list) and all(isinstance(x, int) for x in scalebar): scalelist = True else: scalelist = False idx = 0 ax_im_list = [0] * len(isrgb) # Replot: create a list to store references to the images replot_ims = [] # Loop through each image, adding subplot for each one for i, ims in enumerate(images): 
# Get handles for the signal axes and axes_manager axes_manager = ims.axes_manager if axes_manager.navigation_dimension > 0: ims = ims._deepcopy_with_new_data(ims.data) for j, im in enumerate(ims): ax = f.add_subplot(rows, per_row, idx + 1) axes_list.append(ax) data = im.data centre = next(centre_colormaps) # get next value for centreing # Enable RGB plotting if rgb_tools.is_rgbx(data): data = rgb_tools.rgbx2regular_array(data, plot_friendly=True) l_vmin, l_vmax = None, None else: data = im.data # Find min and max for contrast l_vmin, l_vmax = contrast_stretching( data, saturated_pixels[idx]) l_vmin = vmin[idx] if vmin[idx] is not None else l_vmin l_vmax = vmax[idx] if vmax[idx] is not None else l_vmax if centre: l_vmin, l_vmax = centre_colormap_values(l_vmin, l_vmax) # Remove NaNs (if requested) if no_nans: data = np.nan_to_num(data) # Get handles for the signal axes and axes_manager axes_manager = im.axes_manager axes = axes_manager.signal_axes # Set dimensions of images xaxis = axes[0] yaxis = axes[1] extent = ( xaxis.low_value, xaxis.high_value, yaxis.high_value, yaxis.low_value, ) if not isinstance(aspect, (int, float)) and aspect not in [ 'auto', 'square', 'equal']: _logger.warning("Did not understand aspect ratio input. " "Using 'auto' as default.") aspect = 'auto' if aspect == 'auto': if float(yaxis.size) / xaxis.size < min_asp: factor = min_asp * float(xaxis.size) / yaxis.size elif float(yaxis.size) / xaxis.size > min_asp ** -1: factor = min_asp ** -1 * float(xaxis.size) / yaxis.size else: factor = 1 asp = np.abs(factor * float(xaxis.scale) / yaxis.scale) elif aspect == 'square': asp = abs(extent[1] - extent[0]) / abs(extent[3] - extent[2]) elif aspect == 'equal': asp = 1 elif isinstance(aspect, (int, float)): asp = aspect if 'interpolation' not in kwargs.keys(): kwargs['interpolation'] = 'nearest' # Get colormap for this image: cm = next(cmap) # Plot image data, using vmin and vmax to set bounds, # or allowing them to be set automatically if using individual # colorbars if colorbar == 'single' and not isrgb[i]: axes_im = ax.imshow(data, cmap=cm, extent=extent, vmin=g_vmin, vmax=g_vmax, aspect=asp, *args, **kwargs) ax_im_list[i] = axes_im else: axes_im = ax.imshow(data, cmap=cm, extent=extent, vmin=l_vmin, vmax=l_vmax, aspect=asp, *args, **kwargs) ax_im_list[i] = axes_im # If an axis trait is undefined, shut off : if isinstance(xaxis.units, trait_base._Undefined) or \ isinstance(yaxis.units, trait_base._Undefined) or \ isinstance(xaxis.name, trait_base._Undefined) or \ isinstance(yaxis.name, trait_base._Undefined): if axes_decor == 'all': _logger.warning( 'Axes labels were requested, but one ' 'or both of the ' 'axes units and/or name are undefined. 
' 'Axes decorations have been set to ' '\'ticks\' instead.') axes_decor = 'ticks' # If all traits are defined, set labels as appropriate: else: ax.set_xlabel(axes[0].name + " axis (" + axes[0].units + ")") ax.set_ylabel(axes[1].name + " axis (" + axes[1].units + ")") if label: if all_match: title = '' elif shared_titles: title = label_list[i][div_num - 1:] else: if len(ims) == n: # This is true if we are plotting just 1 # multi-dimensional Signal2D title = label_list[idx] elif user_labels: title = label_list[idx] else: title = label_list[i] if ims.axes_manager.navigation_size > 1 and not user_labels: title += " %s" % str(ims.axes_manager.indices) ax.set_title(textwrap.fill(title, labelwrap)) # Set axes decorations based on user input set_axes_decor(ax, axes_decor) # If using independent colorbars, add them if colorbar == 'multi' and not isrgb[i]: div = make_axes_locatable(ax) cax = div.append_axes("right", size="5%", pad=0.05) plt.colorbar(axes_im, cax=cax) # Add scalebars as necessary if (scalelist and idx in scalebar) or scalebar == 'all': ax.scalebar = ScaleBar( ax=ax, units=axes[0].units, color=scalebar_color, ) # Replot: store references to the images replot_ims.append(im) idx += 1 # If using a single colorbar, add it, and do tight_layout, ensuring that # a colorbar is only added based off of non-rgb Images: if colorbar == 'single': foundim = None for i in range(len(isrgb)): if (not isrgb[i]) and foundim is None: foundim = i if foundim is not None: f.subplots_adjust(right=0.8) cbar_ax = f.add_axes([0.9, 0.1, 0.03, 0.8]) f.colorbar(ax_im_list[foundim], cax=cbar_ax) if tight_layout: # tight_layout, leaving room for the colorbar plt.tight_layout(rect=[0, 0, 0.9, 1]) elif tight_layout: plt.tight_layout() elif tight_layout: plt.tight_layout() # Set top bounds for shared titles and add suptitle if suptitle: f.subplots_adjust(top=0.85) f.suptitle(suptitle, fontsize=suptitle_fontsize) # If we want to plot scalebars, loop through the list of axes and add them if scalebar is None or scalebar is False: # Do nothing if no scalebars are called for pass elif scalebar == 'all': # scalebars were taken care of in the plotting loop pass elif scalelist: # scalebars were taken care of in the plotting loop pass else: raise ValueError("Did not understand scalebar input. 
Must be None, " "\'all\', or list of ints.") # Adjust subplot spacing according to user's specification if padding is not None: plt.subplots_adjust(**padding) # Replot: connect function def on_dblclick(event): # On the event of a double click, replot the selected subplot if not event.inaxes: return if not event.dblclick: return subplots = [axi for axi in f.axes if isinstance(axi, mpl.axes.Subplot)] inx = list(subplots).index(event.inaxes) im = replot_ims[inx] # Use some of the info in the subplot cm = subplots[inx].images[0].get_cmap() clim = subplots[inx].images[0].get_clim() sbar = False if (scalelist and inx in scalebar) or scalebar == 'all': sbar = True im.plot(colorbar=bool(colorbar), vmin=clim[0], vmax=clim[1], no_nans=no_nans, aspect=asp, scalebar=sbar, scalebar_color=scalebar_color, cmap=cm) f.canvas.mpl_connect('button_press_event', on_dblclick) return axes_list def set_axes_decor(ax, axes_decor): if axes_decor == 'off': ax.axis('off') elif axes_decor == 'ticks': ax.set_xlabel('') ax.set_ylabel('') elif axes_decor == 'all': pass elif axes_decor is None: ax.set_xlabel('') ax.set_ylabel('') ax.set_xticklabels([]) ax.set_yticklabels([]) def make_cmap(colors, name='my_colormap', position=None, bit=False, register=True): """ Create a matplotlib colormap with customized colors, optionally registering it with matplotlib for simplified use. Adapted from Chris Slocum's code at: https://github.com/CSlocumWX/custom_colormap/blob/master/custom_colormaps.py and used under the terms of that code's BSD-3 license Parameters ---------- colors : iterable list of either tuples containing rgb values, or html strings Colors should be arranged so that the first color is the lowest value for the colorbar and the last is the highest. name : str name of colormap to use when registering with matplotlib position : None or iterable list containing the values (from [0,1]) that dictate the position of each color within the colormap. If None (default), the colors will be equally-spaced within the colorbar. 
bit : boolean True if RGB colors are given in 8-bit [0 to 255] or False if given in arithmetic basis [0 to 1] (default) register : boolean switch to control whether or not to register the custom colormap with matplotlib in order to enable use by just the name string """ def _html_color_to_rgb(color_string): """ convert #RRGGBB to an (R, G, B) tuple """ color_string = color_string.strip() if color_string[0] == '#': color_string = color_string[1:] if len(color_string) != 6: raise ValueError( "input #{} is not in #RRGGBB format".format(color_string)) r, g, b = color_string[:2], color_string[2:4], color_string[4:] r, g, b = [int(n, 16) / 255 for n in (r, g, b)] return r, g, b bit_rgb = np.linspace(0, 1, 256) if position is None: position = np.linspace(0, 1, len(colors)) else: if len(position) != len(colors): raise ValueError("position length must be the same as colors") elif position[0] != 0 or position[-1] != 1: raise ValueError("position must start with 0 and end with 1") cdict = {'red': [], 'green': [], 'blue': []} for pos, color in zip(position, colors): if isinstance(color, str): color = _html_color_to_rgb(color) elif bit: color = (bit_rgb[color[0]], bit_rgb[color[1]], bit_rgb[color[2]]) cdict['red'].append((pos, color[0], color[0])) cdict['green'].append((pos, color[1], color[1])) cdict['blue'].append((pos, color[2], color[2])) cmap = mpl.colors.LinearSegmentedColormap(name, cdict, 256) if register: mpl.cm.register_cmap(name, cmap) return cmap def plot_spectra( spectra, style='overlap', color=None, line_style=None, padding=1., legend=None, legend_picking=True, legend_loc='upper right', fig=None, ax=None, **kwargs): """Plot several spectra in the same figure. Extra keyword arguments are passed to `matplotlib.figure`. Parameters ---------- spectra : list of Signal1D or BaseSignal Ordered spectra list of signal to plot. If `style` is "cascade" or "mosaic" the spectra can have different size and axes. For `BaseSignal` with navigation dimensions 1 and signal dimension 0, the signal will be tranposed to form a `Signal1D`. style : {'overlap', 'cascade', 'mosaic', 'heatmap'} The style of the plot. color : matplotlib color or a list of them or `None` Sets the color of the lines of the plots (no action on 'heatmap'). If a list, if its length is less than the number of spectra to plot, the colors will be cycled. If `None`, use default matplotlib color cycle. line_style: matplotlib line style or a list of them or `None` Sets the line style of the plots (no action on 'heatmap'). The main line style are '-','--','steps','-.',':'. If a list, if its length is less than the number of spectra to plot, line_style will be cycled. If If `None`, use continuous lines, eg: ('-','--','steps','-.',':') padding : float, optional, default 0.1 Option for "cascade". 1 guarantees that there is not overlapping. However, in many cases a value between 0 and 1 can produce a tighter plot without overlapping. Negative values have the same effect but reverse the order of the spectra without reversing the order of the colors. legend: None or list of str or 'auto' If list of string, legend for "cascade" or title for "mosaic" is displayed. If 'auto', the title of each spectra (metadata.General.title) is used. legend_picking: bool If true, a spectrum can be toggle on and off by clicking on the legended line. legend_loc : str or int This parameter controls where the legend is placed on the figure; see the pyplot.legend docstring for valid values fig : matplotlib figure or None If None, a default figure will be created. 
Specifying fig will not work for the 'heatmap' style. ax : matplotlib ax (subplot) or None If None, a default ax will be created. Will not work for 'mosaic' or 'heatmap' style. **kwargs remaining keyword arguments are passed to matplotlib.figure() or matplotlib.subplots(). Has no effect on 'heatmap' style. Example ------- >>> s = hs.load("some_spectra") >>> hs.plot.plot_spectra(s, style='cascade', color='red', padding=0.5) To save the plot as a png-file >>> hs.plot.plot_spectra(s).figure.savefig("test.png") Returns ------- ax: matplotlib axes or list of matplotlib axes An array is returned when `style` is "mosaic". """ import hyperspy.signal def _reverse_legend(ax_, legend_loc_): """ Reverse the ordering of a matplotlib legend (to be more consistent with the default ordering of plots in the 'cascade' and 'overlap' styles Parameters ---------- ax_: matplotlib axes legend_loc_: str or int This parameter controls where the legend is placed on the figure; see the pyplot.legend docstring for valid values """ l = ax_.get_legend() labels = [lb.get_text() for lb in list(l.get_texts())] handles = l.legendHandles ax_.legend(handles[::-1], labels[::-1], loc=legend_loc_) # Before v1.3 default would read the value from prefereces. if style == "default": style = "overlap" if color is not None: if isinstance(color, str): color = itertools.cycle([color]) elif hasattr(color, "__iter__"): color = itertools.cycle(color) else: raise ValueError("Color must be None, a valid matplotlib color " "string or a list of valid matplotlib colors.") else: if LooseVersion(mpl.__version__) >= "1.5.3": color = itertools.cycle( plt.rcParams['axes.prop_cycle'].by_key()["color"]) else: color = itertools.cycle(plt.rcParams['axes.color_cycle']) if line_style is not None: if isinstance(line_style, str): line_style = itertools.cycle([line_style]) elif hasattr(line_style, "__iter__"): line_style = itertools.cycle(line_style) else: raise ValueError("line_style must be None, a valid matplotlib" " line_style string or a list of valid matplotlib" " line_style.") else: line_style = ['-'] * len(spectra) if legend is not None: if isinstance(legend, str): if legend == 'auto': legend = [spec.metadata.General.title for spec in spectra] else: raise ValueError("legend must be None, 'auto' or a list of" " string") elif hasattr(legend, "__iter__"): legend = itertools.cycle(legend) if style == 'overlap': if fig is None: fig = plt.figure(**kwargs) if ax is None: ax = fig.add_subplot(111) _make_overlap_plot(spectra, ax, color=color, line_style=line_style,) if legend is not None: ax.legend(legend, loc=legend_loc) _reverse_legend(ax, legend_loc) if legend_picking is True: animate_legend(fig=fig, ax=ax) elif style == 'cascade': if fig is None: fig = plt.figure(**kwargs) if ax is None: ax = fig.add_subplot(111) _make_cascade_subplot(spectra, ax, color=color, line_style=line_style, padding=padding) if legend is not None: plt.legend(legend, loc=legend_loc) _reverse_legend(ax, legend_loc) elif style == 'mosaic': default_fsize = plt.rcParams["figure.figsize"] figsize = (default_fsize[0], default_fsize[1] * len(spectra)) fig, subplots = plt.subplots( len(spectra), 1, figsize=figsize, **kwargs) if legend is None: legend = [legend] * len(spectra) for spectrum, ax, color, line_style, legend in zip( spectra, subplots, color, line_style, legend): spectrum = _transpose_if_required(spectrum, 1) _plot_spectrum(spectrum, ax, color=color, line_style=line_style) ax.set_ylabel('Intensity') if legend is not None: ax.set_title(legend) if not isinstance(spectra, 
hyperspy.signal.BaseSignal): _set_spectrum_xlabel(spectrum, ax) if isinstance(spectra, hyperspy.signal.BaseSignal): _set_spectrum_xlabel(spectrum, ax) fig.tight_layout() elif style == 'heatmap': if not isinstance(spectra, hyperspy.signal.BaseSignal): import hyperspy.utils spectra = [_transpose_if_required(spectrum, 1) for spectrum in spectra] spectra = hyperspy.utils.stack(spectra) with spectra.unfolded(): ax = _make_heatmap_subplot(spectra) ax.set_ylabel('Spectra') ax = ax if style != "mosaic" else subplots return ax def animate_legend(fig=None, ax=None): """Animate the legend of a figure. A spectrum can be toggle on and off by clicking on the legended line. Parameters ---------- fig: None | matplotlib.figure If None pick the current figure using "plt.gcf" ax: None | matplotlib.axes If None pick the current axes using "plt.gca". Note ---- Code inspired from legend_picking.py in the matplotlib gallery """ if fig is None: fig = plt.gcf() if ax is None: ax = plt.gca() lines = ax.lines[::-1] lined = dict() leg = ax.get_legend() for legline, origline in zip(leg.get_lines(), lines): legline.set_picker(5) # 5 pts tolerance lined[legline] = origline def onpick(event): # on the pick event, find the orig line corresponding to the # legend proxy line, and toggle the visibility legline = event.artist if legline.axes == ax: origline = lined[legline] vis = not origline.get_visible() origline.set_visible(vis) # Change the alpha on the line in the legend so we can see what lines # have been toggled if vis: legline.set_alpha(1.0) else: legline.set_alpha(0.2) fig.canvas.draw_idle() fig.canvas.mpl_connect('pick_event', onpick) def plot_histograms(signal_list, bins='freedman', range_bins=None, color=None, line_style=None, legend='auto', fig=None, **kwargs): """Plot the histogram of every signal in the list in the same figure. This function creates a histogram for each signal and plot the list with the `utils.plot.plot_spectra` function. Parameters ---------- signal_list : iterable Ordered spectra list to plot. If `style` is "cascade" or "mosaic" the spectra can have different size and axes. bins : int or list or str, optional If bins is a string, then it must be one of: 'knuth' : use Knuth's rule to determine bins 'scotts' : use Scott's rule to determine bins 'freedman' : use the Freedman-diaconis rule to determine bins 'blocks' : use bayesian blocks for dynamic bin widths range_bins : tuple or None, optional. the minimum and maximum range for the histogram. If not specified, it will be (x.min(), x.max()) color : valid matplotlib color or a list of them or `None`, optional. Sets the color of the lines of the plots. If a list, if its length is less than the number of spectra to plot, the colors will be cycled. If If `None`, use default matplotlib color cycle. line_style: valid matplotlib line style or a list of them or `None`, optional. The main line style are '-','--','steps','-.',':'. If a list, if its length is less than the number of spectra to plot, line_style will be cycled. If If `None`, use continuous lines, eg: ('-','--','steps','-.',':') legend: None or list of str or 'auto', optional. Display a legend. If 'auto', the title of each spectra (metadata.General.title) is used. legend_picking: bool, optional. If true, a spectrum can be toggle on and off by clicking on the legended line. fig : matplotlib figure or None, optional. If None, a default figure will be created. **kwargs other keyword arguments (weight and density) are described in np.histogram(). 
Example ------- Histograms of two random chi-square distributions >>> img = hs.signals.Signal2D(np.random.chisquare(1,[10,10,100])) >>> img2 = hs.signals.Signal2D(np.random.chisquare(2,[10,10,100])) >>> hs.plot.plot_histograms([img,img2],legend=['hist1','hist2']) Returns ------- ax: matplotlib axes or list of matplotlib axes An array is returned when `style` is "mosaic". """ hists = [] for obj in signal_list: hists.append(obj.get_histogram(bins=bins, range_bins=range_bins, **kwargs)) if line_style is None: line_style = 'steps' return plot_spectra(hists, style='overlap', color=color, line_style=line_style, legend=legend, fig=fig)
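The plot_spectra and plot_histograms docstrings above carry usage examples, but plot_images does not; a minimal hedged example follows. The random test images are made up for illustration and are not taken from HyperSpy's documentation or test suite.

# Illustrative use of plot_images with the parameters documented above.
import numpy as np
import hyperspy.api as hs

imgs = [hs.signals.Signal2D(np.random.random((64, 64))) for _ in range(4)]
hs.plot.plot_images(imgs, per_row=2, colorbar='single',
                    axes_decor='off', tight_layout=True)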
gpl-3.0
685,807,880,072,156,300
36.986083
82
0.581131
false
4.057262
false
false
false
rg3915/django-experience
djexperience/settings.py
1
3763
import os
from decouple import config, Csv
from dj_database_url import parse as dburl

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')

DEBUG = config('DEBUG', default=False, cast=bool)

ALLOWED_HOSTS = config('ALLOWED_HOSTS', default=[], cast=Csv())

# Application definition

INSTALLED_APPS = [
    # my apps
    'djexperience.core',
    # default django apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # thirty apps
    'django_extensions',
    'bootstrapform',
    'widget_tweaks',
    'daterange_filter',
    'django_activeurl',
    'import_export',
    'django_tables2',
    # my apps
    'djexperience.bookstore',
    'djexperience.company',
    'djexperience.crm',
    'djexperience.myemail',
    'djexperience.product',
    'djexperience.selling',
    'djexperience.service',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'djexperience.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'djexperience.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases

default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')

DATABASES = {
    'default': config('DATABASE_URL', default=default_dburl, cast=dburl),
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL')

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'pt-br'

TIME_ZONE = 'America/Sao_Paulo'

USE_I18N = True

USE_L10N = True

USE_TZ = True

USE_THOUSAND_SEPARATOR = True

DECIMAL_SEPARATOR = ','

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')

LOGIN_URL = '/admin/login/'
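These settings read their secrets through python-decouple and dj-database-url, so running the project needs an .env (or settings.ini) supplying the keys referenced above. The sketch below uses placeholder values only; none of them come from the repository.

# Example .env consumed by the settings above (all values are placeholders):
SECRET_KEY=change-me
DEBUG=True
ALLOWED_HOSTS=127.0.0.1,localhost
DATABASE_URL=postgres://user:pass@localhost:5432/djexperience
EMAIL_HOST_USER=you@example.com
EMAIL_HOST_PASSWORD=app-password
DEFAULT_FROM_EMAIL=you@example.com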
mit
-8,715,243,961,084,812,000
25.687943
91
0.687749
false
3.520112
true
false
false
shoyer/xray
xarray/backends/locks.py
1
5397
import multiprocessing import threading import weakref from typing import Any, MutableMapping try: from dask.utils import SerializableLock except ImportError: # no need to worry about serializing the lock SerializableLock = threading.Lock try: from dask.distributed import Lock as DistributedLock except ImportError: DistributedLock = None # Locks used by multiple backends. # Neither HDF5 nor the netCDF-C library are thread-safe. HDF5_LOCK = SerializableLock() NETCDFC_LOCK = SerializableLock() _FILE_LOCKS = weakref.WeakValueDictionary() # type: MutableMapping[Any, threading.Lock] # noqa def _get_threaded_lock(key): try: lock = _FILE_LOCKS[key] except KeyError: lock = _FILE_LOCKS[key] = threading.Lock() return lock def _get_multiprocessing_lock(key): # TODO: make use of the key -- maybe use locket.py? # https://github.com/mwilliamson/locket.py del key # unused return multiprocessing.Lock() _LOCK_MAKERS = { None: _get_threaded_lock, 'threaded': _get_threaded_lock, 'multiprocessing': _get_multiprocessing_lock, 'distributed': DistributedLock, } def _get_lock_maker(scheduler=None): """Returns an appropriate function for creating resource locks. Parameters ---------- scheduler : str or None Dask scheduler being used. See Also -------- dask.utils.get_scheduler_lock """ return _LOCK_MAKERS[scheduler] def _get_scheduler(get=None, collection=None): """Determine the dask scheduler that is being used. None is returned if no dask scheduler is active. See also -------- dask.base.get_scheduler """ try: # dask 0.18.1 and later from dask.base import get_scheduler actual_get = get_scheduler(get, collection) except ImportError: try: from dask.utils import effective_get actual_get = effective_get(get, collection) except ImportError: return None try: from dask.distributed import Client if isinstance(actual_get.__self__, Client): return 'distributed' except (ImportError, AttributeError): try: import dask.multiprocessing if actual_get == dask.multiprocessing.get: return 'multiprocessing' else: return 'threaded' except ImportError: return 'threaded' def get_write_lock(key): """Get a scheduler appropriate lock for writing to the given resource. Parameters ---------- key : str Name of the resource for which to acquire a lock. Typically a filename. Returns ------- Lock object that can be used like a threading.Lock object. """ scheduler = _get_scheduler() lock_maker = _get_lock_maker(scheduler) return lock_maker(key) def acquire(lock, blocking=True): """Acquire a lock, possibly in a non-blocking fashion. Includes backwards compatibility hacks for old versions of Python, dask and dask-distributed. """ if blocking: # no arguments needed return lock.acquire() elif DistributedLock is not None and isinstance(lock, DistributedLock): # distributed.Lock doesn't support the blocking argument yet: # https://github.com/dask/distributed/pull/2412 return lock.acquire(timeout=0) else: # "blocking" keyword argument not supported for: # - threading.Lock on Python 2. # - dask.SerializableLock with dask v1.0.0 or earlier. # - multiprocessing.Lock calls the argument "block" instead. return lock.acquire(blocking) class CombinedLock: """A combination of multiple locks. Like a locked door, a CombinedLock is locked if any of its constituent locks are locked. 
""" def __init__(self, locks): self.locks = tuple(set(locks)) # remove duplicates def acquire(self, blocking=True): return all(acquire(lock, blocking=blocking) for lock in self.locks) def release(self): for lock in self.locks: lock.release() def __enter__(self): for lock in self.locks: lock.__enter__() def __exit__(self, *args): for lock in self.locks: lock.__exit__(*args) def locked(self): return any(lock.locked for lock in self.locks) def __repr__(self): return "CombinedLock(%r)" % list(self.locks) class DummyLock: """DummyLock provides the lock API without any actual locking.""" def acquire(self, blocking=True): pass def release(self): pass def __enter__(self): pass def __exit__(self, *args): pass def locked(self): return False def combine_locks(locks): """Combine a sequence of locks into a single lock.""" all_locks = [] for lock in locks: if isinstance(lock, CombinedLock): all_locks.extend(lock.locks) elif lock is not None: all_locks.append(lock) num_locks = len(all_locks) if num_locks > 1: return CombinedLock(all_locks) elif num_locks == 1: return all_locks[0] else: return DummyLock() def ensure_lock(lock): """Ensure that the given object is a lock.""" if lock is None or lock is False: return DummyLock() return lock
apache-2.0
-7,480,206,169,277,232,000
24.578199
96
0.631277
false
4.119847
false
false
false
brahle/eval2
scripts/haski/actions/reviewaction.py
1
1578
#!/usr/bin/env python3.2 # Copyright 2011 Bruno Rahle # # This file is part of Evaluator. # # Evaluator is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # Evaluator is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public # License along with Evaluator. If not, see # <http://www.gnu.org/licenses/>. from actions.baseaction import BaseHaskiAction import argparse class ReviewAction(BaseHaskiAction): """Performs the code review work: lints the chosen commit (unless skipped) and sends it to reviewboard. """ RB_ID_STR = 'reviewboard id' def __call__(self, params): """Fetches the desired revision and then sends it to reviewboard. """ commit = self.get_commit(params) if not params.skip_lint: commit.lint(params) rb_id = commit.review() if params.revision != 'HEAD': if self.RB_ID_STR not in commit.message.fields: print('[WARNING] Please edit the message to incorporate', '`ReviewBoardID` field.') else: commit.message.set_field(self.RB_ID_STR, rb_id) commit.amend() def main(): pass if __name__ == '__main__': main()
agpl-3.0
-8,740,087,895,962,332,000
31.204082
73
0.665399
false
3.905941
false
false
false
pelodelfuego/word2vec-toolbox
toolbox/mlLib/conceptPairFeature.py
1
4358
#!/usr/bin/env python # -*- coding: utf-8 -*- import __init__ import numpy as np from scipy.weave import inline from sklearn.ensemble import RandomForestClassifier import cpLib.concept as cp import utils.skUtils as sku # PROJECTION def projCosSim(c1, c2): v1 = c1.vect v2 = c2.vect dimCount = len(v1) arr = np.zeros(dimCount, 'f') code = """ for(int i = 0; i < dimCount; i++) { float norm_v1 = 0.0; float norm_v2 = 0.0; float dot_pdt = 0.0; for(int j = 0; j < dimCount; j++) { if(i != j) { dot_pdt += v1[j] * v2[j]; norm_v1 += v1[j] * v1[j]; norm_v2 += v2[j] * v2[j]; } } norm_v1 = sqrtf(norm_v1); norm_v2 = sqrtf(norm_v2); arr[i] = dot_pdt / norm_v1 / norm_v2; } return_val = 1; """ inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc') return arr def projEuclDist(c1, c2): v1 = c1.vect v2 = c2.vect dimCount = len(v1) arr = np.zeros(dimCount, 'f') code = """ for(int i = 0; i < dimCount; i++) { float dist = 0.0; for(int j = 0; j < dimCount; j++) { if(i != j) { dist += pow(v1[j] - v2[j], 2); } } arr[i] = sqrt(dist); } return_val = 1; """ inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc') return arr def projManaDist(c1, c2): v1 = c1.vect v2 = c2.vect dimCount = len(v1) arr = np.zeros(dimCount, 'f') code = """ for(int i = 0; i < dimCount; i++) { float dist = 0.0; for(int j = 0; j < dimCount; j++) { if(i != j) { dist += fabs(v1[i] - v2[i]); } } arr[i] = dist; } return_val = 1; """ inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc') return arr # COMMUTATIVE FEATURE def subCarth(conceptPair): return conceptPair[2].vect - conceptPair[0].vect def subPolar(conceptPair): return conceptPair[2].polarVect() - conceptPair[0].polarVect() def subAngular(conceptPair): return conceptPair[2].angularVect() - conceptPair[0].angularVect() def concatCarth(conceptPair): return np.concatenate((conceptPair[0].vect, conceptPair[2].vect)) def concatPolar(conceptPair): return np.concatenate((conceptPair[0].polarVect(), conceptPair[2].polarVect())) def concatAngular(conceptPair): return np.concatenate((conceptPair[0].angularVect(), conceptPair[2].angularVect())) # NON COMMUATIVE FEATURE # PROJECTION SIMILARITY def pCosSim(conceptPair): return projCosSim(conceptPair[0], conceptPair[2]) def pEuclDist(conceptPair): return projEuclDist(conceptPair[0], conceptPair[2]) def pManaDist(conceptPair): return projManaDist(conceptPair[0], conceptPair[2]) # PROJECTION DISSIMILARITY def _projectionDissimarilty(projectionMetric, globalMetric, conceptPair): projectedFeature = projectionMetric(conceptPair[0], conceptPair[2]) globalFeature = globalMetric(conceptPair[0], conceptPair[2]) return np.array([(globalFeature - v) for v in projectedFeature]) def pdCosSim(conceptPair): return _projectionDissimarilty(projCosSim, cp.cosSim, conceptPair) def pdEuclDist(conceptPair): return _projectionDissimarilty(projEuclDist, cp.euclDist, conceptPair) def pdManaDist(conceptPair): return _projectionDissimarilty(projManaDist, cp.manaDist, conceptPair) # CLF class ConceptPairClf(object): def __init__(self, clf, featureExtractionFct): self.clf = clf self.featureExtractionFct = featureExtractionFct def fit(self, X, y): self.clf.fit([self.featureExtractionFct(x) for x in X], y) self.classes_ = self.clf.classes_ def predict(self, X): return self.clf.predict([self.featureExtractionFct(x) for x in X]) def predict_proba(self, X): return self.clf.predict_proba([self.featureExtractionFct(x) for x in X])
gpl-3.0
-1,004,693,826,859,406,600
26.2375
91
0.562184
false
3.228148
false
false
false
skodapetr/lbvs-environment
scripts/libs/core.py
1
1664
#!/usr/bin/env python # -*- coding: utf-8 -*- import json import csv import os import logging import gzip __license__ = "X11" def init_logging(): logging.basicConfig( level=logging.DEBUG, format='%(asctime)s [%(levelname)s] - %(message)s', datefmt='%H:%M:%S') def create_directory(path): if not os.path.exists(path) and not path == "": os.makedirs(path) def create_parent_directory(path): parent_directory = os.path.dirname(path) if not os.path.exists(parent_directory) and not parent_directory == "": os.makedirs(parent_directory) def read_json(path): if path.endswith(".gz"): with gzip.open(path, "rt") as stream: return json.load(stream) else: with open(path, "r") as stream: return json.load(stream) def write_json(path, object_to_write): create_parent_directory(path) if path.endswith(".gz"): with gzip.open(path, "wt") as stream: json.dump(object_to_write, stream, indent=2) else: with open(path, "w") as stream: json.dump(object_to_write, stream, indent=2) def read_csv_as_object(path): """ Read CSV lines as objects. """ results = [] with open(path) as stream: reader = csv.reader(stream, delimiter=",", quotechar='"') header = next(reader) for row in reader: new_object = {} for index in range(0, len(row)): new_object[header[index]] = row[index] results.append(new_object) return results if __name__ == "__main__": raise Exception("This module can be used only as a library!")
mit
453,216,932,470,209,800
23.470588
75
0.590144
false
3.570815
false
false
false
kaniblu/hangul-utils
hangul_utils/unicode.py
1
8775
__all__ = ["split_syllable_char", "split_syllables", "join_jamos", "join_jamos_char", "CHAR_INITIALS", "CHAR_MEDIALS", "CHAR_FINALS"] import itertools INITIAL = 0x001 MEDIAL = 0x010 FINAL = 0x100 CHAR_LISTS = { INITIAL: list(map(chr, [ 0x3131, 0x3132, 0x3134, 0x3137, 0x3138, 0x3139, 0x3141, 0x3142, 0x3143, 0x3145, 0x3146, 0x3147, 0x3148, 0x3149, 0x314a, 0x314b, 0x314c, 0x314d, 0x314e ])), MEDIAL: list(map(chr, [ 0x314f, 0x3150, 0x3151, 0x3152, 0x3153, 0x3154, 0x3155, 0x3156, 0x3157, 0x3158, 0x3159, 0x315a, 0x315b, 0x315c, 0x315d, 0x315e, 0x315f, 0x3160, 0x3161, 0x3162, 0x3163 ])), FINAL: list(map(chr, [ 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3139, 0x313a, 0x313b, 0x313c, 0x313d, 0x313e, 0x313f, 0x3140, 0x3141, 0x3142, 0x3144, 0x3145, 0x3146, 0x3147, 0x3148, 0x314a, 0x314b, 0x314c, 0x314d, 0x314e ])) } CHAR_INITIALS = CHAR_LISTS[INITIAL] CHAR_MEDIALS = CHAR_LISTS[MEDIAL] CHAR_FINALS = CHAR_LISTS[FINAL] CHAR_SETS = {k: set(v) for k, v in CHAR_LISTS.items()} CHARSET = set(itertools.chain(*CHAR_SETS.values())) CHAR_INDICES = {k: {c: i for i, c in enumerate(v)} for k, v in CHAR_LISTS.items()} def is_hangul_syllable(c): return 0xac00 <= ord(c) <= 0xd7a3 # Hangul Syllables def is_hangul_jamo(c): return 0x1100 <= ord(c) <= 0x11ff # Hangul Jamo def is_hangul_compat_jamo(c): return 0x3130 <= ord(c) <= 0x318f # Hangul Compatibility Jamo def is_hangul_jamo_exta(c): return 0xa960 <= ord(c) <= 0xa97f # Hangul Jamo Extended-A def is_hangul_jamo_extb(c): return 0xd7b0 <= ord(c) <= 0xd7ff # Hangul Jamo Extended-B def is_hangul(c): return (is_hangul_syllable(c) or is_hangul_jamo(c) or is_hangul_compat_jamo(c) or is_hangul_jamo_exta(c) or is_hangul_jamo_extb(c)) def is_supported_hangul(c): return is_hangul_syllable(c) or is_hangul_compat_jamo(c) def check_hangul(c, jamo_only=False): if not ((jamo_only or is_hangul_compat_jamo(c)) or is_supported_hangul(c)): raise ValueError(f"'{c}' is not a supported hangul character. " f"'Hangul Syllables' (0xac00 ~ 0xd7a3) and " f"'Hangul Compatibility Jamos' (0x3130 ~ 0x318f) are " f"supported at the moment.") def get_jamo_type(c): check_hangul(c) assert is_hangul_compat_jamo(c), f"not a jamo: {ord(c):x}" return sum(t for t, s in CHAR_SETS.items() if c in s) def split_syllable_char(c): """ Splits a given korean syllable into its components. Each component is represented by Unicode in 'Hangul Compatibility Jamo' range. Arguments: c: A Korean character. Returns: A triple (initial, medial, final) of Hangul Compatibility Jamos. If no jamo corresponds to a position, `None` is returned there. 
Example: >>> split_syllable_char("안") ("ㅇ", "ㅏ", "ㄴ") >>> split_syllable_char("고") ("ㄱ", "ㅗ", None) >>> split_syllable_char("ㅗ") (None, "ㅗ", None) >>> split_syllable_char("ㅇ") ("ㅇ", None, None) """ check_hangul(c) if len(c) != 1: raise ValueError("Input string must have exactly one character.") init, med, final = None, None, None if is_hangul_syllable(c): offset = ord(c) - 0xac00 x = (offset - offset % 28) // 28 init, med, final = x // 21, x % 21, offset % 28 if not final: final = None else: final -= 1 else: pos = get_jamo_type(c) if pos & INITIAL == INITIAL: pos = INITIAL elif pos & MEDIAL == MEDIAL: pos = MEDIAL elif pos & FINAL == FINAL: pos = FINAL idx = CHAR_INDICES[pos][c] if pos == INITIAL: init = idx elif pos == MEDIAL: med = idx else: final = idx return tuple(CHAR_LISTS[pos][idx] if idx is not None else None for pos, idx in zip([INITIAL, MEDIAL, FINAL], [init, med, final])) def split_syllables(s, ignore_err=True, pad=None): """ Performs syllable-split on a string. Arguments: s (str): A string (possibly mixed with non-Hangul characters). ignore_err (bool): If set False, it ensures that all characters in the string are Hangul-splittable and throws a ValueError otherwise. (default: True) pad (str): Pad empty jamo positions (initial, medial, or final) with `pad` character. This is useful for cases where fixed-length strings are needed. (default: None) Returns: Hangul-split string Example: >>> split_syllables("안녕하세요") "ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ" >>> split_syllables("안녕하세요~~", ignore_err=False) ValueError: encountered an unsupported character: ~ (0x7e) >>> split_syllables("안녕하세요ㅛ", pad="x") 'ㅇㅏㄴㄴㅕㅇㅎㅏxㅅㅔxㅇㅛxxㅛx' """ def try_split(c): try: return split_syllable_char(c) except ValueError: if ignore_err: return (c,) raise ValueError(f"encountered an unsupported character: " f"{c} (0x{ord(c):x})") s = map(try_split, s) if pad is not None: tuples = map(lambda x: tuple(pad if y is None else y for y in x), s) else: tuples = map(lambda x: filter(None, x), s) return "".join(itertools.chain(*tuples)) def join_jamos_char(init, med, final=None): """ Combines jamos into a single syllable. Arguments: init (str): Initial jao. med (str): Medial jamo. final (str): Final jamo. If not supplied, the final syllable is made without the final. (default: None) Returns: A Korean syllable. """ chars = (init, med, final) for c in filter(None, chars): check_hangul(c, jamo_only=True) idx = tuple(CHAR_INDICES[pos][c] if c is not None else c for pos, c in zip((INITIAL, MEDIAL, FINAL), chars)) init_idx, med_idx, final_idx = idx # final index must be shifted once as # final index with 0 points to syllables without final final_idx = 0 if final_idx is None else final_idx + 1 return chr(0xac00 + 28 * 21 * init_idx + 28 * med_idx + final_idx) def join_jamos(s, ignore_err=True): """ Combines a sequence of jamos to produce a sequence of syllables. Arguments: s (str): A string (possible mixed with non-jamo characters). ignore_err (bool): If set False, it will ensure that all characters will be consumed for the making of syllables. It will throw a ValueError when it fails to do so. 
(default: True) Returns: A string Example: >>> join_jamos("ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ") "안녕하세요" >>> join_jamos("ㅇㅏㄴㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ") "안ㄴ녕하세요" >>> join_jamos() """ last_t = 0 queue = [] new_string = "" def flush(n=0): new_queue = [] while len(queue) > n: new_queue.append(queue.pop()) if len(new_queue) == 1: if not ignore_err: raise ValueError(f"invalid jamo character: {new_queue[0]}") result = new_queue[0] elif len(new_queue) >= 2: try: result = join_jamos_char(*new_queue) except (ValueError, KeyError): # Invalid jamo combination if not ignore_err: raise ValueError(f"invalid jamo characters: {new_queue}") result = "".join(new_queue) else: result = None return result for c in s: if c not in CHARSET: if queue: new_c = flush() + c else: new_c = c last_t = 0 else: t = get_jamo_type(c) new_c = None if t & FINAL == FINAL: if not (last_t == MEDIAL): new_c = flush() elif t == INITIAL: new_c = flush() elif t == MEDIAL: if last_t & INITIAL == INITIAL: new_c = flush(1) else: new_c = flush() last_t = t queue.insert(0, c) if new_c: new_string += new_c if queue: new_string += flush() return new_string
gpl-3.0
3,778,202,042,101,361,700
29.820789
79
0.551227
false
2.954983
false
false
false
drnextgis/QGIS
python/plugins/processing/core/parameters.py
1
55397
# -*- coding: utf-8 -*- """ *************************************************************************** Parameters.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from builtins import str from builtins import range from builtins import object __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import sys import os import math from inspect import isclass from copy import deepcopy import numbers from qgis.utils import iface from qgis.PyQt.QtCore import QCoreApplication from qgis.core import (QgsRasterLayer, QgsVectorLayer, QgsMapLayer, QgsCoordinateReferenceSystem, QgsExpressionContext, QgsExpressionContextUtils, QgsExpression, QgsExpressionContextScope) from processing.tools.vector import resolveFieldIndex, features from processing.tools import dataobjects from processing.core.outputs import OutputNumber, OutputRaster, OutputVector from processing.tools.dataobjects import getObject def parseBool(s): if s is None or s == str(None).lower(): return None return str(s).lower() == str(True).lower() def _splitParameterOptions(line): tokens = line.split('=', 1) if tokens[1].lower().strip().startswith('optional'): isOptional = True definition = tokens[1].strip()[len('optional') + 1:] else: isOptional = False definition = tokens[1] return isOptional, tokens[0], definition def _createDescriptiveName(s): return s.replace('_', ' ') def _expressionContext(): context = QgsExpressionContext() context.appendScope(QgsExpressionContextUtils.globalScope()) context.appendScope(QgsExpressionContextUtils.projectScope()) if iface.mapCanvas(): context.appendScope(QgsExpressionContextUtils.mapSettingsScope(iface.mapCanvas().mapSettings())) processingScope = QgsExpressionContextScope() extent = iface.mapCanvas().fullExtent() processingScope.setVariable('fullextent_minx', extent.xMinimum()) processingScope.setVariable('fullextent_miny', extent.yMinimum()) processingScope.setVariable('fullextent_maxx', extent.xMaximum()) processingScope.setVariable('fullextent_maxy', extent.yMaximum()) context.appendScope(processingScope) return context def _resolveLayers(value): layers = dataobjects.getAllLayers() if value: inputlayers = value.split(';') for i, inputlayer in enumerate(inputlayers): for layer in layers: if layer.name() == inputlayer: inputlayers[i] = layer.source() break return ";".join(inputlayers) class Parameter(object): """ Base class for all parameters that a geoalgorithm might take as input. """ default_metadata = {} def __init__(self, name='', description='', default=None, optional=False, metadata={}): self.name = name self.description = description self.default = default self.value = default self.isAdvanced = False # A hidden parameter can be used to set a hard-coded value. 
# It can be used as any other parameter, but it will not be # shown to the user self.hidden = False self.optional = parseBool(optional) # TODO: make deep copy and deep update self.metadata = deepcopy(self.default_metadata) self.metadata.update(deepcopy(metadata)) def setValue(self, obj): """ Sets the value of the parameter. Returns true if the value passed is correct for the type of parameter. """ if obj is None: if not self.optional: return False self.value = None return True self.value = str(obj) return True def setDefaultValue(self): """ Sets the value of the parameter to the default one Returns true if the default value is correct for the type of parameter. """ return self.setValue(self.default) def __str__(self): return u'{} <{}>'.format(self.name, self.__class__.__name__) def getValueAsCommandLineParameter(self): """ Returns the value of this parameter as it should have been entered in the console if calling an algorithm using the Processing.runalg() method. """ return str(self.value) def typeName(self): return self.__class__.__name__.replace('Parameter', '').lower() def todict(self): o = deepcopy(self.__dict__) del o['metadata'] return o def tr(self, string, context=''): if context == '': context = 'Parameter' return QCoreApplication.translate(context, string) def wrapper(self, dialog, row=0, col=0): wrapper = self.metadata.get('widget_wrapper', None) # wrapper metadata should be a class path if isinstance(wrapper, str): tokens = wrapper.split('.') mod = __import__('.'.join(tokens[:-1]), fromlist=[tokens[-1]]) wrapper = getattr(mod, tokens[-1]) # or directly a class object if isclass(wrapper): wrapper = wrapper(self, dialog, row, col) # or a wrapper instance return wrapper def evaluate(self, alg): pass def evaluateForModeler(self, value, model): return value class ParameterBoolean(Parameter): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.BooleanWidgetWrapper' } def __init__(self, name='', description='', default=None, optional=False, metadata={}): Parameter.__init__(self, name, description, parseBool(default), optional, metadata) def setValue(self, value): if value is None: if not self.optional: return False self.value = None return True if isinstance(value, str): self.value = str(value).lower() == str(True).lower() else: self.value = bool(value) return True def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'boolean ' return '##' + self.name + '=' + param_type + str(self.default) @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) if definition.startswith("boolean"): descName = _createDescriptiveName(name) default = definition.strip()[len('boolean') + 1:] if default: param = ParameterBoolean(name, descName, default) else: param = ParameterBoolean(name, descName) param.optional = isOptional return param class ParameterCrs(Parameter): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.CrsWidgetWrapper' } def __init__(self, name='', description='', default=None, optional=False, metadata={}): '''The value is a string that uniquely identifies the coordinate reference system. 
Typically it is the auth id of the CRS (if the authority is EPSG) or proj4 string of the CRS (in case of other authorities or user defined projections).''' Parameter.__init__(self, name, description, default, optional, metadata) def setValue(self, value): if not bool(value): if not self.optional: return False self.value = None return True if isinstance(value, QgsCoordinateReferenceSystem): self.value = value.authid() return True if isinstance(value, QgsMapLayer): self.value = value.crs().authid() return True try: layer = dataobjects.getObjectFromUri(value) if layer is not None: self.value = layer.crs().authid() return True except: pass # TODO: check it is a valid authid self.value = value return True def getValueAsCommandLineParameter(self): return '"' + str(self.value) + '"' def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'crs ' return '##' + self.name + '=' + param_type + str(self.default) @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) if definition.startswith("crs"): descName = _createDescriptiveName(name) default = definition.strip()[len('crs') + 1:] if default: return ParameterCrs(name, descName, default, isOptional) else: return ParameterCrs(name, descName, None, isOptional) class ParameterDataObject(Parameter): def getValueAsCommandLineParameter(self): if self.value is None: return str(None) else: s = dataobjects.normalizeLayerSource(str(self.value)) s = '"%s"' % s return s def evaluate(self, alg): self.value = _resolveLayers(self.value) class ParameterExtent(Parameter): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.ExtentWidgetWrapper' } USE_MIN_COVERING_EXTENT = 'USE_MIN_COVERING_EXTENT' def __init__(self, name='', description='', default=None, optional=True): Parameter.__init__(self, name, description, default, optional) # The value is a string in the form "xmin, xmax, ymin, ymax" def setValue(self, value): if not value: if not self.optional: return False self.value = None return True if isinstance(value, QgsMapLayer): rect = value.extent() self.value = '{},{},{},{}'.format( rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum()) return True try: layer = dataobjects.getObjectFromUri(value) if layer is not None: rect = layer.extent() self.value = '{},{},{},{}'.format( rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum()) return True except: pass tokens = str(value).split(',') if len(tokens) != 4: return False try: float(tokens[0]) float(tokens[1]) float(tokens[2]) float(tokens[3]) self.value = value return True except: return False def getValueAsCommandLineParameter(self): return '"' + str(self.value) + '"' def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'extent' return '##' + self.name + '=' + param_type @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) if definition.startswith("extent"): descName = _createDescriptiveName(name) default = definition.strip()[len('extent') + 1:] or None return ParameterExtent(name, descName, default, isOptional) def evaluate(self, alg): if self.optional and not bool(self.value): self.value = self.getMinCoveringExtent(alg) def getMinCoveringExtent(self, alg): first = True found = False for param in alg.parameters: if param.value: if isinstance(param, (ParameterRaster, ParameterVector)): if isinstance(param.value, (QgsRasterLayer, QgsVectorLayer)): layer = param.value else: layer = 
dataobjects.getObject(param.value) if layer: found = True self.addToRegion(layer, first) first = False elif isinstance(param, ParameterMultipleInput): layers = param.value.split(';') for layername in layers: layer = dataobjects.getObject(layername) if layer: found = True self.addToRegion(layer, first) first = False if found: return '{},{},{},{}'.format( self.xmin, self.xmax, self.ymin, self.ymax) else: return None def addToRegion(self, layer, first): if first: self.xmin = layer.extent().xMinimum() self.xmax = layer.extent().xMaximum() self.ymin = layer.extent().yMinimum() self.ymax = layer.extent().yMaximum() else: self.xmin = min(self.xmin, layer.extent().xMinimum()) self.xmax = max(self.xmax, layer.extent().xMaximum()) self.ymin = min(self.ymin, layer.extent().yMinimum()) self.ymax = max(self.ymax, layer.extent().yMaximum()) class ParameterPoint(Parameter): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.PointWidgetWrapper' } def __init__(self, name='', description='', default=None, optional=False): Parameter.__init__(self, name, description, default, optional) # The value is a string in the form "x, y" def setValue(self, text): if text is None: if not self.optional: return False self.value = None return True tokens = str(text).split(',') if len(tokens) != 2: return False try: float(tokens[0]) float(tokens[1]) self.value = text return True except: return False def getValueAsCommandLineParameter(self): return '"' + str(self.value) + '"' def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'point' return '##' + self.name + '=' + param_type @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) if definition.startswith("point"): descName = _createDescriptiveName(name) default = definition.strip()[len('point') + 1:] or None return ParameterPoint(name, descName, default, isOptional) class ParameterFile(Parameter): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.FileWidgetWrapper' } def __init__(self, name='', description='', isFolder=False, optional=True, ext=None): Parameter.__init__(self, name, description, None, parseBool(optional)) self.ext = ext self.isFolder = parseBool(isFolder) def getValueAsCommandLineParameter(self): return '"' + str(self.value) + '"' def setValue(self, obj): if obj is None or obj.strip() == '': if not self.optional: return False self.value = None if obj is None else obj.strip() return True if self.ext is not None and obj != '' and not obj.endswith(self.ext): return False self.value = str(obj) return True def typeName(self): if self.isFolder: return 'directory' else: return 'file' def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' if self.isFolder: param_type += 'folder' else: param_type += 'file' return '##' + self.name + '=' + param_type @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) if definition.startswith("file") or definition.startswith("folder"): descName = _createDescriptiveName(name) return ParameterFile(name, descName, definition.startswith("folder"), isOptional) class ParameterFixedTable(Parameter): def __init__(self, name='', description='', numRows=3, cols=['value'], fixedNumOfRows=False, optional=False): Parameter.__init__(self, name, description, None, optional) self.cols = cols if isinstance(cols, str): self.cols = self.cols.split(";") self.numRows = int(numRows) self.fixedNumOfRows = parseBool(fixedNumOfRows) def setValue(self, 
obj): if obj is None: if not self.optional: return False self.value = None return True # TODO: check that it contains a correct number of elements if isinstance(obj, str): self.value = obj else: self.value = ParameterFixedTable.tableToString(obj) return True def getValueAsCommandLineParameter(self): return '"' + str(self.value) + '"' @staticmethod def tableToString(table): tablestring = '' for i in range(len(table)): for j in range(len(table[0])): tablestring = tablestring + table[i][j] + ',' tablestring = tablestring[:-1] return tablestring @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) if definition.startswith("point"): descName = _createDescriptiveName(name) default = definition.strip()[len('point') + 1:] or None return ParameterPoint(name, descName, default, isOptional) class ParameterMultipleInput(ParameterDataObject): """A parameter representing several data objects. Its value is a string with substrings separated by semicolons, each of which represents the data source location of each element. """ default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.MultipleInputWidgetWrapper' } exported = None def __init__(self, name='', description='', datatype=-1, optional=False): ParameterDataObject.__init__(self, name, description, None, optional) self.datatype = int(float(datatype)) self.exported = None self.minNumInputs = 0 """ Set minimum required number of inputs for parameter By default minimal number of inputs is set to 1 @type _minNumInputs: numeric type or None @param _minNumInputs: required minimum number of inputs for parameter. \ If user will pass None as parameter, we will use default minimal number of inputs (1) @return: result, if the minimum number of inputs were set. """ def setMinNumInputs(self, _minNumInputs): if _minNumInputs is None: self.minNumInputs = 0 return True if _minNumInputs < 1 and not self.optional: # don't allow to set negative or null number of inputs if parameter isn't optional return False self.minNumInputs = int(_minNumInputs) return True """ Get minimum required number of inputs for parameter @return: minimum number of inputs required for this parameter @see: setMinNumInputs() """ def getMinNumInputs(self): return self.minNumInputs def setValue(self, obj): self.exported = None if obj is None: if not self.optional: return False self.value = None return True if isinstance(obj, list): if len(obj) == 0: if self.optional: self.value = None return True else: return False # prevent setting value if we didn't provide required minimal number of inputs elif len(obj) < self.minNumInputs: return False self.value = ";".join([self.getAsString(lay) for lay in obj]) return True else: self.value = str(obj) return True def getSafeExportedLayers(self): """ Returns not the value entered by the user, but a string with semicolon-separated filenames which contains the data of the selected layers, but saved in a standard format (currently shapefiles for vector layers and GeoTiff for raster) so that they can be opened by most external applications. If there is a selection and QGIS is configured to use just the selection, it exports the layer even if it is already in a suitable format. Works only if the layer represented by the parameter value is currently loaded in QGIS. Otherwise, it will not perform any export and return the current value string. If the current value represents a layer in a suitable format, it does no export at all and returns that value. Currently, it works just for vector layer. 
In the case of raster layers, it returns the parameter value. The layers are exported just the first time the method is called. The method can be called several times and it will always return the same string, performing the export only the first time. """ if self.exported: return self.exported self.exported = self.value layers = self.value.split(';') if layers is None or len(layers) == 0: return self.value if self.datatype == dataobjects.TYPE_RASTER: for layerfile in layers: layer = dataobjects.getObjectFromUri(layerfile, False) if layer: filename = dataobjects.exportRasterLayer(layer) self.exported = self.exported.replace(layerfile, filename) return self.exported elif self.datatype == dataobjects.TYPE_FILE: return self.value else: for layerfile in layers: layer = dataobjects.getObjectFromUri(layerfile, False) if layer: filename = dataobjects.exportVectorLayer(layer) self.exported = self.exported.replace(layerfile, filename) return self.exported def getAsString(self, value): if self.datatype == dataobjects.TYPE_RASTER: if isinstance(value, QgsRasterLayer): return str(value.dataProvider().dataSourceUri()) else: s = str(value) layers = dataobjects.getRasterLayers() for layer in layers: if layer.name() == s: return str(layer.dataProvider().dataSourceUri()) return s if self.datatype == dataobjects.TYPE_FILE: return str(value) else: if isinstance(value, QgsVectorLayer): return str(value.source()) else: s = str(value) layers = dataobjects.getVectorLayers([self.datatype]) for layer in layers: if layer.name() == s: return str(layer.source()) return s def getFileFilter(self): if self.datatype == dataobjects.TYPE_RASTER: exts = dataobjects.getSupportedOutputRasterLayerExtensions() elif self.datatype == dataobjects.TYPE_FILE: return self.tr('All files (*.*)', 'ParameterMultipleInput') else: exts = dataobjects.getSupportedOutputVectorLayerExtensions() for i in range(len(exts)): exts[i] = self.tr('%s files(*.%s)', 'ParameterMultipleInput') % (exts[i].upper(), exts[i].lower()) return ';;'.join(exts) def dataType(self): if self.datatype == dataobjects.TYPE_VECTOR_POINT: return 'points' elif self.datatype == dataobjects.TYPE_VECTOR_LINE: return 'lines' elif self.datatype == dataobjects.TYPE_VECTOR_POLYGON: return 'polygons' elif self.datatype == dataobjects.TYPE_RASTER: return 'rasters' elif self.datatype == dataobjects.TYPE_FILE: return 'files' else: return 'any vectors' def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' if self.datatype == dataobjects.TYPE_RASTER: param_type += 'multiple raster' if self.datatype == dataobjects.TYPE_FILE: param_type += 'multiple file' else: param_type += 'multiple vector' return '##' + self.name + '=' + param_type @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) descName = _createDescriptiveName(name) if definition.lower().strip() == 'multiple raster': return ParameterMultipleInput(name, descName, dataobjects.TYPE_RASTER, isOptional) elif definition.lower().strip() == 'multiple vector': return ParameterMultipleInput(name, definition, dataobjects.TYPE_VECTOR_ANY, isOptional) def evaluate(self, alg): self.value = _resolveLayers(self.value) class ParameterNumber(Parameter): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.NumberWidgetWrapper' } def __init__(self, name='', description='', minValue=None, maxValue=None, default=None, optional=False): Parameter.__init__(self, name, description, default, optional) if default is not None: try: self.default = 
int(str(default)) self.isInteger = True except ValueError: self.default = float(default) self.isInteger = False else: self.isInteger = False if minValue is not None: self.min = int(float(minValue)) if self.isInteger else float(minValue) else: self.min = None if maxValue is not None: self.max = int(float(maxValue)) if self.isInteger else float(maxValue) else: self.max = None self.value = self.default def setValue(self, n): if n is None: if not self.optional: return False self.value = None return True if isinstance(n, str): try: v = self._evaluate(n) self.value = float(v) if self.isInteger: self.value = int(math.floor(self.value)) return True except: return False else: try: if float(n) - int(float(n)) == 0: value = int(float(n)) else: value = float(n) if self.min is not None: if value < self.min: return False if self.max is not None: if value > self.max: return False self.value = value return True except: raise return False def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'number' code = '##' + self.name + '=' + param_type if self.default: code += str(self.default) return code @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) descName = _createDescriptiveName(name) if definition.lower().strip().startswith('number'): default = definition.strip()[len('number'):] or None if default == 'None': default = None return ParameterNumber(name, descName, default=default, optional=isOptional) def _evaluate(self, value): exp = QgsExpression(value) if exp.hasParserError(): raise ValueError(self.tr("Error in parameter expression: ") + exp.parserErrorString()) result = exp.evaluate(_expressionContext()) if exp.hasEvalError(): raise ValueError("Error evaluating parameter expression: " + exp.evalErrorString()) if self.isInteger: return math.floor(result) else: return result def evaluate(self, alg): if isinstance(self.value, str) and bool(self.value): self.value = self._evaluate(self.value) def _layerVariables(self, element, alg=None): variables = {} layer = getObject(element.value) if layer is not None: name = element.name if alg is None else "%s_%s" % (alg.name, element.name) variables['@%s_minx' % name] = layer.extent().xMinimum() variables['@%s_miny' % name] = layer.extent().yMinimum() variables['@%s_maxx' % name] = layer.extent().yMaximum() variables['@%s_maxy' % name] = layer.extent().yMaximum() if isinstance(element, (ParameterRaster, OutputRaster)): stats = layer.dataProvider().bandStatistics(1) variables['@%s_avg' % name] = stats.mean variables['@%s_stddev' % name] = stats.stdDev variables['@%s_min' % name] = stats.minimumValue variables['@%s_max' % name] = stats.maximumValue return variables def evaluateForModeler(self, value, model): if isinstance(value, numbers.Number): return value variables = {} for param in model.parameters: if isinstance(param, ParameterNumber): variables["@" + param.name] = param.value if isinstance(param, (ParameterRaster, ParameterVector)): variables.update(self._layerVariables(param)) for alg in list(model.algs.values()): for out in alg.algorithm.outputs: if isinstance(out, OutputNumber): variables["@%s_%s" % (alg.name, out.name)] = out.value if isinstance(out, (OutputRaster, OutputVector)): variables.update(self._layerVariables(out, alg)) for k, v in list(variables.items()): value = value.replace(k, str(v)) return value def expressionContext(self): return _expressionContext() def getValueAsCommandLineParameter(self): if self.value is None: return str(None) if 
isinstance(self.value, str): return '"%s"' + self.value return str(self.value) class ParameterRange(Parameter): def __init__(self, name='', description='', default=None, optional=False): Parameter.__init__(self, name, description, default, optional) if default is not None: values = default.split(',') try: int(values[0]) int(values[1]) self.isInteger = True except: self.isInteger = False else: self.isInteger = False def setValue(self, text): if text is None: if not self.optional: return False self.value = None return True tokens = text.split(',') if len(tokens) != 2: return False try: float(tokens[0]) float(tokens[1]) self.value = text return True except: return False def getValueAsCommandLineParameter(self): return '"' + str(self.value) + '"' if self.value is not None else str(None) class ParameterRaster(ParameterDataObject): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.RasterWidgetWrapper' } def __init__(self, name='', description='', optional=False, showSublayersDialog=True): ParameterDataObject.__init__(self, name, description, None, optional) self.showSublayersDialog = parseBool(showSublayersDialog) self.exported = None def getSafeExportedLayer(self): """Returns not the value entered by the user, but a string with a filename which contains the data of this layer, but saved in a standard format (currently always a geotiff file) so that it can be opened by most external applications. Works only if the layer represented by the parameter value is currently loaded in QGIS. Otherwise, it will not perform any export and return the current value string. If the current value represents a layer in a suitable format, it does not export at all and returns that value. The layer is exported just the first time the method is called. The method can be called several times and it will always return the same file, performing the export only the first time. 
""" if self.exported: return self.exported layer = dataobjects.getObjectFromUri(self.value, False) if layer: self.exported = dataobjects.exportRasterLayer(layer) else: self.exported = self.value return self.exported def setValue(self, obj): self.exported = None if obj is None: if not self.optional: return False self.value = None return True if isinstance(obj, QgsRasterLayer): self.value = str(obj.dataProvider().dataSourceUri()) return True else: self.value = str(obj) return True def getFileFilter(self): exts = dataobjects.getSupportedOutputRasterLayerExtensions() for i in range(len(exts)): exts[i] = self.tr('%s files(*.%s)', 'ParameterRaster') % (exts[i].upper(), exts[i].lower()) return ';;'.join(exts) def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'raster' return '##' + self.name + '=' + param_type @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) descName = _createDescriptiveName(name) if definition.lower().strip().startswith('raster'): return ParameterRaster(name, descName, optional=isOptional) class ParameterSelection(Parameter): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.SelectionWidgetWrapper' } def __init__(self, name='', description='', options=[], default=None, isSource=False, multiple=False, optional=False): Parameter.__init__(self, name, description, default, optional) self.multiple = multiple isSource = parseBool(isSource) self.options = options if isSource: self.options = [] layer = QgsVectorLayer(options[0], "layer", "ogr") if layer.isValid(): try: index = resolveFieldIndex(layer, options[1]) feats = features(layer) for feature in feats: self.options.append(str(feature.attributes()[index])) except ValueError: pass elif isinstance(self.options, str): self.options = self.options.split(";") if default is not None: try: self.default = int(default) except: self.default = 0 self.value = self.default def setValue(self, value): if value is None: if not self.optional: return False self.value = 0 return True if isinstance(value, list): if not self.multiple: return False values = [] for v in value: try: n = int(v) values.append(n) except: return False if not self.optional and len(values) == 0: return False self.value = values return True else: try: n = int(value) self.value = n return True except: return False @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) descName = _createDescriptiveName(name) if definition.lower().strip().startswith('selectionfromfile'): options = definition.strip()[len('selectionfromfile '):].split(';') return ParameterSelection(name, descName, options, isSource=True, optional=isOptional) elif definition.lower().strip().startswith('selection'): options = definition.strip()[len('selection '):].split(';') return ParameterSelection(name, descName, options, optional=isOptional) elif definition.lower().strip().startswith('multipleselectionfromfile'): options = definition.strip()[len('multipleselectionfromfile '):].split(';') return ParameterSelection(name, descName, options, isSource=True, multiple=True, optional=isOptional) elif definition.lower().strip().startswith('multipleselection'): options = definition.strip()[len('multipleselection '):].split(';') return ParameterSelection(name, descName, options, multiple=True, optional=isOptional) class ParameterEvaluationException(Exception): def __init__(self, param, msg): Exception.__init__(msg) self.param = param class 
ParameterString(Parameter): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.StringWidgetWrapper' } NEWLINE = '\n' ESCAPED_NEWLINE = '\\n' def __init__(self, name='', description='', default=None, multiline=False, optional=False, evaluateExpressions=False): Parameter.__init__(self, name, description, default, optional) self.multiline = parseBool(multiline) self.evaluateExpressions = parseBool(evaluateExpressions) def setValue(self, obj): if not bool(obj): if not self.optional: return False self.value = None return True self.value = str(obj).replace( ParameterString.ESCAPED_NEWLINE, ParameterString.NEWLINE ) return True def getValueAsCommandLineParameter(self): return ('"' + str(self.value.replace(ParameterString.NEWLINE, ParameterString.ESCAPED_NEWLINE)) + '"' if self.value is not None else str(None)) def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'string' return '##' + self.name + '=' + param_type + self.default @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) descName = _createDescriptiveName(name) if definition.lower().strip().startswith('string'): default = definition.strip()[len('string') + 1:] if default: return ParameterString(name, descName, default, optional=isOptional) else: return ParameterString(name, descName, optional=isOptional) elif definition.lower().strip().startswith('longstring'): default = definition.strip()[len('longstring') + 1:] if default: return ParameterString(name, descName, default, multiline=True, optional=isOptional) else: return ParameterString(name, descName, multiline=True, optional=isOptional) def evaluate(self, alg): if isinstance(self.value, str) and bool(self.value) and self.evaluateExpressions: exp = QgsExpression(self.value) if exp.hasParserError(): raise ValueError(self.tr("Error in parameter expression: ") + exp.parserErrorString()) result = exp.evaluate(_expressionContext()) if exp.hasEvalError(): raise ValueError("Error evaluating parameter expression: " + exp.evalErrorString()) self.value = result def expressionContext(self): return _expressionContext() class ParameterExpression(Parameter): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.ExpressionWidgetWrapper' } NEWLINE = '\n' ESCAPED_NEWLINE = '\\n' def __init__(self, name='', description='', default=None, optional=False, parent_layer=None): Parameter.__init__(self, name, description, default, optional) self.parent_layer = parent_layer def setValue(self, obj): if not bool(obj): if not self.optional: return False self.value = None return True self.value = str(obj).replace( ParameterString.ESCAPED_NEWLINE, ParameterString.NEWLINE ) return True def getValueAsCommandLineParameter(self): return ('"' + str(self.value.replace(ParameterExpression.NEWLINE, ParameterExpression.ESCAPED_NEWLINE)) + '"' if self.value is not None else str(None)) def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'expression' return '##' + self.name + '=' + param_type + self.default @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) if definition.lower().strip().startswith('expression'): descName = _createDescriptiveName(name) default = definition.strip()[len('expression') + 1:] if default: return ParameterExpression(name, descName, default, optional=isOptional) else: return ParameterExpression(name, descName, optional=isOptional) class ParameterTable(ParameterDataObject): 
default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.TableWidgetWrapper' } def __init__(self, name='', description='', optional=False): ParameterDataObject.__init__(self, name, description, None, optional) self.exported = None def setValue(self, obj): self.exported = None if obj is None: if not self.optional: return False self.value = None return True if isinstance(obj, QgsVectorLayer): source = str(obj.source()) self.value = source return True else: self.value = str(obj) layers = dataobjects.getTables() for layer in layers: if layer.name() == self.value or layer.source() == self.value: source = str(layer.source()) self.value = source return True val = str(obj) self.value = val return os.path.exists(self.value) def getSafeExportedTable(self): """Returns not the value entered by the user, but a string with a filename which contains the data of this table, but saved in a standard format (currently always a DBF file) so that it can be opened by most external applications. Works only if the table represented by the parameter value is currently loaded in QGIS. Otherwise, it will not perform any export and return the current value string. If the current value represents a table in a suitable format, it does not export at all and returns that value. The table is exported just the first time the method is called. The method can be called several times and it will always return the same file, performing the export only the first time. """ if self.exported: return self.exported table = dataobjects.getObjectFromUri(self.value, False) if table: self.exported = dataobjects.exportTable(table) else: self.exported = self.value return self.exported def getFileFilter(self): exts = ['csv', 'dbf'] for i in range(len(exts)): exts[i] = self.tr('%s files(*.%s)', 'ParameterTable') % (exts[i].upper(), exts[i].lower()) return ';;'.join(exts) def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'table' return '##' + self.name + '=' + param_type @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) descName = _createDescriptiveName(name) if definition.lower().strip().startswith('table'): return ParameterTable(name, descName, isOptional) class ParameterTableField(Parameter): """A parameter representing a table field. Its value is a string that represents the name of the field. 
""" default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.TableFieldWidgetWrapper' } DATA_TYPE_NUMBER = 0 DATA_TYPE_STRING = 1 DATA_TYPE_DATETIME = 2 DATA_TYPE_ANY = -1 def __init__(self, name='', description='', parent=None, datatype=-1, optional=False, multiple=False): Parameter.__init__(self, name, description, None, optional) self.parent = parent self.multiple = multiple self.datatype = int(datatype) def getValueAsCommandLineParameter(self): return '"' + str(self.value) + '"' if self.value is not None else str(None) def setValue(self, value): if not bool(value): if not self.optional: return False self.value = None return True if isinstance(value, list): if not self.multiple and len(value) > 1: return False self.value = ";".join(value) return True else: self.value = str(value) return True def __str__(self): return self.name + ' <' + self.__module__.split('.')[-1] + ' from ' \ + self.parent + '>' def dataType(self): if self.datatype == self.DATA_TYPE_NUMBER: return 'numeric' elif self.datatype == self.DATA_TYPE_STRING: return 'string' elif self.datatype == self.DATA_TYPE_DATETIME: return 'datetime' else: return 'any' def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'field' return '##' + self.name + '=' + param_type + self.parent @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) descName = _createDescriptiveName(name) if definition.lower().strip().startswith('field'): if definition.lower().strip().startswith('field number'): parent = definition.strip()[len('field number') + 1:] datatype = ParameterTableField.DATA_TYPE_NUMBER elif definition.lower().strip().startswith('field string'): parent = definition.strip()[len('field string') + 1:] datatype = ParameterTableField.DATA_TYPE_STRING elif definition.lower().strip().startswith('field datetime'): parent = definition.strip()[len('field datetime') + 1:] datatype = ParameterTableField.DATA_TYPE_DATETIME else: parent = definition.strip()[len('field') + 1:] datatype = ParameterTableField.DATA_TYPE_ANY return ParameterTableField(name, descName, parent, datatype, isOptional) class ParameterVector(ParameterDataObject): default_metadata = { 'widget_wrapper': 'processing.gui.wrappers.VectorWidgetWrapper' } def __init__(self, name='', description='', datatype=[-1], optional=False): ParameterDataObject.__init__(self, name, description, None, optional) if isinstance(datatype, int): datatype = [datatype] elif isinstance(datatype, str): datatype = [int(t) for t in datatype.split(',')] self.datatype = datatype self.exported = None self.allowOnlyOpenedLayers = False def setValue(self, obj): self.exported = None if obj is None: if not self.optional: return False self.value = None return True if isinstance(obj, QgsVectorLayer): self.value = str(obj.source()) return True else: self.value = str(obj) return True def getSafeExportedLayer(self): """Returns not the value entered by the user, but a string with a filename which contains the data of this layer, but saved in a standard format (currently always a shapefile) so that it can be opened by most external applications. If there is a selection and QGIS is configured to use just the selection, if exports the layer even if it is already in a suitable format. Works only if the layer represented by the parameter value is currently loaded in QGIS. Otherwise, it will not perform any export and return the current value string. 
If the current value represents a layer in a suitable format, it does not export at all and returns that value. The layer is exported just the first time the method is called. The method can be called several times and it will always return the same file, performing the export only the first time. """ if self.exported: return self.exported layer = dataobjects.getObjectFromUri(self.value, False) if layer: self.exported = dataobjects.exportVectorLayer(layer) else: self.exported = self.value return self.exported def getFileFilter(self): exts = dataobjects.getSupportedOutputVectorLayerExtensions() for i in range(len(exts)): exts[i] = self.tr('%s files(*.%s)', 'ParameterVector') % (exts[i].upper(), exts[i].lower()) return ';;'.join(exts) def dataType(self): return dataobjects.vectorDataType(self) def getAsScriptCode(self): param_type = '' if self.optional: param_type += 'optional ' param_type += 'vector' return '##' + self.name + '=' + param_type @classmethod def fromScriptCode(self, line): isOptional, name, definition = _splitParameterOptions(line) descName = _createDescriptiveName(name) if definition.lower().strip() == 'vector': return ParameterVector(name, descName, [dataobjects.TYPE_VECTOR_ANY], isOptional) elif definition.lower().strip() == 'vector point': return ParameterVector(name, descName, [dataobjects.TYPE_VECTOR_POINT], isOptional) elif definition.lower().strip() == 'vector line': return ParameterVector(name, descName, [dataobjects.TYPE_VECTOR_LINE], isOptional) elif definition.lower().strip() == 'vector polygon': return ParameterVector(name, descName, [dataobjects.TYPE_VECTOR_POLYGON], isOptional) class ParameterGeometryPredicate(Parameter): predicates = ('intersects', 'contains', 'disjoint', 'equals', 'touches', 'overlaps', 'within', 'crosses') def __init__(self, name='', description='', left=None, right=None, optional=False, enabledPredicates=None): Parameter.__init__(self, name, description, None, optional) self.left = left self.right = right self.value = None self.enabledPredicates = enabledPredicates if self.enabledPredicates is None: self.enabledPredicates = self.predicates def getValueAsCommandLineParameter(self): return str(self.value) def setValue(self, value): if value is None: if not self.optional: return False self.value = None return True elif len(value) == 0 and not self.optional: return False if isinstance(value, str): self.value = value.split(';') # relates to ModelerAlgorithm.resolveValue else: self.value = value return True paramClasses = [c for c in list(sys.modules[__name__].__dict__.values()) if isclass(c) and issubclass(c, Parameter)] def getParameterFromString(s): # Try the parameter definitions used in description files if '|' in s and (s.startswith("Parameter") or s.startswith("*Parameter")): isAdvanced = False if s.startswith("*"): s = s[1:] isAdvanced = True tokens = s.split("|") params = [t if str(t) != str(None) else None for t in tokens[1:]] try: clazz = getattr(sys.modules[__name__], tokens[0]) param = clazz(*params) param.isAdvanced = isAdvanced return param except: return None else: # try script syntax for paramClass in paramClasses: try: param = paramClass.fromScriptCode(s) if param is not None: return param except AttributeError: pass except: return None
gpl-2.0
6,668,968,050,453,547,000
34.083597
116
0.573605
false
4.626054
false
false
false
andrewk1/Climb-Bot
climb-bot.py
1
3083
import praw
import requests
import json
import time
import re

# Function iterates over each submission title and checks if the title contains route syntax that indicates the post is about a route
def parse_titles(bot, subreddit):
    start_time = time.time()
    for submission in subreddit.stream.submissions():
        if (submission.created_utc < start_time):
            continue
        title = submission.title
        # regex matches sequence of capitalized words followed by climb grade notation (V or 5.)
        route_regex = '([A-Z][a-z]+(?=\s[A-Z])(?:\s[A-Z][a-z]+)+) [( ]?(5.[0-9][0-9]?[A-Za-z]|[Vv][0-9][0-9]?)'
        route_name = re.search(route_regex, title)
        print route_name
        comment = make_get_request(route_name.group(0))
        if comment != 'NA':
            submission.reply(comment)

# Call custom google search engine API to parse the formulated title and gather theCrag's metadata for the route
def make_get_request(route):
    key = 'key=***'
    cx = 'cx=***'
    query= 'q='+route
    google_url = 'https://www.googleapis.com/customsearch/v1?' + key + cx + query
    response = requests.get(google_url)
    parsed_response= json.loads(response.text)
    return form_post(parsed_response)

# Extract data from google's JSON response and form a post
def form_post(parsed_response):
    # Check if Google search received a hit
    if parsed_response['searchInformation']['totalResults'] == 0 or 'items' not in parsed_response:
        return 'NA'
    title = parsed_response['items'][0]['title']
    print title
    breadcrumb = parsed_response['items'][0]['pagemap']['breadcrumb']
    count = 0
    # Build up region string
    region_string = ''
    for key in breadcrumb:
        region = breadcrumb[count]['title']
        if (count > 0) :
            region_string = region + ', ' + region_string
        else :
            region_string = region;
        count+=1
    metatags = parsed_response['items'][0]['pagemap']['metatags']
    country = breadcrumb[0]['title']
    latitude = metatags[0]['place:location:latitude']
    longitude = metatags[0]['place:location:longitude']
    google_pin = 'https://www.google.com/maps/@?api=1&map_action=map&basemap=satellite&zoom=19&center=' + latitude + ',' + longitude
    link = metatags[0]['og:url']
    if (' in ' in title):
        title = title[:title.index(' in ')]
    # Truncate values to 3rd decimal place
    lat_decimal = latitude.index('.')
    latitude = latitude[:lat_decimal+4]
    long_decimal = longitude.index('.')
    longitude = longitude[:long_decimal+4]
    # Format comment response
    return 'I found a route! [' + title + '](' + link + ') in ' + region_string + '\n\nGPS Location: [' + latitude + ', ' + longitude + ']('+google_pin+')' + '\n\n ' + '\n\n^^^I ^^^am ^^^a ^^^bot ^^^| ^^^Data ^^^from ^^^[theCrag.com](https://www.thecrag.com/) ^^^| ^^^Feedback ^^^welcome ^^^at ^^^[r/climbBot](https://www.reddit.com/r/climbBot/)'

if __name__ == "__main__":
    bot = praw.Reddit(
        user_agent='climb-bot posts additional information on climbing routes it finds, created by /u/Akondrich, email: andrewkondrich@gmail.com',
        client_id='***',
        client_secret='***',
        username='climb-bot',
        password='***')
    subreddit = bot.subreddit('climbBot')
    parse_titles(bot, subreddit)
mit
-518,196,464,358,046,460
38.525641
343
0.67337
false
3.019589
false
false
false
yuxng/Deep_ISM
ISM/lib/setup.py
1
6351
# -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- import os from os.path import join as pjoin import numpy as np from distutils.core import setup from distutils.extension import Extension from Cython.Distutils import build_ext def find_in_path(name, path): "Find a file in a search path" #adapted fom http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/ for dir in path.split(os.pathsep): binpath = pjoin(dir, name) if os.path.exists(binpath): return os.path.abspath(binpath) return None def locate_cuda(): """Locate the CUDA environment on the system Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64' and values giving the absolute path to each directory. Starts by looking for the CUDAHOME env variable. If not found, everything is based on finding 'nvcc' in the PATH. """ # first check if the CUDAHOME env variable is in use if 'CUDAHOME' in os.environ: home = os.environ['CUDAHOME'] nvcc = pjoin(home, 'bin', 'nvcc') else: # otherwise, search the PATH for NVCC default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin') nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path) if nvcc is None: raise EnvironmentError('The nvcc binary could not be ' 'located in your $PATH. Either add it to your path, or set $CUDAHOME') home = os.path.dirname(os.path.dirname(nvcc)) cudaconfig = {'home':home, 'nvcc':nvcc, 'include': pjoin(home, 'include'), 'lib64': pjoin(home, 'lib64')} for k, v in cudaconfig.iteritems(): if not os.path.exists(v): raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v)) return cudaconfig CUDA = locate_cuda() # Obtain the numpy include directory. This logic works across numpy versions. try: numpy_include = np.get_include() except AttributeError: numpy_include = np.get_numpy_include() def customize_compiler_for_nvcc(self): """inject deep into distutils to customize how the dispatch to gcc/nvcc works. If you subclass UnixCCompiler, it's not trivial to get your subclass injected in, and still have the right customizations (i.e. distutils.sysconfig.customize_compiler) run on it. So instead of going the OO route, I have this. Note, it's kindof like a wierd functional subclassing going on.""" # tell the compiler it can processes .cu self.src_extensions.append('.cu') # save references to the default compiler_so and _comple methods default_compiler_so = self.compiler_so super = self._compile # now redefine the _compile method. This gets executed for each # object but distutils doesn't have the ability to change compilers # based on source extension: we add it. 
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts): if os.path.splitext(src)[1] == '.cu': # use the cuda for .cu files self.set_executable('compiler_so', CUDA['nvcc']) # use only a subset of the extra_postargs, which are 1-1 translated # from the extra_compile_args in the Extension class postargs = extra_postargs['nvcc'] else: postargs = extra_postargs['gcc'] super(obj, src, ext, cc_args, postargs, pp_opts) # reset the default compiler_so, which we might have changed for cuda self.compiler_so = default_compiler_so # inject our redefined _compile method into the class self._compile = _compile # run the customize_compiler class custom_build_ext(build_ext): def build_extensions(self): customize_compiler_for_nvcc(self.compiler) build_ext.build_extensions(self) ext_modules = [ Extension( "utils.cython_bbox", ["utils/bbox.pyx"], extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]}, ), Extension( "utils.cython_nms", ["utils/nms.pyx"], extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]}, ), Extension( "nms.cpu_nms", ["nms/cpu_nms.pyx"], extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]}, include_dirs = [numpy_include] ), Extension('nms.gpu_nms', ['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'], library_dirs=[CUDA['lib64']], libraries=['cudart'], language='c++', runtime_library_dirs=[CUDA['lib64']], # this syntax is specific to this build system # we're only going to use certain compiler args with nvcc and not with gcc # the implementation of this trick is in customize_compiler() below extra_compile_args={'gcc': ["-Wno-unused-function"], 'nvcc': ['-arch=sm_35', '--ptxas-options=-v', '-c', '--compiler-options', "'-fPIC'"]}, include_dirs = [numpy_include, CUDA['include']] ), Extension('normals.gpu_normals', ['normals/compute_normals.cu', 'normals/gpu_normals.pyx'], library_dirs=[CUDA['lib64']], libraries=['cudart'], language='c++', runtime_library_dirs=[CUDA['lib64']], # this syntax is specific to this build system # we're only going to use certain compiler args with nvcc and not with gcc # the implementation of this trick is in customize_compiler() below extra_compile_args={'gcc': ["-Wno-unused-function"], 'nvcc': ['-arch=sm_35', '--ptxas-options=-v', '-c', '--compiler-options', "'-fPIC'"]}, include_dirs = [numpy_include, CUDA['include'], '/usr/local/include/eigen3'] ) ] setup( name='fast_rcnn', ext_modules=ext_modules, # inject our custom trigger cmdclass={'build_ext': custom_build_ext}, )
mit
-73,697,840,488,066,960
37.490909
91
0.587781
false
3.905904
false
false
false
gamechanger/kafka-python
kafka/protocol/admin.py
1
1182
from .struct import Struct
from .types import Array, Bytes, Int16, Schema, String


class ListGroupsResponse(Struct):
    SCHEMA = Schema(
        ('error_code', Int16),
        ('groups', Array(
            ('group', String('utf-8')),
            ('protocol_type', String('utf-8'))))
    )


class ListGroupsRequest(Struct):
    API_KEY = 16
    API_VERSION = 0
    RESPONSE_TYPE = ListGroupsResponse
    SCHEMA = Schema()


class DescribeGroupsResponse(Struct):
    SCHEMA = Schema(
        ('groups', Array(
            ('error_code', Int16),
            ('group', String('utf-8')),
            ('state', String('utf-8')),
            ('protocol_type', String('utf-8')),
            ('protocol', String('utf-8')),
            ('members', Array(
                ('member_id', String('utf-8')),
                ('client_id', String('utf-8')),
                ('client_host', String('utf-8')),
                ('member_metadata', Bytes),
                ('member_assignment', Bytes)))))
    )


class DescribeGroupsRequest(Struct):
    API_KEY = 15
    API_VERSION = 0
    RESPONSE_TYPE = DescribeGroupsResponse
    SCHEMA = Schema(
        ('groups', Array(String('utf-8')))
    )
apache-2.0
-8,683,488,429,018,159,000
25.863636
54
0.526227
false
3.953177
false
false
false
UTSA-ICS/keystone-SID
keystone/tests/test_auth.py
1
44678
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import uuid import mock from keystone import assignment from keystone import auth from keystone.common import authorization from keystone.common import environment from keystone import config from keystone import exception from keystone.openstack.common import timeutils from keystone import tests from keystone.tests import default_fixtures from keystone import token from keystone import trust CONF = config.CONF TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id HOST_URL = 'http://keystone:5001' def _build_user_auth(token=None, user_id=None, username=None, password=None, tenant_id=None, tenant_name=None, trust_id=None): """Build auth dictionary. It will create an auth dictionary based on all the arguments that it receives. """ auth_json = {} if token is not None: auth_json['token'] = token if username or password: auth_json['passwordCredentials'] = {} if username is not None: auth_json['passwordCredentials']['username'] = username if user_id is not None: auth_json['passwordCredentials']['userId'] = user_id if password is not None: auth_json['passwordCredentials']['password'] = password if tenant_name is not None: auth_json['tenantName'] = tenant_name if tenant_id is not None: auth_json['tenantId'] = tenant_id if trust_id is not None: auth_json['trust_id'] = trust_id return auth_json class AuthTest(tests.TestCase): def setUp(self): super(AuthTest, self).setUp() self.load_backends() self.load_fixtures(default_fixtures) # need to register the token provider first because auth controller # depends on it token.provider.Manager() self.context_with_remote_user = {'environment': {'REMOTE_USER': 'FOO', 'AUTH_TYPE': 'Negotiate'}} self.empty_context = {'environment': {}} self.controller = token.controllers.Auth() #This call sets up, among other things, the call to popen #that will be used to run the CMS command. These tests were #passing only due to the global nature of the call. If the #tests in this file are run alone, API calls return unauthorized. environment.use_eventlet(monkeypatch_thread=False) def assertEqualTokens(self, a, b): """Assert that two tokens are equal. Compare two tokens except for their ids. This also truncates the time in the comparison. 
""" def normalize(token): token['access']['token']['id'] = 'dummy' del token['access']['token']['expires'] del token['access']['token']['issued_at'] return token self.assertCloseEnoughForGovernmentWork( timeutils.parse_isotime(a['access']['token']['expires']), timeutils.parse_isotime(b['access']['token']['expires'])) self.assertCloseEnoughForGovernmentWork( timeutils.parse_isotime(a['access']['token']['issued_at']), timeutils.parse_isotime(b['access']['token']['issued_at'])) return self.assertDictEqual(normalize(a), normalize(b)) class AuthBadRequests(AuthTest): def setUp(self): super(AuthBadRequests, self).setUp() def test_no_external_auth(self): """Verify that _authenticate_external() raises exception if N/A.""" self.assertRaises( token.controllers.ExternalAuthNotApplicable, self.controller._authenticate_external, {}, {}) def test_no_token_in_auth(self): """Verify that _authenticate_token() raises exception if no token.""" self.assertRaises( exception.ValidationError, self.controller._authenticate_token, None, {}) def test_no_credentials_in_auth(self): """Verify that _authenticate_local() raises exception if no creds.""" self.assertRaises( exception.ValidationError, self.controller._authenticate_local, None, {}) def test_authenticate_blank_request_body(self): """Verify sending empty json dict raises the right exception.""" self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, {}) def test_authenticate_blank_auth(self): """Verify sending blank 'auth' raises the right exception.""" body_dict = _build_user_auth() self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_authenticate_invalid_auth_content(self): """Verify sending invalid 'auth' raises the right exception.""" self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, {'auth': 'abcd'}) def test_authenticate_user_id_too_large(self): """Verify sending large 'userId' raises the right exception.""" body_dict = _build_user_auth(user_id='0' * 65, username='FOO', password='foo2') self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_username_too_large(self): """Verify sending large 'username' raises the right exception.""" body_dict = _build_user_auth(username='0' * 65, password='foo2') self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_tenant_id_too_large(self): """Verify sending large 'tenantId' raises the right exception.""" body_dict = _build_user_auth(username='FOO', password='foo2', tenant_id='0' * 65) self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_tenant_name_too_large(self): """Verify sending large 'tenantName' raises the right exception.""" body_dict = _build_user_auth(username='FOO', password='foo2', tenant_name='0' * 65) self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_token_too_large(self): """Verify sending large 'token' raises the right exception.""" body_dict = _build_user_auth(token={'id': '0' * 8193}) self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_password_too_large(self): """Verify sending large 'password' raises the right exception.""" length = CONF.identity.max_password_length + 1 body_dict = _build_user_auth(username='FOO', password='0' * length) self.assertRaises(exception.ValidationSizeError, 
self.controller.authenticate, {}, body_dict) class AuthWithToken(AuthTest): def setUp(self): super(AuthWithToken, self).setUp() def test_unscoped_token(self): """Verify getting an unscoped token with password creds.""" body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate({}, body_dict) self.assertNotIn('tenant', unscoped_token['access']['token']) def test_auth_invalid_token(self): """Verify exception is raised if invalid token.""" body_dict = _build_user_auth(token={"id": uuid.uuid4().hex}) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_bad_formatted_token(self): """Verify exception is raised if invalid token.""" body_dict = _build_user_auth(token={}) self.assertRaises( exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_auth_unscoped_token_no_project(self): """Verify getting an unscoped token with an unscoped token.""" body_dict = _build_user_auth( username='FOO', password='foo2') unscoped_token = self.controller.authenticate({}, body_dict) body_dict = _build_user_auth( token=unscoped_token["access"]["token"]) unscoped_token_2 = self.controller.authenticate({}, body_dict) self.assertEqualTokens(unscoped_token, unscoped_token_2) def test_auth_unscoped_token_project(self): """Verify getting a token in a tenant with an unscoped token.""" # Add a role in so we can check we get this back self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], self.role_member['id']) # Get an unscoped tenant body_dict = _build_user_auth( username='FOO', password='foo2') unscoped_token = self.controller.authenticate({}, body_dict) # Get a token on BAR tenant using the unscoped tenant body_dict = _build_user_auth( token=unscoped_token["access"]["token"], tenant_name="BAR") scoped_token = self.controller.authenticate({}, body_dict) tenant = scoped_token["access"]["token"]["tenant"] roles = scoped_token["access"]["metadata"]["roles"] self.assertEqual(self.tenant_bar['id'], tenant["id"]) self.assertEqual(self.role_member['id'], roles[0]) def test_auth_token_project_group_role(self): """Verify getting a token in a tenant with group roles.""" # Add a v2 style role in so we can check we get this back self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], self.role_member['id']) # Now create a group role for this user as well domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.assignment_api.create_domain(domain1['id'], domain1) new_group = {'id': uuid.uuid4().hex, 'domain_id': domain1['id'], 'name': uuid.uuid4().hex} self.identity_api.create_group(new_group['id'], new_group) self.identity_api.add_user_to_group(self.user_foo['id'], new_group['id']) self.assignment_api.create_grant( group_id=new_group['id'], project_id=self.tenant_bar['id'], role_id=self.role_admin['id']) # Get a scoped token for the tenant body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") scoped_token = self.controller.authenticate({}, body_dict) tenant = scoped_token["access"]["token"]["tenant"] roles = scoped_token["access"]["metadata"]["roles"] self.assertEqual(self.tenant_bar['id'], tenant["id"]) self.assertIn(self.role_member['id'], roles) self.assertIn(self.role_admin['id'], roles) def test_auth_token_cross_domain_group_and_project(self): """Verify getting a token in cross domain group/project roles.""" # create domain, project and group and grant roles to user domain1 = {'id': 
uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.assignment_api.create_domain(domain1['id'], domain1) project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': domain1['id']} self.assignment_api.create_project(project1['id'], project1) role_foo_domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.assignment_api.create_role(role_foo_domain1['id'], role_foo_domain1) role_group_domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.assignment_api.create_role(role_group_domain1['id'], role_group_domain1) self.assignment_api.add_user_to_project(project1['id'], self.user_foo['id']) new_group = {'id': uuid.uuid4().hex, 'domain_id': domain1['id'], 'name': uuid.uuid4().hex} self.identity_api.create_group(new_group['id'], new_group) self.identity_api.add_user_to_group(self.user_foo['id'], new_group['id']) self.assignment_api.create_grant( user_id=self.user_foo['id'], project_id=project1['id'], role_id=self.role_member['id']) self.assignment_api.create_grant( group_id=new_group['id'], project_id=project1['id'], role_id=self.role_admin['id']) self.assignment_api.create_grant( user_id=self.user_foo['id'], domain_id=domain1['id'], role_id=role_foo_domain1['id']) self.assignment_api.create_grant( group_id=new_group['id'], domain_id=domain1['id'], role_id=role_group_domain1['id']) # Get a scoped token for the tenant body_dict = _build_user_auth( username=self.user_foo['name'], password=self.user_foo['password'], tenant_name=project1['name']) scoped_token = self.controller.authenticate({}, body_dict) tenant = scoped_token["access"]["token"]["tenant"] roles = scoped_token["access"]["metadata"]["roles"] self.assertEqual(project1['id'], tenant["id"]) self.assertIn(self.role_member['id'], roles) self.assertIn(self.role_admin['id'], roles) self.assertNotIn(role_foo_domain1['id'], roles) self.assertNotIn(role_group_domain1['id'], roles) def test_belongs_to_no_tenant(self): r = self.controller.authenticate( {}, auth={ 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'] } }) unscoped_token_id = r['access']['token']['id'] self.assertRaises( exception.Unauthorized, self.controller.validate_token, dict(is_admin=True, query_string={'belongsTo': 'BAR'}), token_id=unscoped_token_id) def test_belongs_to(self): body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") scoped_token = self.controller.authenticate({}, body_dict) scoped_token_id = scoped_token['access']['token']['id'] self.assertRaises( exception.Unauthorized, self.controller.validate_token, dict(is_admin=True, query_string={'belongsTo': 'me'}), token_id=scoped_token_id) self.assertRaises( exception.Unauthorized, self.controller.validate_token, dict(is_admin=True, query_string={'belongsTo': 'BAR'}), token_id=scoped_token_id) def test_token_auth_with_binding(self): self.config_fixture.config(group='token', bind=['kerberos']) body_dict = _build_user_auth() unscoped_token = self.controller.authenticate( self.context_with_remote_user, body_dict) # the token should have bind information in it bind = unscoped_token['access']['token']['bind'] self.assertEqual('FOO', bind['kerberos']) body_dict = _build_user_auth( token=unscoped_token['access']['token'], tenant_name='BAR') # using unscoped token without remote user context fails self.assertRaises( exception.Unauthorized, self.controller.authenticate, self.empty_context, body_dict) # using token with remote user context succeeds scoped_token = self.controller.authenticate( self.context_with_remote_user, 
body_dict) # the bind information should be carried over from the original token bind = scoped_token['access']['token']['bind'] self.assertEqual('FOO', bind['kerberos']) def test_deleting_role_revokes_token(self): role_controller = assignment.controllers.Role() project1 = {'id': 'Project1', 'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID} self.assignment_api.create_project(project1['id'], project1) role_one = {'id': 'role_one', 'name': uuid.uuid4().hex} self.assignment_api.create_role(role_one['id'], role_one) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], project1['id'], role_one['id']) no_context = {} # Get a scoped token for the tenant body_dict = _build_user_auth( username=self.user_foo['name'], password=self.user_foo['password'], tenant_name=project1['name']) token = self.controller.authenticate(no_context, body_dict) # Ensure it is valid token_id = token['access']['token']['id'] self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=token_id) # Delete the role, which should invalidate the token role_controller.delete_role( dict(is_admin=True, query_string={}), role_one['id']) # Check the token is now invalid self.assertRaises( exception.TokenNotFound, self.controller.validate_token, dict(is_admin=True, query_string={}), token_id=token_id) class AuthWithPasswordCredentials(AuthTest): def setUp(self): super(AuthWithPasswordCredentials, self).setUp() def test_auth_invalid_user(self): """Verify exception is raised if invalid user.""" body_dict = _build_user_auth( username=uuid.uuid4().hex, password=uuid.uuid4().hex) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_valid_user_invalid_password(self): """Verify exception is raised if invalid password.""" body_dict = _build_user_auth( username="FOO", password=uuid.uuid4().hex) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_empty_password(self): """Verify exception is raised if empty password.""" body_dict = _build_user_auth( username="FOO", password="") self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_no_password(self): """Verify exception is raised if empty password.""" body_dict = _build_user_auth(username="FOO") self.assertRaises( exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_authenticate_blank_password_credentials(self): """Sending empty dict as passwordCredentials raises a 400 error.""" body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'} self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_authenticate_no_username(self): """Verify skipping username raises the right exception.""" body_dict = _build_user_auth(password="pass", tenant_name="demo") self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_bind_without_remote_user(self): self.config_fixture.config(group='token', bind=['kerberos']) body_dict = _build_user_auth(username='FOO', password='foo2', tenant_name='BAR') token = self.controller.authenticate({}, body_dict) self.assertNotIn('bind', token['access']['token']) def test_change_default_domain_id(self): # If the default_domain_id config option is not the default then the # user in auth data is from the new default domain. # 1) Create a new domain. 
new_domain_id = uuid.uuid4().hex new_domain = { 'description': uuid.uuid4().hex, 'enabled': True, 'id': new_domain_id, 'name': uuid.uuid4().hex, } self.assignment_api.create_domain(new_domain_id, new_domain) # 2) Create user "foo" in new domain with different password than # default-domain foo. new_user_id = uuid.uuid4().hex new_user_password = uuid.uuid4().hex new_user = { 'id': new_user_id, 'name': self.user_foo['name'], 'domain_id': new_domain_id, 'password': new_user_password, 'email': 'foo@bar2.com', } self.identity_api.create_user(new_user_id, new_user) # 3) Update the default_domain_id config option to the new domain self.config_fixture.config(group='identity', default_domain_id=new_domain_id) # 4) Authenticate as "foo" using the password in the new domain. body_dict = _build_user_auth( username=self.user_foo['name'], password=new_user_password) # The test is successful if this doesn't raise, so no need to assert. self.controller.authenticate({}, body_dict) class AuthWithRemoteUser(AuthTest): def setUp(self): super(AuthWithRemoteUser, self).setUp() def test_unscoped_remote_authn(self): """Verify getting an unscoped token with external authn.""" body_dict = _build_user_auth( username='FOO', password='foo2') local_token = self.controller.authenticate( {}, body_dict) body_dict = _build_user_auth() remote_token = self.controller.authenticate( self.context_with_remote_user, body_dict) self.assertEqualTokens(local_token, remote_token) def test_unscoped_remote_authn_jsonless(self): """Verify that external auth with invalid request fails.""" self.assertRaises( exception.ValidationError, self.controller.authenticate, {'REMOTE_USER': 'FOO'}, None) def test_scoped_remote_authn(self): """Verify getting a token with external authn.""" body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name='BAR') local_token = self.controller.authenticate( {}, body_dict) body_dict = _build_user_auth( tenant_name='BAR') remote_token = self.controller.authenticate( self.context_with_remote_user, body_dict) self.assertEqualTokens(local_token, remote_token) def test_scoped_nometa_remote_authn(self): """Verify getting a token with external authn and no metadata.""" body_dict = _build_user_auth( username='TWO', password='two2', tenant_name='BAZ') local_token = self.controller.authenticate( {}, body_dict) body_dict = _build_user_auth(tenant_name='BAZ') remote_token = self.controller.authenticate( {'environment': {'REMOTE_USER': 'TWO'}}, body_dict) self.assertEqualTokens(local_token, remote_token) def test_scoped_remote_authn_invalid_user(self): """Verify that external auth with invalid user fails.""" body_dict = _build_user_auth(tenant_name="BAR") self.assertRaises( exception.Unauthorized, self.controller.authenticate, {'environment': {'REMOTE_USER': uuid.uuid4().hex}}, body_dict) def test_bind_with_kerberos(self): self.config_fixture.config(group='token', bind=['kerberos']) body_dict = _build_user_auth(tenant_name="BAR") token = self.controller.authenticate(self.context_with_remote_user, body_dict) self.assertEqual('FOO', token['access']['token']['bind']['kerberos']) def test_bind_without_config_opt(self): self.config_fixture.config(group='token', bind=['x509']) body_dict = _build_user_auth(tenant_name='BAR') token = self.controller.authenticate(self.context_with_remote_user, body_dict) self.assertNotIn('bind', token['access']['token']) class AuthWithTrust(AuthTest): def setUp(self): super(AuthWithTrust, self).setUp() trust.Manager() self.trust_controller = trust.controllers.TrustV3() 
self.auth_v3_controller = auth.controllers.Auth() self.trustor = self.user_foo self.trustee = self.user_two self.assigned_roles = [self.role_member['id'], self.role_browser['id']] for assigned_role in self.assigned_roles: self.assignment_api.add_role_to_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) self.sample_data = {'trustor_user_id': self.trustor['id'], 'trustee_user_id': self.trustee['id'], 'project_id': self.tenant_bar['id'], 'impersonation': True, 'roles': [{'id': self.role_browser['id']}, {'name': self.role_member['name']}]} expires_at = timeutils.strtime(timeutils.utcnow() + datetime.timedelta(minutes=10), fmt=TIME_FORMAT) self.create_trust(expires_at=expires_at) def config_overrides(self): super(AuthWithTrust, self).config_overrides() self.config_fixture.config(group='trust', enabled=True) def _create_auth_context(self, token_id): token_ref = self.token_api.get_token(token_id) auth_context = authorization.token_to_auth_context( token_ref['token_data']) return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context}, 'token_id': token_id, 'host_url': HOST_URL} def create_trust(self, expires_at=None, impersonation=True): username = self.trustor['name'] password = 'foo2' body_dict = _build_user_auth(username=username, password=password) self.unscoped_token = self.controller.authenticate({}, body_dict) context = self._create_auth_context( self.unscoped_token['access']['token']['id']) trust_data = copy.deepcopy(self.sample_data) trust_data['expires_at'] = expires_at trust_data['impersonation'] = impersonation self.new_trust = self.trust_controller.create_trust( context, trust=trust_data)['trust'] def build_v2_token_request(self, username, password): body_dict = _build_user_auth(username=username, password=password) self.unscoped_token = self.controller.authenticate({}, body_dict) unscoped_token_id = self.unscoped_token['access']['token']['id'] request_body = _build_user_auth(token={'id': unscoped_token_id}, trust_id=self.new_trust['id'], tenant_id=self.tenant_bar['id']) return request_body def test_create_trust_bad_data_fails(self): context = self._create_auth_context( self.unscoped_token['access']['token']['id']) bad_sample_data = {'trustor_user_id': self.trustor['id'], 'project_id': self.tenant_bar['id'], 'roles': [{'id': self.role_browser['id']}]} self.assertRaises(exception.ValidationError, self.trust_controller.create_trust, context, trust=bad_sample_data) def test_create_trust_no_roles(self): context = {'token_id': self.unscoped_token['access']['token']['id']} self.sample_data['roles'] = [] self.assertRaises(exception.Forbidden, self.trust_controller.create_trust, context, trust=self.sample_data) def test_create_trust(self): self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id']) role_ids = [self.role_browser['id'], self.role_member['id']] self.assertTrue(timeutils.parse_strtime(self.new_trust['expires_at'], fmt=TIME_FORMAT)) self.assertIn('%s/v3/OS-TRUST/' % HOST_URL, self.new_trust['links']['self']) self.assertIn('%s/v3/OS-TRUST/' % HOST_URL, self.new_trust['roles_links']['self']) for role in self.new_trust['roles']: self.assertIn(role['id'], role_ids) def test_create_trust_expires_bad(self): self.assertRaises(exception.ValidationTimeStampError, self.create_trust, expires_at="bad") self.assertRaises(exception.ValidationTimeStampError, self.create_trust, expires_at="") self.assertRaises(exception.ValidationTimeStampError, self.create_trust, expires_at="Z") 
def test_get_trust(self): context = {'token_id': self.unscoped_token['access']['token']['id'], 'host_url': HOST_URL} trust = self.trust_controller.get_trust(context, self.new_trust['id'])['trust'] self.assertEqual(self.trustor['id'], trust['trustor_user_id']) self.assertEqual(self.trustee['id'], trust['trustee_user_id']) role_ids = [self.role_browser['id'], self.role_member['id']] for role in self.new_trust['roles']: self.assertIn(role['id'], role_ids) def test_create_trust_no_impersonation(self): self.create_trust(expires_at=None, impersonation=False) self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id']) self.assertIs(self.new_trust['impersonation'], False) auth_response = self.fetch_v2_token_from_trust() token_user = auth_response['access']['user'] self.assertEqual(token_user['id'], self.new_trust['trustee_user_id']) # TODO(ayoung): Endpoints def test_create_trust_impersonation(self): self.create_trust(expires_at=None) self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id']) self.assertIs(self.new_trust['impersonation'], True) auth_response = self.fetch_v2_token_from_trust() token_user = auth_response['access']['user'] self.assertEqual(token_user['id'], self.new_trust['trustor_user_id']) def test_token_from_trust_wrong_user_fails(self): request_body = self.build_v2_token_request('FOO', 'foo2') self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def fetch_v2_token_from_trust(self): request_body = self.build_v2_token_request('TWO', 'two2') auth_response = self.controller.authenticate({}, request_body) return auth_response def fetch_v3_token_from_trust(self): v3_password_data = { 'identity': { "methods": ["password"], "password": { "user": { "id": self.trustee["id"], "password": self.trustee["password"]}} }, 'scope': { 'project': { 'id': self.tenant_baz['id']}}} auth_response = (self.auth_v3_controller.authenticate_for_token ({'environment': {}, 'query_string': {}}, v3_password_data)) token = auth_response.headers['X-Subject-Token'] v3_req_with_trust = { "identity": { "methods": ["token"], "token": {"id": token}}, "scope": { "OS-TRUST:trust": {"id": self.new_trust['id']}}} token_auth_response = (self.auth_v3_controller.authenticate_for_token ({'environment': {}, 'query_string': {}}, v3_req_with_trust)) return token_auth_response def test_create_v3_token_from_trust(self): auth_response = self.fetch_v3_token_from_trust() trust_token_user = auth_response.json['token']['user'] self.assertEqual(self.trustor['id'], trust_token_user['id']) trust_token_trust = auth_response.json['token']['OS-TRUST:trust'] self.assertEqual(trust_token_trust['id'], self.new_trust['id']) self.assertEqual(self.trustor['id'], trust_token_trust['trustor_user']['id']) self.assertEqual(self.trustee['id'], trust_token_trust['trustee_user']['id']) trust_token_roles = auth_response.json['token']['roles'] self.assertEqual(2, len(trust_token_roles)) def test_v3_trust_token_get_token_fails(self): auth_response = self.fetch_v3_token_from_trust() trust_token = auth_response.headers['X-Subject-Token'] v3_token_data = {'identity': { 'methods': ['token'], 'token': {'id': trust_token} }} self.assertRaises( exception.Forbidden, self.auth_v3_controller.authenticate_for_token, {'environment': {}, 'query_string': {}}, v3_token_data) def test_token_from_trust(self): auth_response = self.fetch_v2_token_from_trust() 
self.assertIsNotNone(auth_response) self.assertEqual(2, len(auth_response['access']['metadata']['roles']), "user_foo has three roles, but the token should" " only get the two roles specified in the trust.") def assert_token_count_for_trust(self, expected_value): tokens = self.trust_controller.token_api._list_tokens( self.trustee['id'], trust_id=self.new_trust['id']) token_count = len(tokens) self.assertEqual(expected_value, token_count) def test_delete_tokens_for_user_invalidates_tokens_from_trust(self): self.assert_token_count_for_trust(0) self.fetch_v2_token_from_trust() self.assert_token_count_for_trust(1) self.token_api.delete_tokens_for_user(self.trustee['id']) self.assert_token_count_for_trust(0) def test_token_from_trust_cant_get_another_token(self): auth_response = self.fetch_v2_token_from_trust() trust_token_id = auth_response['access']['token']['id'] request_body = _build_user_auth(token={'id': trust_token_id}, tenant_id=self.tenant_bar['id']) self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_delete_trust_revokes_token(self): context = self._create_auth_context( self.unscoped_token['access']['token']['id']) self.fetch_v2_token_from_trust() trust_id = self.new_trust['id'] tokens = self.token_api._list_tokens(self.trustor['id'], trust_id=trust_id) self.assertEqual(1, len(tokens)) self.trust_controller.delete_trust(context, trust_id=trust_id) tokens = self.token_api._list_tokens(self.trustor['id'], trust_id=trust_id) self.assertEqual(0, len(tokens)) def test_token_from_trust_with_no_role_fails(self): for assigned_role in self.assigned_roles: self.assignment_api.remove_role_from_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) request_body = self.build_v2_token_request('TWO', 'two2') self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_expired_trust_get_token_fails(self): expiry = "1999-02-18T10:10:00Z" self.create_trust(expiry) request_body = self.build_v2_token_request('TWO', 'two2') self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_token_from_trust_with_wrong_role_fails(self): self.assignment_api.add_role_to_user_and_project( self.trustor['id'], self.tenant_bar['id'], self.role_other['id']) for assigned_role in self.assigned_roles: self.assignment_api.remove_role_from_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) request_body = self.build_v2_token_request('TWO', 'two2') self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) class TokenExpirationTest(AuthTest): @mock.patch.object(timeutils, 'utcnow') def _maintain_token_expiration(self, mock_utcnow): """Token expiration should be maintained after re-auth & validation.""" now = datetime.datetime.utcnow() mock_utcnow.return_value = now r = self.controller.authenticate( {}, auth={ 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'] } }) unscoped_token_id = r['access']['token']['id'] original_expiration = r['access']['token']['expires'] mock_utcnow.return_value = now + datetime.timedelta(seconds=1) r = self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=unscoped_token_id) self.assertEqual(original_expiration, r['access']['token']['expires']) mock_utcnow.return_value = now + datetime.timedelta(seconds=2) r = self.controller.authenticate( {}, auth={ 'token': { 'id': unscoped_token_id, }, 'tenantId': self.tenant_bar['id'], }) scoped_token_id = 
r['access']['token']['id'] self.assertEqual(original_expiration, r['access']['token']['expires']) mock_utcnow.return_value = now + datetime.timedelta(seconds=3) r = self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=scoped_token_id) self.assertEqual(original_expiration, r['access']['token']['expires']) def test_maintain_uuid_token_expiration(self): self.config_fixture.config(group='signing', token_format='UUID') self._maintain_token_expiration() class AuthCatalog(tests.SQLDriverOverrides, AuthTest): """Tests for the catalog provided in the auth response.""" def config_files(self): config_files = super(AuthCatalog, self).config_files() # We need to use a backend that supports disabled endpoints, like the # SQL backend. config_files.append(tests.dirs.tests_conf('backend_sql.conf')) return config_files def _create_endpoints(self): def create_endpoint(service_id, region, **kwargs): id_ = uuid.uuid4().hex ref = { 'id': id_, 'interface': 'public', 'region': region, 'service_id': service_id, 'url': 'http://localhost/%s' % uuid.uuid4().hex, } ref.update(kwargs) self.catalog_api.create_endpoint(id_, ref) return ref # Create a service for use with the endpoints. def create_service(**kwargs): id_ = uuid.uuid4().hex ref = { 'id': id_, 'name': uuid.uuid4().hex, 'type': uuid.uuid4().hex, } ref.update(kwargs) self.catalog_api.create_service(id_, ref) return ref enabled_service_ref = create_service(enabled=True) disabled_service_ref = create_service(enabled=False) region = uuid.uuid4().hex # Create endpoints enabled_endpoint_ref = create_endpoint( enabled_service_ref['id'], region) create_endpoint( enabled_service_ref['id'], region, enabled=False, interface='internal') create_endpoint( disabled_service_ref['id'], region) return enabled_endpoint_ref def test_auth_catalog_disabled_endpoint(self): """On authenticate, get a catalog that excludes disabled endpoints.""" endpoint_ref = self._create_endpoints() # Authenticate body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") token = self.controller.authenticate({}, body_dict) # Check the catalog self.assertEqual(1, len(token['access']['serviceCatalog'])) endpoint = token['access']['serviceCatalog'][0]['endpoints'][0] self.assertEqual( 1, len(token['access']['serviceCatalog'][0]['endpoints'])) exp_endpoint = { 'id': endpoint_ref['id'], 'publicURL': endpoint_ref['url'], 'region': endpoint_ref['region'], } self.assertEqual(exp_endpoint, endpoint) def test_validate_catalog_disabled_endpoint(self): """On validate, get back a catalog that excludes disabled endpoints.""" endpoint_ref = self._create_endpoints() # Authenticate body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") token = self.controller.authenticate({}, body_dict) # Validate token_id = token['access']['token']['id'] validate_ref = self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=token_id) # Check the catalog self.assertEqual(1, len(token['access']['serviceCatalog'])) endpoint = validate_ref['access']['serviceCatalog'][0]['endpoints'][0] self.assertEqual( 1, len(token['access']['serviceCatalog'][0]['endpoints'])) exp_endpoint = { 'id': endpoint_ref['id'], 'publicURL': endpoint_ref['url'], 'region': endpoint_ref['region'], } self.assertEqual(exp_endpoint, endpoint) class NonDefaultAuthTest(tests.TestCase): def test_add_non_default_auth_method(self): self.config_fixture.config(group='auth', methods=['password', 'token', 'custom']) config.setup_authentication() 
self.assertTrue(hasattr(CONF.auth, 'custom'))
apache-2.0
159,340,296,657,682,560
39.839122
79
0.575406
false
4.143758
true
false
false
kritak/textdungeon
Internal/pricerandomtester.py
1
1114
"""testing random frequency of items based on price for item. a cheap item is more common, a expensive item is very rare""" import random d = {"healing":50, "berserk":60, "clever":100, "swiftness":100, "might":100, "awesomeness":500, } # reverse d dr = [[1/b,a] for [a,b] in d.items()] # list of [price, drinkname] dr.sort() # sort this list by price pricelist1 = [a for [a,b] in dr] # list of price only drinklist = [b for [a,b] in dr] # list of drinkname only pricelist2 = [] # list of added up prices kprice = 0 for p in pricelist1: kprice += p pricelist2.append(kprice) print(pricelist1, pricelist2) result = {} print("calculating please wait...") for x in range(10000): y = random.random()*(pricelist2[-1]) # 1 to maxprice for p in pricelist2: if y < p: drinkname = drinklist[pricelist2.index(p)] if drinkname in result: result[drinkname] += 1 else: result[drinkname] = 1 break print(result)
gpl-2.0
-7,916,032,930,120,072,000
24.906977
66
0.561939
false
3.375758
false
false
false
unt-libraries/django-name
name/api/serializers.py
1
6208
"""Serializers for the Name App Models. This module leverages the Django Rest Framework's Serializer components to build JSON representations of the models defined in this app. These JSON representations are designed to be backwards compatible with the API documented in previous versions. For documentation regarding the Django Rest Framework Serializers go to http://www.django-rest-framework.org/api-guide/serializers/ """ from rest_framework import serializers from .. import models class IdentifierSerializer(serializers.ModelSerializer): """Serializer for the Identifier Model. The following fields have been renamed for backwards compatibility with previous versions of the API. label -> identifier.type href -> identifier.value """ label = serializers.StringRelatedField(source='type') href = serializers.CharField(source='value') class Meta: model = models.Identifier fields = ('label', 'href') class NoteSerializer(serializers.ModelSerializer): """Serializer for the Note Model.""" type = serializers.SerializerMethodField() class Meta: model = models.Note fields = ('note', 'type') def get_type(self, obj): """Sets the type field. Returns the Note Type label, instead of the Note Type ID, which is the default behavior. """ return obj.get_note_type_label().lower() class VariantSerializer(serializers.ModelSerializer): """Serializer for the Variant Model.""" type = serializers.SerializerMethodField() class Meta: model = models.Variant fields = ('variant', 'type') def get_type(self, obj): """Sets the type field. Returns the Variant Type label, instead of the Variant Type ID, which is the default behavior. """ return obj.get_variant_type_label().lower() class NameSerializer(serializers.ModelSerializer): """Serializer for the Name Model. This serializes the the Name model to include detailed information about the object, including the related Variants, Notes, and Identifiers. The following fields have been renamed for backwards compatibility with previous versions of the API. authoritative_name -> name.name begin_date -> name.begin end_date -> name.end The identifier field is the absolute url to the name detail page for the model instance. """ authoritative_name = serializers.CharField(source='name') begin_date = serializers.CharField(source='begin') name_type = serializers.SerializerMethodField() end_date = serializers.CharField(source='end') links = IdentifierSerializer(many=True, source='identifier_set') notes = NoteSerializer(many=True, source='note_set') variants = VariantSerializer(many=True, source='variant_set') identifier = serializers.HyperlinkedIdentityField( view_name='name:detail', lookup_field='name_id') class Meta: model = models.Name fields = ('authoritative_name', 'name_type', 'begin_date', 'end_date', 'identifier', 'links', 'notes', 'variants',) def get_name_type(self, obj): """Sets the name_type field. Returns the Name Type label, instead of the Name Type ID, which is the default behavior. """ return obj.get_name_type_label().lower() class NameSearchSerializer(serializers.ModelSerializer): """Name Model Serializer for the Name search/autocompletion endpoint. The following fields have been renamed for backwards compatibility with previous versions of the API. begin_date -> name.begin type -> name.get_name_type_label() label -> Formats name.name and name.disambiguation. The URL field is the absolute url to the name detail page for the model instance. 
""" begin_date = serializers.CharField(source='begin') type = serializers.SerializerMethodField() label = serializers.SerializerMethodField() URL = serializers.HyperlinkedIdentityField( view_name='name:detail', lookup_field='name_id') class Meta: model = models.Name fields = ('id', 'name', 'label', 'type', 'begin_date', 'disambiguation', 'URL') def get_type(self, obj): """Sets the type field. Returns the Name Type label, instead of the Name Type ID, which is the default behavior. """ return obj.get_name_type_label().lower() def get_label(self, obj): """Sets the label field. Returns a string in the form of "<name.name> (<name.disambiguation>)" """ if obj.disambiguation: return '{0} ({1})'.format(obj.name, obj.disambiguation) return obj.name class LocationSerializer(serializers.ModelSerializer): """Serailizer for the Locations Model. This includes the related Name via the belong_to_name field. The belong_to_name field uses the NameSerializer to nest the related Name model. """ belong_to_name = NameSerializer() class Meta: model = models.Location fields = '__all__' class NameStatisticsMonthSerializer(serializers.Serializer): """Serializer for the NameStatisticsMonth object.""" total = serializers.IntegerField() total_to_date = serializers.IntegerField() month = serializers.DateTimeField() class NameStatisticsTypeSerializer(serializers.Serializer): """Serializer for the NameStatisticsType object. This serializer utilizes the NameStatisticsTypeMonth to serialize the NameStatisticsMonth instances that the object instance contains. """ running_total = serializers.IntegerField() stats = NameStatisticsMonthSerializer(many=True) class NameStatisticsSerializer(serializers.Serializer): """Serializer for the NameStatistics object. This serializer utilizes the NameStatisticsTypeSerializer to serialize the NameStatisticsType instances that the object instance contains. """ created = NameStatisticsTypeSerializer() modified = NameStatisticsTypeSerializer() name_type_totals = serializers.DictField()
bsd-3-clause
-1,052,281,697,192,771,800
31.502618
78
0.6875
false
4.568065
false
false
false
mithron/opendatahack
web/main.py
1
1805
from datetime import datetime
import json
import os
from urlparse import urlparse

from pymongo.connection import Connection
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options

MONGO_URL = ""  # found with $>heroku config
we_live = True


class Application(tornado.web.Application):
    def __init__(self):
        handlers = [
            (r"/list/", MainHandler),
            (r"/([0-9]+)/", SchoolHandler)
        ]
        settings = dict(
            autoescape=None,
        )
        tornado.web.Application.__init__(self, handlers, **settings)
        if we_live:
            self.con = Connection(MONGO_URL)
            self.database = self.con[urlparse(MONGO_URL).path[1:]]
        else:
            self.con = Connection('localhost', 27017)
            self.database = self.con["moscow"]


class BaseHandler(tornado.web.RequestHandler):
    @property
    def db(self):
        return self.application.database


class SchoolHandler(BaseHandler):
    def get(self, inn=None):
        if inn:
            suppliers = list(self.db["suppliers"].find({'inn': int(inn)}, fields={"_id": False}))
            self.write(json.dumps(suppliers, ensure_ascii=False, encoding='utf8'))
        else:
            self.write("[]")


class MainHandler(BaseHandler):
    def get(self):
        schools = list(self.db["suppliers"].find(fields={"full_name": True, "inn": True, "_id": False}))
        self.write(json.dumps(schools, ensure_ascii=False, encoding='utf8'))


def main():
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(int(os.environ.get("PORT", 8888)))
    tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    main()
mit
941,218,873,183,261,400
26.363636
104
0.628255
false
3.752599
false
false
false
CloudBoltSoftware/cloudbolt-forge
ui_extensions/veeam_admin_extension/restore_backup.py
1
1717
import requests
import time

from xml.dom import minidom

from common.methods import set_progress
from xui.veeam.veeam_admin import VeeamManager


def run(server, *args, **kwargs):
    set_progress(f"Starting Veeam Backup restoration... ")
    veeam = VeeamManager()
    server_ci = veeam.get_connection_info()
    url = f'http://{server_ci.ip}:9399/api/vmRestorePoints/' + \
        kwargs.get('restore_point_href') + '?action=restore'

    session_id = veeam.get_veeam_server_session_id()
    header = {"X-RestSvcSessionId": session_id}

    response = requests.post(url=url, headers=header)

    task = minidom.parseString(response.content.decode('utf-8'))
    items = task.getElementsByTagName('Task')[0].attributes.items()
    restoration_url = [item for item in items if item[0] == 'Href'][0][-1]

    def check_state():
        response = requests.get(restoration_url, headers=header)
        dom = minidom.parseString(response.content.decode('utf-8'))
        state = dom.getElementsByTagName('State')[0]
        child = state.firstChild
        return child

    # Wait until the restoration to completed.
    while check_state().data == 'Running':
        # wait
        set_progress("Waiting for restoration to complete...")
        time.sleep(10)

    if check_state().data == 'Finished':
        set_progress("Server restoration completed successfully")
        return "SUCCESS", "Server restoration completed successfully", ""
    else:
        set_progress("Server restoration didn't complete successfully")
        return "FAILURE", "", "Server restoration didn't complete successfully"
apache-2.0
4,563,522,579,595,640,300
38.022727
83
0.630169
false
4.127404
false
false
false
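run() above polls a Veeam REST task resource until its State element leaves 'Running'. Below is a generic version of that polling loop with a timeout, sketched under the same assumptions about the XML task schema used in the record; wait_for_task is an illustrative helper, not part of the Veeam or CloudBolt API.

import time
from xml.dom import minidom

import requests


def wait_for_task(task_url, headers, interval=10, timeout=600):
    """Poll an XML task resource until its State element is not 'Running'."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        response = requests.get(task_url, headers=headers)
        dom = minidom.parseString(response.content.decode('utf-8'))
        state = dom.getElementsByTagName('State')[0].firstChild.data
        if state != 'Running':
            return state  # e.g. 'Finished' or a failure state
        time.sleep(interval)
    raise TimeoutError('Task did not finish within {} seconds'.format(timeout))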
ArchiveTeam/spuf-grab
pipeline.py
1
11245
# encoding=utf8 import datetime from distutils.version import StrictVersion import hashlib import os.path import random from seesaw.config import realize, NumberConfigValue from seesaw.externalprocess import ExternalProcess from seesaw.item import ItemInterpolation, ItemValue from seesaw.task import SimpleTask, LimitConcurrent from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \ UploadWithTracker, SendDoneToTracker import shutil import socket import subprocess import sys import time import string import seesaw from seesaw.externalprocess import WgetDownload from seesaw.pipeline import Pipeline from seesaw.project import Project from seesaw.util import find_executable # check the seesaw version if StrictVersion(seesaw.__version__) < StrictVersion("0.8.5"): raise Exception("This pipeline needs seesaw version 0.8.5 or higher.") ########################################################################### # Find a useful Wget+Lua executable. # # WGET_LUA will be set to the first path that # 1. does not crash with --version, and # 2. prints the required version string WGET_LUA = find_executable( "Wget+Lua", ["GNU Wget 1.14.lua.20130523-9a5c", "GNU Wget 1.14.lua.20160530-955376b"], [ "./wget-lua", "./wget-lua-warrior", "./wget-lua-local", "../wget-lua", "../../wget-lua", "/home/warrior/wget-lua", "/usr/bin/wget-lua" ] ) if not WGET_LUA: raise Exception("No usable Wget+Lua found.") ########################################################################### # The version number of this pipeline definition. # # Update this each time you make a non-cosmetic change. # It will be added to the WARC files and reported to the tracker. VERSION = "20170615.01" USER_AGENT = 'ArchiveTeam' TRACKER_ID = 'spuf' TRACKER_HOST = 'tracker.archiveteam.org' ########################################################################### # This section defines project-specific tasks. # # Simple tasks (tasks that do not need any concurrency) are based on the # SimpleTask class and have a process(item) method that is called for # each item. class CheckIP(SimpleTask): def __init__(self): SimpleTask.__init__(self, "CheckIP") self._counter = 0 def process(self, item): # NEW for 2014! Check if we are behind firewall/proxy if self._counter <= 0: item.log_output('Checking IP address.') ip_set = set() ip_set.add(socket.gethostbyname('twitter.com')) ip_set.add(socket.gethostbyname('facebook.com')) ip_set.add(socket.gethostbyname('youtube.com')) ip_set.add(socket.gethostbyname('microsoft.com')) ip_set.add(socket.gethostbyname('icanhas.cheezburger.com')) ip_set.add(socket.gethostbyname('archiveteam.org')) if len(ip_set) != 6: item.log_output('Got IP addresses: {0}'.format(ip_set)) item.log_output( 'Are you behind a firewall/proxy? That is a big no-no!') raise Exception( 'Are you behind a firewall/proxy? 
That is a big no-no!') # Check only occasionally if self._counter <= 0: self._counter = 10 else: self._counter -= 1 class PrepareDirectories(SimpleTask): def __init__(self, warc_prefix): SimpleTask.__init__(self, "PrepareDirectories") self.warc_prefix = warc_prefix def process(self, item): item_name = item["item_name"] escaped_item_name = item_name.replace(':', '_').replace('/', '_').replace('~', '_') dirname = "/".join((item["data_dir"], escaped_item_name)) if os.path.isdir(dirname): shutil.rmtree(dirname) os.makedirs(dirname) item["item_dir"] = dirname item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix, escaped_item_name, time.strftime("%Y%m%d-%H%M%S")) open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close() class MoveFiles(SimpleTask): def __init__(self): SimpleTask.__init__(self, "MoveFiles") def process(self, item): # NEW for 2014! Check if wget was compiled with zlib support if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item): raise Exception('Please compile wget with zlib support!') os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "%(data_dir)s/%(warc_file_base)s.warc.gz" % item) shutil.rmtree("%(item_dir)s" % item) def get_hash(filename): with open(filename, 'rb') as in_file: return hashlib.sha1(in_file.read()).hexdigest() CWD = os.getcwd() PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py')) LUA_SHA1 = get_hash(os.path.join(CWD, 'spuf.lua')) def stats_id_function(item): # NEW for 2014! Some accountability hashes and stats. d = { 'pipeline_hash': PIPELINE_SHA1, 'lua_hash': LUA_SHA1, 'python_version': sys.version, } return d class WgetArgs(object): def realize(self, item): wget_args = [ WGET_LUA, "-U", USER_AGENT, "-nv", "--load-cookies", "cookies.txt", #"--no-cookies", "--lua-script", "spuf.lua", "-o", ItemInterpolation("%(item_dir)s/wget.log"), "--no-check-certificate", "--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"), "--truncate-output", "-e", "robots=off", "--rotate-dns", "--recursive", "--level=inf", "--no-parent", "--page-requisites", "--timeout", "30", "--tries", "inf", "--domains", "steampowered.com", "--span-hosts", "--waitretry", "30", "--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"), "--warc-header", "operator: Archive Team", "--warc-header", "steam-users-forum-dld-script-version: " + VERSION, "--warc-header", ItemInterpolation("steam-users-forum-item: %(item_name)s"), ] item_name = item['item_name'] assert ':' in item_name item_type, item_value = item_name.split(':', 1) item['item_type'] = item_type item['item_value'] = item_value tries = 0 while tries < 10: if os.path.isfile('login.php?do=login'): os.remove('login.php?do=login') os.system("wget --save-cookies cookies.txt --user-agent 'ArchiveTeam' --keep-session-cookies --post-data 'vb_login_username=archiveTeam&cookieuser=1&vb_login_password=&s=&securitytoken=guest&do=login&vb_login_md5password=9aa65d84012ee50e456c4e6916089636&vb_login_md5password_utf=9aa65d84012ee50e456c4e6916089636' --referer http://forums.steampowered.com/forums/ http://forums.steampowered.com/forums/login.php?do=login") if not os.path.isfile('login.php?do=login'): continue with open('login.php?do=login') as f: if 'alt="Forum Database Error"' in f.read(): continue break else: raise Exception('Could not log in.') wget_args.append('http://forums.steampowered.com/forums/showthread.php') if item_type == 'threads': start, stop = item_value.split('-') for i in range(int(start), int(stop)+1): wget_args.extend(['--warc-header', 'steam-users-forum-thread: {i}'.format(i=i)]) 
wget_args.append('http://forums.steampowered.com/forums/showthread.php?t={i}'.format(i=i)) elif item_type == 'forums': start, stop = item_value.split('-') for i in range(int(start), int(stop)+1): wget_args.extend(['--warc-header', 'steam-users-forum-forum: {i}'.format(i=i)]) wget_args.append('http://forums.steampowered.com/forums/forumdisplay.php?f={i}&daysprune=-1'.format(i=i)) wget_args.append('http://forums.steampowered.com/forums/forumdisplay.php?f={i}'.format(i=i)) elif item_type == 'members': start, stop = item_value.split('-') for i in range(int(start), int(stop)+1): wget_args.extend(['--warc-header', 'steam-users-forum-member: {i}'.format(i=i)]) wget_args.append('http://forums.steampowered.com/forums/member.php?u={i}'.format(i=i)) else: raise Exception('Unknown item') if 'bind_address' in globals(): wget_args.extend(['--bind-address', globals()['bind_address']]) print('') print('*** Wget will bind address at {0} ***'.format( globals()['bind_address'])) print('') return realize(wget_args, item) ########################################################################### # Initialize the project. # # This will be shown in the warrior management panel. The logo should not # be too big. The deadline is optional. project = Project( title = "Steam Users' Forum", project_html = """ <img class="project-logo" alt="Steam Logo" src="http://archiveteam.org/images/thumb/4/48/Steam_Icon_2014.png/100px-Steam_Icon_2014.png" /> <h2>Steam Users' Forum <span class="links"><a href="http://forums.steampowered.com/forums">Website</a> &middot; <a href="http://tracker.archiveteam.org/spuf/">Leaderboard</a></span></h2> <p>Getting killed June 5th.</p> """, utc_deadline = datetime.datetime(2017, 6, 4, 23, 59, 0) ) pipeline = Pipeline( CheckIP(), GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader, VERSION), PrepareDirectories(warc_prefix="spuf"), WgetDownload( WgetArgs(), max_tries=2, accept_on_exit_code=[0, 4, 8], env={ "item_dir": ItemValue("item_dir"), "item_value": ItemValue("item_value"), "item_type": ItemValue("item_type"), "warc_file_base": ItemValue("warc_file_base"), } ), PrepareStatsForTracker( defaults={"downloader": downloader, "version": VERSION}, file_groups={ "data": [ ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz") ] }, id_function=stats_id_function, ), MoveFiles(), LimitConcurrent(NumberConfigValue(min=1, max=4, default="1", name="shared:rsync_threads", title="Rsync threads", description="The maximum number of concurrent uploads."), UploadWithTracker( "http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader=downloader, version=VERSION, files=[ ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz") ], rsync_target_source_path=ItemInterpolation("%(data_dir)s/"), rsync_extra_args=[ "--recursive", "--partial", "--partial-dir", ".rsync-tmp", ] ), ), SendDoneToTracker( tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID), stats=ItemValue("stats") ) )
unlicense
-1,336,016,031,714,418,700
35.868852
432
0.574033
false
3.543965
false
false
false
Encrylize/flask-blogger
app/utils/helpers.py
1
1218
from urllib.parse import urljoin, urlparse

from flask import request


def get_or_create(model, **kwargs):
    """
    Gets or creates an instance of model.

    Args:
        model: SQLAlchemy model
        **kwargs: Model properties

    Returns:
        An instance of model and True if it was created,
        False if it was not.
    """
    instance = model.query.filter_by(**kwargs).first()
    if instance:
        return instance, False
    else:
        instance = model(**kwargs)
        return instance, True


def is_safe_url(target):
    """
    Checks if a URL is safe.

    Args:
        target: The URL to check

    Returns:
        True if the URL is safe, False if it is not.
    """
    ref_url = urlparse(request.host_url)
    test_url = urlparse(urljoin(request.host_url, target))
    return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc


def get_redirect_target():
    """
    Gets a safe redirect target.

    Returns:
        The first safe redirect target.
    """
    for target in request.args.get('next'), request.referrer:
        if not target:
            continue
        elif is_safe_url(target):
            return target
mit
7,296,754,981,301,055,000
20
77
0.591954
false
4.2
false
false
false
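get_redirect_target() and is_safe_url() above exist to guard post-login redirects against open-redirect targets. A hypothetical usage sketch follows; it assumes the app.utils.helpers package layout of this repository, and the routes themselves are illustrative.

from flask import Flask, redirect, url_for

from app.utils.helpers import get_redirect_target  # the module above

app = Flask(__name__)


@app.route('/')
def index():
    return 'home'


@app.route('/after-login')
def after_login():
    # Follow the user's safe 'next' target, or fall back to the index page.
    return redirect(get_redirect_target() or url_for('index'))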
Azure/azure-sdk-for-python
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/aio/operations/_images_operations.py
1
29335
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class ImagesOperations: """ImagesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.compute.v2019_12_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _create_or_update_initial( self, resource_group_name: str, image_name: str, parameters: "_models.Image", **kwargs: Any ) -> "_models.Image": cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-12-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'imageName': self._serialize.url("image_name", image_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'Image') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('Image', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('Image', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, image_name: str, parameters: "_models.Image", **kwargs: Any ) -> AsyncLROPoller["_models.Image"]: """Create or update an image. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param image_name: The name of the image. :type image_name: str :param parameters: Parameters supplied to the Create Image operation. :type parameters: ~azure.mgmt.compute.v2019_12_01.models.Image :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. 
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either Image or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_12_01.models.Image] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, image_name=image_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Image', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'imageName': self._serialize.url("image_name", image_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore async def _update_initial( self, resource_group_name: str, image_name: str, parameters: "_models.ImageUpdate", **kwargs: Any ) -> "_models.Image": cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-12-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'imageName': self._serialize.url("image_name", image_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'ImageUpdate') 
body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('Image', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('Image', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore async def begin_update( self, resource_group_name: str, image_name: str, parameters: "_models.ImageUpdate", **kwargs: Any ) -> AsyncLROPoller["_models.Image"]: """Update an image. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param image_name: The name of the image. :type image_name: str :param parameters: Parameters supplied to the Update Image operation. :type parameters: ~azure.mgmt.compute.v2019_12_01.models.ImageUpdate :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either Image or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_12_01.models.Image] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._update_initial( resource_group_name=resource_group_name, image_name=image_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Image', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'imageName': self._serialize.url("image_name", image_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore async def _delete_initial( self, resource_group_name: str, image_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-12-01" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'imageName': self._serialize.url("image_name", image_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore async def begin_delete( self, 
resource_group_name: str, image_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes an Image. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param image_name: The name of the image. :type image_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, image_name=image_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'imageName': self._serialize.url("image_name", image_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore async def get( self, resource_group_name: str, image_name: str, expand: Optional[str] = None, **kwargs: Any ) -> "_models.Image": """Gets an image. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param image_name: The name of the image. :type image_name: str :param expand: The expand expression to apply on the operation. 
:type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Image, or the result of cls(response) :rtype: ~azure.mgmt.compute.v2019_12_01.models.Image :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-12-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'imageName': self._serialize.url("image_name", image_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] if expand is not None: query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('Image', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore def list_by_resource_group( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.ImageListResult"]: """Gets the list of images under a resource group. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ImageListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.ImageListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-12-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_resource_group.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('ImageListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images'} # type: ignore def list( self, **kwargs: Any ) -> AsyncIterable["_models.ImageListResult"]: """Gets the list of Images in the subscription. Use nextLink property in the response to get the next page of Images. Do this till nextLink is null to fetch all the Images. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ImageListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.ImageListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-12-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('ImageListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images'} # type: ignore
mit
-7,266,428,311,589,017,000
47.407591
181
0.634907
false
4.309534
true
false
false
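The ImagesOperations methods above hand back AsyncLROPoller objects, so client code awaits once to start the long-running operation and once more for its result. The following sketch drives begin_create_or_update from application code; it assumes azure-identity is installed, and the subscription ID, resource group, image name and VM resource ID are placeholders.

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.compute.aio import ComputeManagementClient


async def create_image(subscription_id):
    async with DefaultAzureCredential() as credential:
        async with ComputeManagementClient(credential, subscription_id) as client:
            poller = await client.images.begin_create_or_update(
                "my-resource-group",          # placeholder resource group
                "my-image",                   # placeholder image name
                {
                    "location": "eastus",
                    "source_virtual_machine": {"id": "<vm resource id>"},
                },
            )
            return await poller.result()      # wait for the LRO to finish


# asyncio.run(create_image("<subscription id>"))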
pradyu1993/scikit-learn
sklearn/gaussian_process/gaussian_process.py
1
34415
#!/usr/bin/python # -*- coding: utf-8 -*- # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # (mostly translation, see implementation details) # License: BSD style import numpy as np from scipy import linalg, optimize, rand from ..base import BaseEstimator, RegressorMixin from ..metrics.pairwise import manhattan_distances from ..utils import array2d, check_random_state from ..utils import deprecated from . import regression_models as regression from . import correlation_models as correlation MACHINE_EPSILON = np.finfo(np.double).eps if hasattr(linalg, 'solve_triangular'): # only in scipy since 0.9 solve_triangular = linalg.solve_triangular else: # slower, but works def solve_triangular(x, y, lower=True): return linalg.solve(x, y) def l1_cross_distances(X): """ Computes the nonzero componentwise L1 cross-distances between the vectors in X. Parameters ---------- X: array_like An array with shape (n_samples, n_features) Returns ------- D: array with shape (n_samples * (n_samples - 1) / 2, n_features) The array of componentwise L1 cross-distances. ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2) The indices i and j of the vectors in X associated to the cross- distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]). """ X = array2d(X) n_samples, n_features = X.shape n_nonzero_cross_dist = n_samples * (n_samples - 1) / 2 ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int) D = np.zeros((n_nonzero_cross_dist, n_features)) ll_1 = 0 for k in range(n_samples - 1): ll_0 = ll_1 ll_1 = ll_0 + n_samples - k - 1 ij[ll_0:ll_1, 0] = k ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples) D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples]) return D, ij.astype(np.int) class GaussianProcess(BaseEstimator, RegressorMixin): """The Gaussian Process model class. Parameters ---------- regr : string or callable, optional A regression function returning an array of outputs of the linear regression functional basis. The number of observations n_samples should be greater than the size p of this basis. Default assumes a simple constant regression trend. Available built-in regression models are:: 'constant', 'linear', 'quadratic' corr : string or callable, optional A stationary autocorrelation function returning the autocorrelation between two points x and x'. Default assumes a squared-exponential autocorrelation model. Built-in correlation models are:: 'absolute_exponential', 'squared_exponential', 'generalized_exponential', 'cubic', 'linear' beta0 : double array_like, optional The regression weight vector to perform Ordinary Kriging (OK). Default assumes Universal Kriging (UK) so that the vector beta of regression weights is estimated using the maximum likelihood principle. storage_mode : string, optional A string specifying whether the Cholesky decomposition of the correlation matrix should be stored in the class (storage_mode = 'full') or not (storage_mode = 'light'). Default assumes storage_mode = 'full', so that the Cholesky decomposition of the correlation matrix is stored. This might be a useful parameter when one is not interested in the MSE and only plan to estimate the BLUP, for which the correlation matrix is not required. verbose : boolean, optional A boolean specifying the verbose level. Default is verbose = False. theta0 : double array_like, optional An array with shape (n_features, ) or (1, ). The parameters in the autocorrelation model. If thetaL and thetaU are also specified, theta0 is considered as the starting point for the maximum likelihood rstimation of the best set of parameters. 
Default assumes isotropic autocorrelation model with theta0 = 1e-1. thetaL : double array_like, optional An array with shape matching theta0's. Lower bound on the autocorrelation parameters for maximum likelihood estimation. Default is None, so that it skips maximum likelihood estimation and it uses theta0. thetaU : double array_like, optional An array with shape matching theta0's. Upper bound on the autocorrelation parameters for maximum likelihood estimation. Default is None, so that it skips maximum likelihood estimation and it uses theta0. normalize : boolean, optional Input X and observations y are centered and reduced wrt means and standard deviations estimated from the n_samples observations provided. Default is normalize = True so that data is normalized to ease maximum likelihood estimation. nugget : double or ndarray, optional Introduce a nugget effect to allow smooth predictions from noisy data. If nugget is an ndarray, it must be the same length as the number of data points used for the fit. The nugget is added to the diagonal of the assumed training covariance; in this way it acts as a Tikhonov regularization in the problem. In the special case of the squared exponential correlation function, the nugget mathematically represents the variance of the input values. Default assumes a nugget close to machine precision for the sake of robustness (nugget = 10. * MACHINE_EPSILON). optimizer : string, optional A string specifying the optimization algorithm to be used. Default uses 'fmin_cobyla' algorithm from scipy.optimize. Available optimizers are:: 'fmin_cobyla', 'Welch' 'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_. It consists in iterating over several one-dimensional optimizations instead of running one single multi-dimensional optimization. random_start : int, optional The number of times the Maximum Likelihood Estimation should be performed from a random starting point. The first MLE always uses the specified starting point (theta0), the next starting points are picked at random according to an exponential distribution (log-uniform on [thetaL, thetaU]). Default does not use random starting point (random_start = 1). random_state: integer or numpy.RandomState, optional The generator used to shuffle the sequence of coordinates of theta in the Welch optimizer. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- `theta_`: array Specified theta OR the best set of autocorrelation parameters (the \ sought maximizer of the reduced likelihood function). `reduced_likelihood_function_value_`: array The optimal reduced likelihood function value. Examples -------- >>> import numpy as np >>> from sklearn.gaussian_process import GaussianProcess >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T >>> y = (X * np.sin(X)).ravel() >>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.) >>> gp.fit(X, y) # doctest: +ELLIPSIS GaussianProcess(beta0=None... ... Notes ----- The presentation implementation is based on a translation of the DACE Matlab toolbox, see reference [NLNS2002]_. References ---------- .. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J. Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002) http://www2.imm.dtu.dk/~hbn/dace/dace.pdf .. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell, and M.D. Morris (1992). Screening, predicting, and computer experiments. 
Technometrics, 34(1) 15--25.` http://www.jstor.org/pss/1269548 """ _regression_types = { 'constant': regression.constant, 'linear': regression.linear, 'quadratic': regression.quadratic} _correlation_types = { 'absolute_exponential': correlation.absolute_exponential, 'squared_exponential': correlation.squared_exponential, 'generalized_exponential': correlation.generalized_exponential, 'cubic': correlation.cubic, 'linear': correlation.linear} _optimizer_types = [ 'fmin_cobyla', 'Welch'] def __init__(self, regr='constant', corr='squared_exponential', beta0=None, storage_mode='full', verbose=False, theta0=1e-1, thetaL=None, thetaU=None, optimizer='fmin_cobyla', random_start=1, normalize=True, nugget=10. * MACHINE_EPSILON, random_state=None): self.regr = regr self.corr = corr self.beta0 = beta0 self.storage_mode = storage_mode self.verbose = verbose self.theta0 = theta0 self.thetaL = thetaL self.thetaU = thetaU self.normalize = normalize self.nugget = nugget self.optimizer = optimizer self.random_start = random_start self.random_state = random_state # Run input checks self._check_params() def fit(self, X, y): """ The Gaussian Process model fitting method. Parameters ---------- X : double array_like An array with shape (n_samples, n_features) with the input at which observations were made. y : double array_like An array with shape (n_samples, ) with the observations of the scalar output to be predicted. Returns ------- gp : self A fitted Gaussian Process model object awaiting data to perform predictions. """ self.random_state = check_random_state(self.random_state) # Force data to 2D numpy.array X = array2d(X) y = np.asarray(y).ravel()[:, np.newaxis] # Check shapes of DOE & observations n_samples_X, n_features = X.shape n_samples_y = y.shape[0] if n_samples_X != n_samples_y: raise ValueError("X and y must have the same number of rows.") else: n_samples = n_samples_X # Run input checks self._check_params(n_samples) # Normalize data or don't if self.normalize: X_mean = np.mean(X, axis=0) X_std = np.std(X, axis=0) y_mean = np.mean(y, axis=0) y_std = np.std(y, axis=0) X_std[X_std == 0.] = 1. y_std[y_std == 0.] = 1. # center and scale X if necessary X = (X - X_mean) / X_std y = (y - y_mean) / y_std else: X_mean = np.zeros(1) X_std = np.ones(1) y_mean = np.zeros(1) y_std = np.ones(1) # Calculate matrix of distances D between samples D, ij = l1_cross_distances(X) if np.min(np.sum(D, axis=1)) == 0. \ and self.corr != correlation.pure_nugget: raise Exception("Multiple input features cannot have the same" " value") # Regression matrix and parameters F = self.regr(X) n_samples_F = F.shape[0] if F.ndim > 1: p = F.shape[1] else: p = 1 if n_samples_F != n_samples: raise Exception("Number of rows in F and X do not match. 
Most " + "likely something is going wrong with the " + "regression model.") if p > n_samples_F: raise Exception(("Ordinary least squares problem is undetermined " + "n_samples=%d must be greater than the " + "regression model size p=%d.") % (n_samples, p)) if self.beta0 is not None: if self.beta0.shape[0] != p: raise Exception("Shapes of beta0 and F do not match.") # Set attributes self.X = X self.y = y self.D = D self.ij = ij self.F = F self.X_mean, self.X_std = X_mean, X_std self.y_mean, self.y_std = y_mean, y_std # Determine Gaussian Process model parameters if self.thetaL is not None and self.thetaU is not None: # Maximum Likelihood Estimation of the parameters if self.verbose: print("Performing Maximum Likelihood Estimation of the " + "autocorrelation parameters...") self.theta_, self.reduced_likelihood_function_value_, par = \ self._arg_max_reduced_likelihood_function() if np.isinf(self.reduced_likelihood_function_value_): raise Exception("Bad parameter region. " + "Try increasing upper bound") else: # Given parameters if self.verbose: print("Given autocorrelation parameters. " + "Computing Gaussian Process model parameters...") self.theta_ = self.theta0 self.reduced_likelihood_function_value_, par = \ self.reduced_likelihood_function() if np.isinf(self.reduced_likelihood_function_value_): raise Exception("Bad point. Try increasing theta0.") self.beta = par['beta'] self.gamma = par['gamma'] self.sigma2 = par['sigma2'] self.C = par['C'] self.Ft = par['Ft'] self.G = par['G'] if self.storage_mode == 'light': # Delete heavy data (it will be computed again if required) # (it is required only when MSE is wanted in self.predict) if self.verbose: print("Light storage mode specified. " + "Flushing autocorrelation matrix...") self.D = None self.ij = None self.F = None self.C = None self.Ft = None self.G = None return self def predict(self, X, eval_MSE=False, batch_size=None): """ This function evaluates the Gaussian Process model at x. Parameters ---------- X : array_like An array with shape (n_eval, n_features) giving the point(s) at which the prediction(s) should be made. eval_MSE : boolean, optional A boolean specifying whether the Mean Squared Error should be evaluated or not. Default assumes evalMSE = False and evaluates only the BLUP (mean prediction). batch_size : integer, optional An integer giving the maximum number of points that can be evaluated simulatneously (depending on the available memory). Default is None so that all given points are evaluated at the same time. Returns ------- y : array_like An array with shape (n_eval, ) with the Best Linear Unbiased Prediction at x. MSE : array_like, optional (if eval_MSE == True) An array with shape (n_eval, ) with the Mean Squared Error at x. 
""" # Check input shapes X = array2d(X) n_eval, n_features_X = X.shape n_samples, n_features = self.X.shape # Run input checks self._check_params(n_samples) if n_features_X != n_features: raise ValueError(("The number of features in X (X.shape[1] = %d) " + "should match the sample size used for fit() " + "which is %d.") % (n_features_X, n_features)) if batch_size is None: # No memory management # (evaluates all given points in a single batch run) # Normalize input X = (X - self.X_mean) / self.X_std # Initialize output y = np.zeros(n_eval) if eval_MSE: MSE = np.zeros(n_eval) # Get pairwise componentwise L1-distances to the input training set dx = manhattan_distances(X, Y=self.X, sum_over_features=False) # Get regression function and correlation f = self.regr(X) r = self.corr(self.theta_, dx).reshape(n_eval, n_samples) # Scaled predictor y_ = np.dot(f, self.beta) + np.dot(r, self.gamma) # Predictor y = (self.y_mean + self.y_std * y_).ravel() # Mean Squared Error if eval_MSE: C = self.C if C is None: # Light storage mode (need to recompute C, F, Ft and G) if self.verbose: print("This GaussianProcess used 'light' storage mode " + "at instanciation. Need to recompute " + "autocorrelation matrix...") reduced_likelihood_function_value, par = \ self.reduced_likelihood_function() self.C = par['C'] self.Ft = par['Ft'] self.G = par['G'] rt = solve_triangular(self.C, r.T, lower=True) if self.beta0 is None: # Universal Kriging u = solve_triangular(self.G.T, np.dot(self.Ft.T, rt) - f.T) else: # Ordinary Kriging u = np.zeros(y.shape) MSE = self.sigma2 * (1. - (rt ** 2.).sum(axis=0) + (u ** 2.).sum(axis=0)) # Mean Squared Error might be slightly negative depending on # machine precision: force to zero! MSE[MSE < 0.] = 0. return y, MSE else: return y else: # Memory management if type(batch_size) is not int or batch_size <= 0: raise Exception("batch_size must be a positive integer") if eval_MSE: y, MSE = np.zeros(n_eval), np.zeros(n_eval) for k in range(max(1, n_eval / batch_size)): batch_from = k * batch_size batch_to = min([(k + 1) * batch_size + 1, n_eval + 1]) y[batch_from:batch_to], MSE[batch_from:batch_to] = \ self.predict(X[batch_from:batch_to], eval_MSE=eval_MSE, batch_size=None) return y, MSE else: y = np.zeros(n_eval) for k in range(max(1, n_eval / batch_size)): batch_from = k * batch_size batch_to = min([(k + 1) * batch_size + 1, n_eval + 1]) y[batch_from:batch_to] = \ self.predict(X[batch_from:batch_to], eval_MSE=eval_MSE, batch_size=None) return y def reduced_likelihood_function(self, theta=None): """ This function determines the BLUP parameters and evaluates the reduced likelihood function for the given autocorrelation parameters theta. Maximizing this function wrt the autocorrelation parameters theta is equivalent to maximizing the likelihood of the assumed joint Gaussian distribution of the observations y evaluated onto the design of experiments X. Parameters ---------- theta : array_like, optional An array containing the autocorrelation parameters at which the Gaussian Process model parameters should be determined. Default uses the built-in autocorrelation parameters (ie ``theta = self.theta_``). Returns ------- reduced_likelihood_function_value : double The value of the reduced likelihood function associated to the given autocorrelation parameters theta. par : dict A dictionary containing the requested Gaussian Process model parameters: sigma2 Gaussian Process variance. beta Generalized least-squares regression weights for Universal Kriging or given beta0 for Ordinary Kriging. 
gamma Gaussian Process weights. C Cholesky decomposition of the correlation matrix [R]. Ft Solution of the linear equation system : [R] x Ft = F G QR decomposition of the matrix Ft. """ if theta is None: # Use built-in autocorrelation parameters theta = self.theta_ # Initialize output reduced_likelihood_function_value = - np.inf par = {} # Retrieve data n_samples = self.X.shape[0] D = self.D ij = self.ij F = self.F if D is None: # Light storage mode (need to recompute D, ij and F) D, ij = l1_cross_distances(self.X) if np.min(np.sum(D, axis=1)) == 0. \ and self.corr != correlation.pure_nugget: raise Exception("Multiple X are not allowed") F = self.regr(self.X) # Set up R r = self.corr(theta, D) R = np.eye(n_samples) * (1. + self.nugget) R[ij[:, 0], ij[:, 1]] = r R[ij[:, 1], ij[:, 0]] = r # Cholesky decomposition of R try: C = linalg.cholesky(R, lower=True) except linalg.LinAlgError: return reduced_likelihood_function_value, par # Get generalized least squares solution Ft = solve_triangular(C, F, lower=True) try: Q, G = linalg.qr(Ft, econ=True) except: #/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177: # DeprecationWarning: qr econ argument will be removed after scipy # 0.7. The economy transform will then be available through the # mode='economic' argument. Q, G = linalg.qr(Ft, mode='economic') pass sv = linalg.svd(G, compute_uv=False) rcondG = sv[-1] / sv[0] if rcondG < 1e-10: # Check F sv = linalg.svd(F, compute_uv=False) condF = sv[0] / sv[-1] if condF > 1e15: raise Exception("F is too ill conditioned. Poor combination " + "of regression model and observations.") else: # Ft is too ill conditioned, get out (try different theta) return reduced_likelihood_function_value, par Yt = solve_triangular(C, self.y, lower=True) if self.beta0 is None: # Universal Kriging beta = solve_triangular(G, np.dot(Q.T, Yt)) else: # Ordinary Kriging beta = np.array(self.beta0) rho = Yt - np.dot(Ft, beta) sigma2 = (rho ** 2.).sum(axis=0) / n_samples # The determinant of R is equal to the squared product of the diagonal # elements of its Cholesky decomposition C detR = (np.diag(C) ** (2. / n_samples)).prod() # Compute/Organize output reduced_likelihood_function_value = - sigma2.sum() * detR par['sigma2'] = sigma2 * self.y_std ** 2. par['beta'] = beta par['gamma'] = solve_triangular(C.T, rho) par['C'] = C par['Ft'] = Ft par['G'] = G return reduced_likelihood_function_value, par @deprecated("to be removed in 0.14, access ``self.theta_`` etc. directly " " after fit.") def arg_max_reduced_likelihood_function(self): return self._arg_max_reduced_likelihood_function() @property @deprecated('``theta`` is deprecated and will be removed in 0.14, ' 'please use ``theta_`` instead.') def theta(self): return self.theta_ @property @deprecated("``reduced_likelihood_function_value`` is deprecated and will" "be removed in 0.14, please use " "``reduced_likelihood_function_value_`` instead.") def reduced_likelihood_function_value(self): return self.reduced_likelihood_function_value_ def _arg_max_reduced_likelihood_function(self): """ This function estimates the autocorrelation parameters theta as the maximizer of the reduced likelihood function. (Minimization of the opposite reduced likelihood function is used for convenience) Parameters ---------- self : All parameters are stored in the Gaussian Process model object. Returns ------- optimal_theta : array_like The best set of autocorrelation parameters (the sought maximizer of the reduced likelihood function). 
optimal_reduced_likelihood_function_value : double The optimal reduced likelihood function value. optimal_par : dict The BLUP parameters associated to thetaOpt. """ # Initialize output best_optimal_theta = [] best_optimal_rlf_value = [] best_optimal_par = [] if self.verbose: print "The chosen optimizer is: " + str(self.optimizer) if self.random_start > 1: print str(self.random_start) + " random starts are required." percent_completed = 0. # Force optimizer to fmin_cobyla if the model is meant to be isotropic if self.optimizer == 'Welch' and self.theta0.size == 1: self.optimizer = 'fmin_cobyla' if self.optimizer == 'fmin_cobyla': def minus_reduced_likelihood_function(log10t): return - self.reduced_likelihood_function(theta=10. ** log10t)[0] constraints = [] for i in range(self.theta0.size): constraints.append(lambda log10t: \ log10t[i] - np.log10(self.thetaL[0, i])) constraints.append(lambda log10t: \ np.log10(self.thetaU[0, i]) - log10t[i]) for k in range(self.random_start): if k == 0: # Use specified starting point as first guess theta0 = self.theta0 else: # Generate a random starting point log10-uniformly # distributed between bounds log10theta0 = np.log10(self.thetaL) \ + rand(self.theta0.size).reshape(self.theta0.shape) \ * np.log10(self.thetaU / self.thetaL) theta0 = 10. ** log10theta0 # Run Cobyla try: log10_optimal_theta = \ optimize.fmin_cobyla(minus_reduced_likelihood_function, np.log10(theta0), constraints, iprint=0) except ValueError as ve: print("Optimization failed. Try increasing the ``nugget``") raise ve optimal_theta = 10. ** log10_optimal_theta optimal_minus_rlf_value, optimal_par = \ self.reduced_likelihood_function(theta=optimal_theta) optimal_rlf_value = - optimal_minus_rlf_value # Compare the new optimizer to the best previous one if k > 0: if optimal_rlf_value > best_optimal_rlf_value: best_optimal_rlf_value = optimal_rlf_value best_optimal_par = optimal_par best_optimal_theta = optimal_theta else: best_optimal_rlf_value = optimal_rlf_value best_optimal_par = optimal_par best_optimal_theta = optimal_theta if self.verbose and self.random_start > 1: if (20 * k) / self.random_start > percent_completed: percent_completed = (20 * k) / self.random_start print "%s completed" % (5 * percent_completed) optimal_rlf_value = best_optimal_rlf_value optimal_par = best_optimal_par optimal_theta = best_optimal_theta elif self.optimizer == 'Welch': # Backup of the given atrributes theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU corr = self.corr verbose = self.verbose # This will iterate over fmin_cobyla optimizer self.optimizer = 'fmin_cobyla' self.verbose = False # Initialize under isotropy assumption if verbose: print("Initialize under isotropy assumption...") self.theta0 = array2d(self.theta0.min()) self.thetaL = array2d(self.thetaL.min()) self.thetaU = array2d(self.thetaU.max()) theta_iso, optimal_rlf_value_iso, par_iso = \ self._arg_max_reduced_likelihood_function() optimal_theta = theta_iso + np.zeros(theta0.shape) # Iterate over all dimensions of theta allowing for anisotropy if verbose: print("Now improving allowing for anisotropy...") for i in self.random_state.permutation(theta0.size): if verbose: print "Proceeding along dimension %d..." 
% (i + 1) self.theta0 = array2d(theta_iso) self.thetaL = array2d(thetaL[0, i]) self.thetaU = array2d(thetaU[0, i]) def corr_cut(t, d): return corr(array2d(np.hstack([ optimal_theta[0][0:i], t[0], optimal_theta[0][(i + 1)::]])), d) self.corr = corr_cut optimal_theta[0, i], optimal_rlf_value, optimal_par = \ self._arg_max_reduced_likelihood_function() # Restore the given atrributes self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU self.corr = corr self.optimizer = 'Welch' self.verbose = verbose else: raise NotImplementedError(("This optimizer ('%s') is not " + "implemented yet. Please contribute!") % self.optimizer) return optimal_theta, optimal_rlf_value, optimal_par def _check_params(self, n_samples=None): # Check regression model if not callable(self.regr): if self.regr in self._regression_types: self.regr = self._regression_types[self.regr] else: raise ValueError(("regr should be one of %s or callable, " + "%s was given.") % (self._regression_types.keys(), self.regr)) # Check regression weights if given (Ordinary Kriging) if self.beta0 is not None: self.beta0 = array2d(self.beta0) if self.beta0.shape[1] != 1: # Force to column vector self.beta0 = self.beta0.T # Check correlation model if not callable(self.corr): if self.corr in self._correlation_types: self.corr = self._correlation_types[self.corr] else: raise ValueError(("corr should be one of %s or callable, " + "%s was given.") % (self._correlation_types.keys(), self.corr)) # Check storage mode if self.storage_mode != 'full' and self.storage_mode != 'light': raise ValueError("Storage mode should either be 'full' or " + "'light', %s was given." % self.storage_mode) # Check correlation parameters self.theta0 = array2d(self.theta0) lth = self.theta0.size if self.thetaL is not None and self.thetaU is not None: self.thetaL = array2d(self.thetaL) self.thetaU = array2d(self.thetaU) if self.thetaL.size != lth or self.thetaU.size != lth: raise ValueError("theta0, thetaL and thetaU must have the " + "same length.") if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL): raise ValueError("The bounds must satisfy O < thetaL <= " + "thetaU.") elif self.thetaL is None and self.thetaU is None: if np.any(self.theta0 <= 0): raise ValueError("theta0 must be strictly positive.") elif self.thetaL is None or self.thetaU is None: raise ValueError("thetaL and thetaU should either be both or " + "neither specified.") # Force verbose type to bool self.verbose = bool(self.verbose) # Force normalize type to bool self.normalize = bool(self.normalize) # Check nugget value self.nugget = np.asarray(self.nugget) if np.any(self.nugget) < 0.: raise ValueError("nugget must be positive or zero.") if (n_samples is not None and self.nugget.shape not in [(), (n_samples,)]): raise ValueError("nugget must be either a scalar " "or array of length n_samples.") # Check optimizer if not self.optimizer in self._optimizer_types: raise ValueError("optimizer should be one of %s" % self._optimizer_types) # Force random_start type to int self.random_start = int(self.random_start)
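# --- Illustrative sketch added by the editor; not part of the original module ---
# The random-restart loop in _arg_max_reduced_likelihood_function draws each
# candidate theta0 log10-uniformly between thetaL and thetaU, i.e.
#     theta0 = 10 ** (log10(thetaL) + U(0, 1) * log10(thetaU / thetaL)),
# so restarts are spread evenly across orders of magnitude rather than linearly.
# A minimal standalone reproduction of that sampling step (the bounds below are
# hypothetical values used only for illustration):
import numpy as np

rng = np.random.RandomState(0)
thetaL = np.atleast_2d([1e-4, 1e-4])   # assumed lower bounds
thetaU = np.atleast_2d([1e+1, 1e+1])   # assumed upper bounds
log10theta0 = (np.log10(thetaL)
               + rng.rand(*thetaL.shape) * np.log10(thetaU / thetaL))
theta0 = 10. ** log10theta0            # starting point handed to fmin_cobyla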
bsd-3-clause
-5,101,911,511,660,186,000
37.366778
79
0.555165
false
4.222181
false
false
false
bdh1011/wau
venv/lib/python2.7/site-packages/pandas/core/internals.py
1
151884
import copy import itertools import re import operator from datetime import datetime, timedelta from collections import defaultdict import numpy as np from pandas.core.base import PandasObject from pandas.core.common import (_possibly_downcast_to_dtype, isnull, _NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like, ABCSparseSeries, _infer_dtype_from_scalar, is_null_datelike_scalar, _maybe_promote, is_timedelta64_dtype, is_datetime64_dtype, array_equivalent, _maybe_convert_string_to_object, is_categorical) from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import maybe_convert_indices, length_of_indexer from pandas.core.categorical import Categorical, maybe_to_categorical import pandas.core.common as com from pandas.sparse.array import _maybe_to_sparse, SparseArray import pandas.lib as lib import pandas.tslib as tslib import pandas.computation.expressions as expressions from pandas.util.decorators import cache_readonly from pandas.tslib import Timestamp, Timedelta from pandas import compat from pandas.compat import range, map, zip, u from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type from pandas.lib import BlockPlacement class Block(PandasObject): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure Index-ignorant; let the container take care of that """ __slots__ = ['_mgr_locs', 'values', 'ndim'] is_numeric = False is_float = False is_integer = False is_complex = False is_datetime = False is_timedelta = False is_bool = False is_object = False is_categorical = False is_sparse = False _can_hold_na = False _downcast_dtype = None _can_consolidate = True _verify_integrity = True _validate_ndim = True _ftype = 'dense' _holder = None def __init__(self, values, placement, ndim=None, fastpath=False): if ndim is None: ndim = values.ndim elif values.ndim != ndim: raise ValueError('Wrong number of dimensions') self.ndim = ndim self.mgr_locs = placement self.values = values if len(self.mgr_locs) != len(self.values): raise ValueError('Wrong number of items passed %d,' ' placement implies %d' % ( len(self.values), len(self.mgr_locs))) @property def _consolidate_key(self): return (self._can_consolidate, self.dtype.name) @property def _is_single_block(self): return self.ndim == 1 @property def is_view(self): """ return a boolean if I am possibly a view """ return self.values.base is not None @property def is_datelike(self): """ return True if I am a non-datelike """ return self.is_datetime or self.is_timedelta def is_categorical_astype(self, dtype): """ validate that we have a astypeable to categorical, returns a boolean if we are a categorical """ if com.is_categorical_dtype(dtype): if dtype == com.CategoricalDtype(): return True # this is a pd.Categorical, but is not # a valid type for astypeing raise TypeError("invalid type {0} for astype".format(dtype)) return False def to_dense(self): return self.values.view() @property def fill_value(self): return np.nan @property def mgr_locs(self): return self._mgr_locs @property def array_dtype(self): """ the dtype to return if I want to construct this block as an array """ return self.dtype def make_block_same_class(self, values, placement, copy=False, fastpath=True, **kwargs): """ Wrap given values in a block of same type as self. `kwargs` are used in SparseBlock override. 
""" if copy: values = values.copy() return make_block(values, placement, klass=self.__class__, fastpath=fastpath, **kwargs) @mgr_locs.setter def mgr_locs(self, new_mgr_locs): if not isinstance(new_mgr_locs, BlockPlacement): new_mgr_locs = BlockPlacement(new_mgr_locs) self._mgr_locs = new_mgr_locs def __unicode__(self): # don't want to print out all of the items here name = com.pprint_thing(self.__class__.__name__) if self._is_single_block: result = '%s: %s dtype: %s' % ( name, len(self), self.dtype) else: shape = ' x '.join([com.pprint_thing(s) for s in self.shape]) result = '%s: %s, %s, dtype: %s' % ( name, com.pprint_thing(self.mgr_locs.indexer), shape, self.dtype) return result def __len__(self): return len(self.values) def __getstate__(self): return self.mgr_locs.indexer, self.values def __setstate__(self, state): self.mgr_locs = BlockPlacement(state[0]) self.values = state[1] self.ndim = self.values.ndim def _slice(self, slicer): """ return a slice of my values """ return self.values[slicer] def reshape_nd(self, labels, shape, ref_items): """ Parameters ---------- labels : list of new axis labels shape : new shape ref_items : new ref_items return a new block that is transformed to a nd block """ return _block2d_to_blocknd( values=self.get_values().T, placement=self.mgr_locs, shape=shape, labels=labels, ref_items=ref_items) def getitem_block(self, slicer, new_mgr_locs=None): """ Perform __getitem__-like, return result as block. As of now, only supports slices that preserve dimensionality. """ if new_mgr_locs is None: if isinstance(slicer, tuple): axis0_slicer = slicer[0] else: axis0_slicer = slicer new_mgr_locs = self.mgr_locs[axis0_slicer] new_values = self._slice(slicer) if self._validate_ndim and new_values.ndim != self.ndim: raise ValueError("Only same dim slicing is allowed") return self.make_block_same_class(new_values, new_mgr_locs) @property def shape(self): return self.values.shape @property def itemsize(self): return self.values.itemsize @property def dtype(self): return self.values.dtype @property def ftype(self): return "%s:%s" % (self.dtype, self._ftype) def merge(self, other): return _merge_blocks([self, other]) def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, limit=None, mask_info=None): """ Reindex using pre-computed indexer information """ if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) if fill_value is None: fill_value = self.fill_value new_values = com.take_nd(self.values, indexer, axis, fill_value=fill_value, mask_info=mask_info) return make_block(new_values, ndim=self.ndim, fastpath=True, placement=self.mgr_locs) def get(self, item): loc = self.items.get_loc(item) return self.values[loc] def iget(self, i): return self.values[i] def set(self, locs, values, check=False): """ Modify Block in-place with new item value Returns ------- None """ self.values[locs] = values def delete(self, loc): """ Delete given loc(-s) from block in-place. 
""" self.values = np.delete(self.values, loc, 0) self.mgr_locs = self.mgr_locs.delete(loc) def apply(self, func, **kwargs): """ apply the function to my values; return a block if we are not one """ result = func(self.values, **kwargs) if not isinstance(result, Block): result = make_block(values=_block_shape(result), placement=self.mgr_locs,) return result def fillna(self, value, limit=None, inplace=False, downcast=None): if not self._can_hold_na: if inplace: return [self] else: return [self.copy()] mask = isnull(self.values) if limit is not None: if self.ndim > 2: raise NotImplementedError("number of dimensions for 'fillna' " "is currently limited to 2") mask[mask.cumsum(self.ndim-1) > limit] = False value = self._try_fill(value) blocks = self.putmask(mask, value, inplace=inplace) return self._maybe_downcast(blocks, downcast) def _maybe_downcast(self, blocks, downcast=None): # no need to downcast our float # unless indicated if downcast is None and self.is_float: return blocks elif downcast is None and (self.is_timedelta or self.is_datetime): return blocks result_blocks = [] for b in blocks: result_blocks.extend(b.downcast(downcast)) return result_blocks def downcast(self, dtypes=None): """ try to downcast each item to the dict of dtypes if present """ # turn it off completely if dtypes is False: return [self] values = self.values # single block handling if self._is_single_block: # try to cast all non-floats here if dtypes is None: dtypes = 'infer' nv = _possibly_downcast_to_dtype(values, dtypes) return [make_block(nv, ndim=self.ndim, fastpath=True, placement=self.mgr_locs)] # ndim > 1 if dtypes is None: return [self] if not (dtypes == 'infer' or isinstance(dtypes, dict)): raise ValueError("downcast must have a dictionary or 'infer' as " "its argument") # item-by-item # this is expensive as it splits the blocks items-by-item blocks = [] for i, rl in enumerate(self.mgr_locs): if dtypes == 'infer': dtype = 'infer' else: raise AssertionError("dtypes as dict is not supported yet") dtype = dtypes.get(item, self._downcast_dtype) if dtype is None: nv = _block_shape(values[i], ndim=self.ndim) else: nv = _possibly_downcast_to_dtype(values[i], dtype) nv = _block_shape(nv, ndim=self.ndim) blocks.append(make_block(nv, ndim=self.ndim, fastpath=True, placement=[rl])) return blocks def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs): return self._astype(dtype, copy=copy, raise_on_error=raise_on_error, values=values, **kwargs) def _astype(self, dtype, copy=False, raise_on_error=True, values=None, klass=None, **kwargs): """ Coerce to the new type (if copy=True, return a new copy) raise on an except if raise == True """ # may need to convert to categorical # this is only called for non-categoricals if self.is_categorical_astype(dtype): return make_block(Categorical(self.values, **kwargs), ndim=self.ndim, placement=self.mgr_locs) # astype processing dtype = np.dtype(dtype) if self.dtype == dtype: if copy: return self.copy() return self if klass is None: if dtype == np.object_: klass = ObjectBlock try: # force the copy here if values is None: # _astype_nansafe works fine with 1-d only values = com._astype_nansafe(self.values.ravel(), dtype, copy=True) values = values.reshape(self.values.shape) newb = make_block(values, ndim=self.ndim, placement=self.mgr_locs, fastpath=True, dtype=dtype, klass=klass) except: if raise_on_error is True: raise newb = self.copy() if copy else self if newb.is_numeric and self.is_numeric: if newb.shape != self.shape: raise TypeError("cannot set astype for 
copy = [%s] for dtype " "(%s [%s]) with smaller itemsize that current " "(%s [%s])" % (copy, self.dtype.name, self.itemsize, newb.dtype.name, newb.itemsize)) return newb def convert(self, copy=True, **kwargs): """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we are not an ObjectBlock here! """ return [self.copy()] if copy else [self] def _can_hold_element(self, value): raise NotImplementedError() def _try_cast(self, value): raise NotImplementedError() def _try_cast_result(self, result, dtype=None): """ try to cast the result to our original type, we may have roundtripped thru object in the mean-time """ if dtype is None: dtype = self.dtype if self.is_integer or self.is_bool or self.is_datetime: pass elif self.is_float and result.dtype == self.dtype: # protect against a bool/object showing up here if isinstance(dtype, compat.string_types) and dtype == 'infer': return result if not isinstance(dtype, type): dtype = dtype.type if issubclass(dtype, (np.bool_, np.object_)): if issubclass(dtype, np.bool_): if isnull(result).all(): return result.astype(np.bool_) else: result = result.astype(np.object_) result[result == 1] = True result[result == 0] = False return result else: return result.astype(np.object_) return result # may need to change the dtype here return _possibly_downcast_to_dtype(result, dtype) def _try_operate(self, values): """ return a version to operate on as the input """ return values def _try_coerce_args(self, values, other): """ provide coercion to our input arguments """ return values, other def _try_coerce_result(self, result): """ reverse of try_coerce_args """ return result def _try_coerce_and_cast_result(self, result, dtype=None): result = self._try_coerce_result(result) result = self._try_cast_result(result, dtype=dtype) return result def _try_fill(self, value): return value def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: values = values[:, slicer] mask = isnull(values) if not self.is_object and not quoting: values = values.astype(str) else: values = np.array(values, dtype='object') values[mask] = na_rep return values # block actions #### def copy(self, deep=True): values = self.values if deep: values = values.copy() return make_block(values, ndim=self.ndim, klass=self.__class__, fastpath=True, placement=self.mgr_locs) def replace(self, to_replace, value, inplace=False, filter=None, regex=False): """ replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. 
It is here for API compatibility.""" mask = com.mask_missing(self.values, to_replace) if filter is not None: filtered_out = ~self.mgr_locs.isin(filter) mask[filtered_out.nonzero()[0]] = False if not mask.any(): if inplace: return [self] return [self.copy()] return self.putmask(mask, value, inplace=inplace) def setitem(self, indexer, value): """ set the value inplace; return a new block (of a possibly different dtype) indexer is a direct slice/positional indexer; value must be a compatible shape """ # coerce None values, if appropriate if value is None: if self.is_numeric: value = np.nan # coerce args values, value = self._try_coerce_args(self.values, value) arr_value = np.array(value) # cast the values to a type that can hold nan (if necessary) if not self._can_hold_element(value): dtype, _ = com._maybe_promote(arr_value.dtype) values = values.astype(dtype) transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) values = transf(values) l = len(values) # length checking # boolean with truth values == len of the value is ok too if isinstance(indexer, (np.ndarray, list)): if is_list_like(value) and len(indexer) != len(value): if not (isinstance(indexer, np.ndarray) and indexer.dtype == np.bool_ and len(indexer[indexer]) == len(value)): raise ValueError("cannot set using a list-like indexer " "with a different length than the value") # slice elif isinstance(indexer, slice): if is_list_like(value) and l: if len(value) != length_of_indexer(indexer, values): raise ValueError("cannot set using a slice indexer with a " "different length than the value") try: def _is_scalar_indexer(indexer): # return True if we are all scalar indexers if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) return all([ np.isscalar(idx) for idx in indexer ]) return False def _is_empty_indexer(indexer): # return a boolean if we have an empty indexer if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) return False # empty indexers # 8669 (empty) if _is_empty_indexer(indexer): pass # setting a single element for each dim and with a rhs that could be say a list # GH 6043 elif _is_scalar_indexer(indexer): values[indexer] = value # if we are an exact match (ex-broadcasting), # then use the resultant dtype elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape): values[indexer] = value values = values.astype(arr_value.dtype) # set else: values[indexer] = value # coerce and try to infer the dtypes of the result if np.isscalar(value): dtype, _ = _infer_dtype_from_scalar(value) else: dtype = 'infer' values = self._try_coerce_and_cast_result(values, dtype) block = make_block(transf(values), ndim=self.ndim, placement=self.mgr_locs, fastpath=True) # may have to soft convert_objects here if block.is_object and not self.is_object: block = block.convert(convert_numeric=False) return block except (ValueError, TypeError) as detail: raise except Exception as detail: pass return [self] def putmask(self, mask, new, align=True, inplace=False): """ putmask the data to the block; it is possible that we may create a new dtype of block return the resulting block(s) Parameters ---------- mask : the condition to respect new : a ndarray/object align : boolean, perform alignment on other/cond, default is True inplace : perform inplace modification, default is False Returns ------- a new block(s), the result of the putmask """ new_values 
= self.values if inplace else self.values.copy() # may need to align the new if hasattr(new, 'reindex_axis'): new = new.values.T # may need to align the mask if hasattr(mask, 'reindex_axis'): mask = mask.values.T # if we are passed a scalar None, convert it here if not is_list_like(new) and isnull(new) and not self.is_object: new = self.fill_value if self._can_hold_element(new): new = self._try_cast(new) # pseudo-broadcast if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1: new = np.repeat(new, self.shape[-1]).reshape(self.shape) np.putmask(new_values, mask, new) # maybe upcast me elif mask.any(): # need to go column by column new_blocks = [] if self.ndim > 1: for i, ref_loc in enumerate(self.mgr_locs): m = mask[i] v = new_values[i] # need a new block if m.any(): n = new[i] if isinstance( new, np.ndarray) else np.array(new) # type of the new block dtype, _ = com._maybe_promote(n.dtype) # we need to exiplicty astype here to make a copy n = n.astype(dtype) nv = _putmask_smart(v, m, n) else: nv = v if inplace else v.copy() # Put back the dimension that was taken from it and make # a block out of the result. block = make_block(values=nv[np.newaxis], placement=[ref_loc], fastpath=True) new_blocks.append(block) else: nv = _putmask_smart(new_values, mask, new) new_blocks.append(make_block(values=nv, placement=self.mgr_locs, fastpath=True)) return new_blocks if inplace: return [self] return [make_block(new_values, placement=self.mgr_locs, fastpath=True)] def interpolate(self, method='pad', axis=0, index=None, values=None, inplace=False, limit=None, fill_value=None, coerce=False, downcast=None, **kwargs): def check_int_bool(self, inplace): # Only FloatBlocks will contain NaNs. # timedelta subclasses IntBlock if (self.is_bool or self.is_integer) and not self.is_timedelta: if inplace: return self else: return self.copy() # a fill na type method try: m = com._clean_fill_method(method) except: m = None if m is not None: r = check_int_bool(self, inplace) if r is not None: return r return self._interpolate_with_fill(method=m, axis=axis, inplace=inplace, limit=limit, fill_value=fill_value, coerce=coerce, downcast=downcast) # try an interp method try: m = com._clean_interp_method(method, **kwargs) except: m = None if m is not None: r = check_int_bool(self, inplace) if r is not None: return r return self._interpolate(method=m, index=index, values=values, axis=axis, limit=limit, fill_value=fill_value, inplace=inplace, downcast=downcast, **kwargs) raise ValueError("invalid method '{0}' to interpolate.".format(method)) def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, coerce=False, downcast=None): """ fillna but using the interpolate machinery """ # if we are coercing, then don't force the conversion # if the block can't hold the type if coerce: if not self._can_hold_na: if inplace: return [self] else: return [self.copy()] fill_value = self._try_fill(fill_value) values = self.values if inplace else self.values.copy() values = self._try_operate(values) values = com.interpolate_2d(values, method=method, axis=axis, limit=limit, fill_value=fill_value, dtype=self.dtype) values = self._try_coerce_result(values) blocks = [make_block(values, ndim=self.ndim, klass=self.__class__, fastpath=True, placement=self.mgr_locs)] return self._maybe_downcast(blocks, downcast) def _interpolate(self, method=None, index=None, values=None, fill_value=None, axis=0, limit=None, inplace=False, downcast=None, **kwargs): """ interpolate using scipy wrappers """ data = self.values 
if inplace else self.values.copy() # only deal with floats if not self.is_float: if not self.is_integer: return self data = data.astype(np.float64) if fill_value is None: fill_value = self.fill_value if method in ('krogh', 'piecewise_polynomial', 'pchip'): if not index.is_monotonic: raise ValueError("{0} interpolation requires that the " "index be monotonic.".format(method)) # process 1-d slices in the axis direction def func(x): # process a 1-d slice, returning it # should the axis argument be handled below in apply_along_axis? # i.e. not an arg to com.interpolate_1d return com.interpolate_1d(index, x, method=method, limit=limit, fill_value=fill_value, bounds_error=False, **kwargs) # interp each column independently interp_values = np.apply_along_axis(func, axis, data) blocks = [make_block(interp_values, ndim=self.ndim, klass=self.__class__, fastpath=True, placement=self.mgr_locs)] return self._maybe_downcast(blocks, downcast) def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): """ Take values according to indexer and return them as a block.bb """ if fill_tuple is None: fill_value = self.fill_value new_values = com.take_nd(self.get_values(), indexer, axis=axis, allow_fill=False) else: fill_value = fill_tuple[0] new_values = com.take_nd(self.get_values(), indexer, axis=axis, allow_fill=True, fill_value=fill_value) if new_mgr_locs is None: if axis == 0: slc = lib.indexer_as_slice(indexer) if slc is not None: new_mgr_locs = self.mgr_locs[slc] else: new_mgr_locs = self.mgr_locs[indexer] else: new_mgr_locs = self.mgr_locs if new_values.dtype != self.dtype: return make_block(new_values, new_mgr_locs) else: return self.make_block_same_class(new_values, new_mgr_locs) def get_values(self, dtype=None): return self.values def diff(self, n, axis=1): """ return block for the diff of the values """ new_values = com.diff(self.values, n, axis=axis) return [make_block(values=new_values, ndim=self.ndim, fastpath=True, placement=self.mgr_locs)] def shift(self, periods, axis=0): """ shift the block by periods, possibly upcast """ # convert integer to float if necessary. 
need to do a lot more than # that, handle boolean etc also new_values, fill_value = com._maybe_upcast(self.values) # make sure array sent to np.roll is c_contiguous f_ordered = new_values.flags.f_contiguous if f_ordered: new_values = new_values.T axis = new_values.ndim - axis - 1 if np.prod(new_values.shape): new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis) axis_indexer = [ slice(None) ] * self.ndim if periods > 0: axis_indexer[axis] = slice(None,periods) else: axis_indexer[axis] = slice(periods,None) new_values[tuple(axis_indexer)] = fill_value # restore original order if f_ordered: new_values = new_values.T return [make_block(new_values, ndim=self.ndim, fastpath=True, placement=self.mgr_locs)] def eval(self, func, other, raise_on_error=True, try_cast=False): """ evaluate the block; return result block from the result Parameters ---------- func : how to combine self, other other : a ndarray/object raise_on_error : if True, raise when I can't perform the function, False by default (and just return the data that we had coming in) Returns ------- a new block, the result of the func """ values = self.values if hasattr(other, 'reindex_axis'): other = other.values # make sure that we can broadcast is_transposed = False if hasattr(other, 'ndim') and hasattr(values, 'ndim'): if values.ndim != other.ndim: is_transposed = True else: if values.shape == other.shape[::-1]: is_transposed = True elif values.shape[0] == other.shape[-1]: is_transposed = True else: # this is a broadcast error heree raise ValueError("cannot broadcast shape [%s] with block " "values [%s]" % (values.T.shape, other.shape)) transf = (lambda x: x.T) if is_transposed else (lambda x: x) # coerce/transpose the args if needed values, other = self._try_coerce_args(transf(values), other) # get the result, may need to transpose the other def get_result(other): return self._try_coerce_result(func(values, other)) # error handler if we have an issue operating with the function def handle_error(): if raise_on_error: raise TypeError('Could not operate %s with block values %s' % (repr(other), str(detail))) else: # return the values result = np.empty(values.shape, dtype='O') result.fill(np.nan) return result # get the result try: result = get_result(other) # if we have an invalid shape/broadcast error # GH4576, so raise instead of allowing to pass through except ValueError as detail: raise except Exception as detail: result = handle_error() # technically a broadcast error in numpy can 'work' by returning a # boolean False if not isinstance(result, np.ndarray): if not isinstance(result, np.ndarray): # differentiate between an invalid ndarray-ndarray comparison # and an invalid type comparison if isinstance(values, np.ndarray) and is_list_like(other): raise ValueError('Invalid broadcasting comparison [%s] ' 'with block values' % repr(other)) raise TypeError('Could not compare [%s] with block values' % repr(other)) # transpose if needed result = transf(result) # try to cast if requested if try_cast: result = self._try_cast_result(result) return [make_block(result, ndim=self.ndim, fastpath=True, placement=self.mgr_locs)] def where(self, other, cond, align=True, raise_on_error=True, try_cast=False): """ evaluate the block; return result block(s) from the result Parameters ---------- other : a ndarray/object cond : the condition to respect align : boolean, perform alignment on other/cond raise_on_error : if True, raise when I can't perform the function, False by default (and just return the data that we had coming in) 
Returns ------- a new block(s), the result of the func """ values = self.values # see if we can align other if hasattr(other, 'reindex_axis'): other = other.values # make sure that we can broadcast is_transposed = False if hasattr(other, 'ndim') and hasattr(values, 'ndim'): if values.ndim != other.ndim or values.shape == other.shape[::-1]: # if its symmetric are ok, no reshaping needed (GH 7506) if (values.shape[0] == np.array(values.shape)).all(): pass # pseodo broadcast (its a 2d vs 1d say and where needs it in a # specific direction) elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and values.shape[0] != other.shape[0]): other = _block_shape(other).T else: values = values.T is_transposed = True # see if we can align cond if not hasattr(cond, 'shape'): raise ValueError( "where must have a condition that is ndarray like") if hasattr(cond, 'reindex_axis'): cond = cond.values # may need to undo transpose of values if hasattr(values, 'ndim'): if values.ndim != cond.ndim or values.shape == cond.shape[::-1]: values = values.T is_transposed = not is_transposed other = _maybe_convert_string_to_object(other) # our where function def func(c, v, o): if c.ravel().all(): return v v, o = self._try_coerce_args(v, o) try: return self._try_coerce_result( expressions.where(c, v, o, raise_on_error=True) ) except Exception as detail: if raise_on_error: raise TypeError('Could not operate [%s] with block values ' '[%s]' % (repr(o), str(detail))) else: # return the values result = np.empty(v.shape, dtype='float64') result.fill(np.nan) return result # see if we can operate on the entire block, or need item-by-item # or if we are a single block (ndim == 1) result = func(cond, values, other) if self._can_hold_na or self.ndim == 1: if not isinstance(result, np.ndarray): raise TypeError('Could not compare [%s] with block values' % repr(other)) if is_transposed: result = result.T # try to cast if requested if try_cast: result = self._try_cast_result(result) return make_block(result, ndim=self.ndim, placement=self.mgr_locs) # might need to separate out blocks axis = cond.ndim - 1 cond = cond.swapaxes(axis, 0) mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool) result_blocks = [] for m in [mask, ~mask]: if m.any(): r = self._try_cast_result( result.take(m.nonzero()[0], axis=axis)) result_blocks.append(make_block(r.T, placement=self.mgr_locs[m])) return result_blocks def equals(self, other): if self.dtype != other.dtype or self.shape != other.shape: return False return array_equivalent(self.values, other.values) class NonConsolidatableMixIn(object): """ hold methods for the nonconsolidatable blocks """ _can_consolidate = False _verify_integrity = False _validate_ndim = False _holder = None def __init__(self, values, placement, ndim=None, fastpath=False,): # Placement must be converted to BlockPlacement via property setter # before ndim logic, because placement may be a slice which doesn't # have a length. 
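        # Illustrative aside (editor's note, not in the original source): a raw
        # ``slice(0, 3)`` does not support ``len()``, whereas the BlockPlacement
        # produced by the property setter does (len == 3 in that case), which is
        # what the ndim fallback below relies on.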
self.mgr_locs = placement # kludgetastic if ndim is None: if len(self.mgr_locs) != 1: ndim = 1 else: ndim = 2 self.ndim = ndim if not isinstance(values, self._holder): raise TypeError("values must be {0}".format(self._holder.__name__)) self.values = values def get_values(self, dtype=None): """ need to to_dense myself (and always return a ndim sized object) """ values = self.values.to_dense() if values.ndim == self.ndim - 1: values = values.reshape((1,) + values.shape) return values def iget(self, col): if self.ndim == 2 and isinstance(col, tuple): col, loc = col if col != 0: raise IndexError("{0} only contains one item".format(self)) return self.values[loc] else: if col != 0: raise IndexError("{0} only contains one item".format(self)) return self.values def should_store(self, value): return isinstance(value, self._holder) def set(self, locs, values, check=False): assert locs.tolist() == [0] self.values = values def get(self, item): if self.ndim == 1: loc = self.items.get_loc(item) return self.values[loc] else: return self.values def _slice(self, slicer): """ return a slice of my values (but densify first) """ return self.get_values()[slicer] def _try_cast_result(self, result, dtype=None): return result class NumericBlock(Block): __slots__ = () is_numeric = True _can_hold_na = True class FloatOrComplexBlock(NumericBlock): __slots__ = () def equals(self, other): if self.dtype != other.dtype or self.shape != other.shape: return False left, right = self.values, other.values return ((left == right) | (np.isnan(left) & np.isnan(right))).all() class FloatBlock(FloatOrComplexBlock): __slots__ = () is_float = True _downcast_dtype = 'int64' def _can_hold_element(self, element): if is_list_like(element): element = np.array(element) tipo = element.dtype.type return issubclass(tipo, (np.floating, np.integer)) and not issubclass( tipo, (np.datetime64, np.timedelta64)) return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance( element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64)) def _try_cast(self, element): try: return float(element) except: # pragma: no cover return element def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.', quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: values = values[:, slicer] mask = isnull(values) formatter = None if float_format and decimal != '.': formatter = lambda v : (float_format % v).replace('.',decimal,1) elif decimal != '.': formatter = lambda v : ('%g' % v).replace('.',decimal,1) elif float_format: formatter = lambda v : float_format % v if formatter is None and not quoting: values = values.astype(str) else: values = np.array(values, dtype='object') values[mask] = na_rep if formatter: imask = (~mask).ravel() values.flat[imask] = np.array( [formatter(val) for val in values.ravel()[imask]]) return values def should_store(self, value): # when inserting a column should not coerce integers to floats # unnecessarily return (issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype) class ComplexBlock(FloatOrComplexBlock): __slots__ = () is_complex = True def _can_hold_element(self, element): if is_list_like(element): element = np.array(element) return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating)) return (isinstance(element, (float, int, complex, np.float_, np.int_)) and not isinstance(bool, np.bool_)) def _try_cast(self, element): try: return complex(element) except: # 
pragma: no cover return element def should_store(self, value): return issubclass(value.dtype.type, np.complexfloating) class IntBlock(NumericBlock): __slots__ = () is_integer = True _can_hold_na = False def _can_hold_element(self, element): if is_list_like(element): element = np.array(element) tipo = element.dtype.type return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64)) return com.is_integer(element) def _try_cast(self, element): try: return int(element) except: # pragma: no cover return element def should_store(self, value): return com.is_integer_dtype(value) and value.dtype == self.dtype class TimeDeltaBlock(IntBlock): __slots__ = () is_timedelta = True _can_hold_na = True is_numeric = False @property def fill_value(self): return tslib.iNaT def _try_fill(self, value): """ if we are a NaT, return the actual fill value """ if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all(): value = tslib.iNaT elif isinstance(value, Timedelta): value = value.value elif isinstance(value, np.timedelta64): pass elif com.is_integer(value): # coerce to seconds of timedelta value = np.timedelta64(int(value * 1e9)) elif isinstance(value, timedelta): value = np.timedelta64(value) return value def _try_coerce_args(self, values, other): """ Coerce values and other to float64, with null values converted to NaN. values is always ndarray-like, other may not be """ def masker(v): mask = isnull(v) v = v.astype('float64') v[mask] = np.nan return v values = masker(values) if is_null_datelike_scalar(other): other = np.nan elif isinstance(other, (np.timedelta64, Timedelta, timedelta)): other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item() if other == tslib.iNaT: other = np.nan elif lib.isscalar(other): other = np.float64(other) else: other = masker(other) return values, other def _try_operate(self, values): """ return a version to operate on """ return values.view('i8') def _try_coerce_result(self, result): """ reverse of try_coerce_args / try_operate """ if isinstance(result, np.ndarray): mask = isnull(result) if result.dtype.kind in ['i', 'f', 'O']: result = result.astype('m8[ns]') result[mask] = tslib.iNaT elif isinstance(result, np.integer): result = lib.Timedelta(result) return result def should_store(self, value): return issubclass(value.dtype.type, np.timedelta64) def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: values = values[:, slicer] mask = isnull(values) rvalues = np.empty(values.shape, dtype=object) if na_rep is None: na_rep = 'NaT' rvalues[mask] = na_rep imask = (~mask).ravel() #### FIXME #### # should use the core.format.Timedelta64Formatter here # to figure what format to pass to the Timedelta # e.g. 
to not show the decimals say rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all') for val in values.ravel()[imask]], dtype=object) return rvalues def get_values(self, dtype=None): # return object dtypes as Timedelta if dtype == object: return lib.map_infer(self.values.ravel(), lib.Timedelta ).reshape(self.values.shape) return self.values class BoolBlock(NumericBlock): __slots__ = () is_bool = True _can_hold_na = False def _can_hold_element(self, element): if is_list_like(element): element = np.array(element) return issubclass(element.dtype.type, np.integer) return isinstance(element, (int, bool)) def _try_cast(self, element): try: return bool(element) except: # pragma: no cover return element def should_store(self, value): return issubclass(value.dtype.type, np.bool_) def replace(self, to_replace, value, inplace=False, filter=None, regex=False): to_replace_values = np.atleast_1d(to_replace) if not np.can_cast(to_replace_values, bool): return self return super(BoolBlock, self).replace(to_replace, value, inplace=inplace, filter=filter, regex=regex) class ObjectBlock(Block): __slots__ = () is_object = True _can_hold_na = True def __init__(self, values, ndim=2, fastpath=False, placement=None): if issubclass(values.dtype.type, compat.string_types): values = np.array(values, dtype=object) super(ObjectBlock, self).__init__(values, ndim=ndim, fastpath=fastpath, placement=placement) @property def is_bool(self): """ we can be a bool if we have only bool values but are of type object """ return lib.is_bool_array(self.values.ravel()) def convert(self, convert_dates=True, convert_numeric=True, convert_timedeltas=True, copy=True, by_item=True): """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we ARE an ObjectBlock!!!!! can return multiple blocks! 
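        For example (illustrative, added by the editor): an object block whose
        values are really datetimes can come back as a DatetimeBlock, and a
        multi-column object block is converted item by item, yielding one new
        block per column.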
""" # attempt to create new type blocks blocks = [] if by_item and not self._is_single_block: for i, rl in enumerate(self.mgr_locs): values = self.iget(i) values = com._possibly_convert_objects( values.ravel(), convert_dates=convert_dates, convert_numeric=convert_numeric, convert_timedeltas=convert_timedeltas, ).reshape(values.shape) values = _block_shape(values, ndim=self.ndim) newb = make_block(values, ndim=self.ndim, placement=[rl]) blocks.append(newb) else: values = com._possibly_convert_objects( self.values.ravel(), convert_dates=convert_dates, convert_numeric=convert_numeric ).reshape(self.values.shape) blocks.append(make_block(values, ndim=self.ndim, placement=self.mgr_locs)) return blocks def set(self, locs, values, check=False): """ Modify Block in-place with new item value Returns ------- None """ # GH6026 if check: try: if (self.values[locs] == values).all(): return except: pass try: self.values[locs] = values except (ValueError): # broadcasting error # see GH6171 new_shape = list(values.shape) new_shape[0] = len(self.items) self.values = np.empty(tuple(new_shape),dtype=self.dtype) self.values.fill(np.nan) self.values[locs] = values def _maybe_downcast(self, blocks, downcast=None): if downcast is not None: return blocks # split and convert the blocks result_blocks = [] for blk in blocks: result_blocks.extend(blk.convert(convert_dates=True, convert_numeric=False)) return result_blocks def _can_hold_element(self, element): return True def _try_cast(self, element): return element def should_store(self, value): return not (issubclass(value.dtype.type, (np.integer, np.floating, np.complexfloating, np.datetime64, np.bool_)) or com.is_categorical_dtype(value)) def replace(self, to_replace, value, inplace=False, filter=None, regex=False): blk = [self] to_rep_is_list = com.is_list_like(to_replace) value_is_list = com.is_list_like(value) both_lists = to_rep_is_list and value_is_list either_list = to_rep_is_list or value_is_list if not either_list and com.is_re(to_replace): blk[0], = blk[0]._replace_single(to_replace, value, inplace=inplace, filter=filter, regex=True) elif not (either_list or regex): blk = super(ObjectBlock, self).replace(to_replace, value, inplace=inplace, filter=filter, regex=regex) elif both_lists: for to_rep, v in zip(to_replace, value): blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace, filter=filter, regex=regex) elif to_rep_is_list and regex: for to_rep in to_replace: blk[0], = blk[0]._replace_single(to_rep, value, inplace=inplace, filter=filter, regex=regex) else: blk[0], = blk[0]._replace_single(to_replace, value, inplace=inplace, filter=filter, regex=regex) return blk def _replace_single(self, to_replace, value, inplace=False, filter=None, regex=False): # to_replace is regex compilable to_rep_re = regex and com.is_re_compilable(to_replace) # regex is regex compilable regex_re = com.is_re_compilable(regex) # only one will survive if to_rep_re and regex_re: raise AssertionError('only one of to_replace and regex can be ' 'regex compilable') # if regex was passed as something that can be a regex (rather than a # boolean) if regex_re: to_replace = regex regex = regex_re or to_rep_re # try to get the pattern attribute (compiled re) or it's a string try: pattern = to_replace.pattern except AttributeError: pattern = to_replace # if the pattern is not empty and to_replace is either a string or a # regex if regex and pattern: rx = re.compile(to_replace) else: # if the thing to replace is not a string or compiled regex call # the superclass method -> to_replace is 
some kind of object result = super(ObjectBlock, self).replace(to_replace, value, inplace=inplace, filter=filter, regex=regex) if not isinstance(result, list): result = [result] return result new_values = self.values if inplace else self.values.copy() # deal with replacing values with objects (strings) that match but # whose replacement is not a string (numeric, nan, object) if isnull(value) or not isinstance(value, compat.string_types): def re_replacer(s): try: return value if rx.search(s) is not None else s except TypeError: return s else: # value is guaranteed to be a string here, s can be either a string # or null if it's null it gets returned def re_replacer(s): try: return rx.sub(value, s) except TypeError: return s f = np.vectorize(re_replacer, otypes=[self.dtype]) if filter is None: filt = slice(None) else: filt = self.mgr_locs.isin(filter).nonzero()[0] new_values[filt] = f(new_values[filt]) return [self if inplace else make_block(new_values, fastpath=True, placement=self.mgr_locs)] class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock): __slots__ = () is_categorical = True _can_hold_na = True _holder = Categorical def __init__(self, values, placement, fastpath=False, **kwargs): # coerce to categorical if we can super(CategoricalBlock, self).__init__(maybe_to_categorical(values), fastpath=True, placement=placement, **kwargs) @property def is_view(self): """ I am never a view """ return False def to_dense(self): return self.values.to_dense().view() @property def shape(self): return (len(self.mgr_locs), len(self.values)) @property def array_dtype(self): """ the dtype to return if I want to construct this block as an array """ return np.object_ def _slice(self, slicer): """ return a slice of my values """ # slice the category # return same dims as we currently have return self.values._slice(slicer) def fillna(self, value, limit=None, inplace=False, downcast=None): # we may need to upcast our fill to match our dtype if limit is not None: raise NotImplementedError("specifying a limit for 'fillna' has " "not been implemented yet") values = self.values if inplace else self.values.copy() return [self.make_block_same_class(values=values.fillna(value=value, limit=limit), placement=self.mgr_locs)] def interpolate(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, **kwargs): values = self.values if inplace else self.values.copy() return self.make_block_same_class(values=values.fillna(fill_value=fill_value, method=method, limit=limit), placement=self.mgr_locs) def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): """ Take values according to indexer and return them as a block.bb """ if fill_tuple is None: fill_value = None else: fill_value = fill_tuple[0] # axis doesn't matter; we are really a single-dim object # but are passed the axis depending on the calling routing # if its REALLY axis 0, then this will be a reindex and not a take new_values = self.values.take_nd(indexer, fill_value=fill_value) # if we are a 1-dim object, then always place at 0 if self.ndim == 1: new_mgr_locs = [0] else: if new_mgr_locs is None: new_mgr_locs = self.mgr_locs return self.make_block_same_class(new_values, new_mgr_locs) def putmask(self, mask, new, align=True, inplace=False): """ putmask the data to the block; it is possible that we may create a new dtype of block return the resulting block(s) Parameters ---------- mask : the condition to respect new : a ndarray/object align : boolean, perform alignment on other/cond, default is True inplace : perform inplace 
modification, default is False Returns ------- a new block(s), the result of the putmask """ new_values = self.values if inplace else self.values.copy() new_values[mask] = new return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)] def _astype(self, dtype, copy=False, raise_on_error=True, values=None, klass=None): """ Coerce to the new type (if copy=True, return a new copy) raise on an except if raise == True """ if self.is_categorical_astype(dtype): values = self.values else: values = np.asarray(self.values).astype(dtype, copy=False) if copy: values = values.copy() return make_block(values, ndim=self.ndim, placement=self.mgr_locs) def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: # Categorical is always one dimension values = values[slicer] mask = isnull(values) values = np.array(values, dtype='object') values[mask] = na_rep # we are expected to return a 2-d ndarray return values.reshape(1,len(values)) class DatetimeBlock(Block): __slots__ = () is_datetime = True _can_hold_na = True def __init__(self, values, placement, fastpath=False, **kwargs): if values.dtype != _NS_DTYPE: values = tslib.cast_to_nanoseconds(values) super(DatetimeBlock, self).__init__(values, fastpath=True, placement=placement, **kwargs) def _can_hold_element(self, element): if is_list_like(element): element = np.array(element) return element.dtype == _NS_DTYPE or element.dtype == np.int64 return (com.is_integer(element) or isinstance(element, datetime) or isnull(element)) def _try_cast(self, element): try: return int(element) except: return element def _try_operate(self, values): """ return a version to operate on """ return values.view('i8') def _try_coerce_args(self, values, other): """ Coerce values and other to dtype 'i8'. NaN and NaT convert to the smallest i8, and will correctly round-trip to NaT if converted back in _try_coerce_result. 
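        For instance (illustrative, added by the editor): a ``datetime.datetime``
        scalar passed as ``other`` is coerced through ``Timestamp(other).asm8``
        and viewed as its nanosecond 'i8' value.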
values is always ndarray-like, other may not be """ values = values.view('i8') if is_null_datelike_scalar(other): other = tslib.iNaT elif isinstance(other, datetime): other = lib.Timestamp(other).asm8.view('i8') elif hasattr(other, 'dtype') and com.is_integer_dtype(other): other = other.view('i8') else: other = np.array(other, dtype='i8') return values, other def _try_coerce_result(self, result): """ reverse of try_coerce_args """ if isinstance(result, np.ndarray): if result.dtype.kind in ['i', 'f', 'O']: result = result.astype('M8[ns]') elif isinstance(result, (np.integer, np.datetime64)): result = lib.Timestamp(result) return result @property def fill_value(self): return tslib.iNaT def _try_fill(self, value): """ if we are a NaT, return the actual fill value """ if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all(): value = tslib.iNaT return value def fillna(self, value, limit=None, inplace=False, downcast=None): # straight putmask here values = self.values if inplace else self.values.copy() mask = isnull(self.values) value = self._try_fill(value) if limit is not None: if self.ndim > 2: raise NotImplementedError("number of dimensions for 'fillna' " "is currently limited to 2") mask[mask.cumsum(self.ndim-1)>limit]=False np.putmask(values, mask, value) return [self if inplace else make_block(values, fastpath=True, placement=self.mgr_locs)] def to_native_types(self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: values = values[:, slicer] from pandas.core.format import _get_format_datetime64_from_values format = _get_format_datetime64_from_values(values, date_format) result = tslib.format_array_from_datetime(values.view('i8').ravel(), tz=None, format=format, na_rep=na_rep).reshape(values.shape) return result def should_store(self, value): return issubclass(value.dtype.type, np.datetime64) def set(self, locs, values, check=False): """ Modify Block in-place with new item value Returns ------- None """ if values.dtype != _NS_DTYPE: # Workaround for numpy 1.6 bug values = tslib.cast_to_nanoseconds(values) self.values[locs] = values def get_values(self, dtype=None): # return object dtype as Timestamps if dtype == object: return lib.map_infer(self.values.ravel(), lib.Timestamp)\ .reshape(self.values.shape) return self.values class SparseBlock(NonConsolidatableMixIn, Block): """ implement as a list of sparse arrays of the same dtype """ __slots__ = () is_sparse = True is_numeric = True _can_hold_na = True _ftype = 'sparse' _holder = SparseArray @property def shape(self): return (len(self.mgr_locs), self.sp_index.length) @property def itemsize(self): return self.dtype.itemsize @property def fill_value(self): #return np.nan return self.values.fill_value @fill_value.setter def fill_value(self, v): # we may need to upcast our fill to match our dtype if issubclass(self.dtype.type, np.floating): v = float(v) self.values.fill_value = v @property def sp_values(self): return self.values.sp_values @sp_values.setter def sp_values(self, v): # reset the sparse values self.values = SparseArray(v, sparse_index=self.sp_index, kind=self.kind, dtype=v.dtype, fill_value=self.values.fill_value, copy=False) @property def sp_index(self): return self.values.sp_index @property def kind(self): return self.values.kind def __len__(self): try: return self.sp_index.length except: return 0 def copy(self, deep=True): return self.make_block_same_class(values=self.values, 
sparse_index=self.sp_index, kind=self.kind, copy=deep, placement=self.mgr_locs) def make_block_same_class(self, values, placement, sparse_index=None, kind=None, dtype=None, fill_value=None, copy=False, fastpath=True): """ return a new block """ if dtype is None: dtype = self.dtype if fill_value is None: fill_value = self.values.fill_value # if not isinstance(values, SparseArray) and values.ndim != self.ndim: # raise ValueError("ndim mismatch") if values.ndim == 2: nitems = values.shape[0] if nitems == 0: # kludgy, but SparseBlocks cannot handle slices, where the # output is 0-item, so let's convert it to a dense block: it # won't take space since there's 0 items, plus it will preserve # the dtype. return make_block(np.empty(values.shape, dtype=dtype), placement, fastpath=True,) elif nitems > 1: raise ValueError("Only 1-item 2d sparse blocks are supported") else: values = values.reshape(values.shape[1]) new_values = SparseArray(values, sparse_index=sparse_index, kind=kind or self.kind, dtype=dtype, fill_value=fill_value, copy=copy) return make_block(new_values, ndim=self.ndim, fastpath=fastpath, placement=placement) def interpolate(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, **kwargs): values = com.interpolate_2d( self.values.to_dense(), method, axis, limit, fill_value) return self.make_block_same_class(values=values, placement=self.mgr_locs) def fillna(self, value, limit=None, inplace=False, downcast=None): # we may need to upcast our fill to match our dtype if limit is not None: raise NotImplementedError("specifying a limit for 'fillna' has " "not been implemented yet") if issubclass(self.dtype.type, np.floating): value = float(value) values = self.values if inplace else self.values.copy() return [self.make_block_same_class(values=values.get_values(value), fill_value=value, placement=self.mgr_locs)] def shift(self, periods, axis=0): """ shift the block by periods """ N = len(self.values.T) indexer = np.zeros(N, dtype=int) if periods > 0: indexer[periods:] = np.arange(N - periods) else: indexer[:periods] = np.arange(-periods, N) new_values = self.values.to_dense().take(indexer) # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also new_values, fill_value = com._maybe_upcast(new_values) if periods > 0: new_values[:periods] = fill_value else: new_values[periods:] = fill_value return [self.make_block_same_class(new_values, placement=self.mgr_locs)] def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, limit=None, mask_info=None): """ Reindex using pre-computed indexer information """ if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) # taking on the 0th axis always here if fill_value is None: fill_value = self.fill_value return self.make_block_same_class(self.values.take(indexer), fill_value=fill_value, placement=self.mgr_locs) def sparse_reindex(self, new_index): """ sparse reindex and return a new block current reindex only works for float64 dtype! 
""" values = self.values values = values.sp_index.to_int_index().reindex( values.sp_values.astype('float64'), values.fill_value, new_index) return self.make_block_same_class(values, sparse_index=new_index, placement=self.mgr_locs) def make_block(values, placement, klass=None, ndim=None, dtype=None, fastpath=False): if klass is None: dtype = dtype or values.dtype vtype = dtype.type if isinstance(values, SparseArray): klass = SparseBlock elif issubclass(vtype, np.floating): klass = FloatBlock elif (issubclass(vtype, np.integer) and issubclass(vtype, np.timedelta64)): klass = TimeDeltaBlock elif (issubclass(vtype, np.integer) and not issubclass(vtype, np.datetime64)): klass = IntBlock elif dtype == np.bool_: klass = BoolBlock elif issubclass(vtype, np.datetime64): klass = DatetimeBlock elif issubclass(vtype, np.complexfloating): klass = ComplexBlock elif is_categorical(values): klass = CategoricalBlock else: klass = ObjectBlock return klass(values, ndim=ndim, fastpath=fastpath, placement=placement) # TODO: flexible with index=None and/or items=None class BlockManager(PandasObject): """ Core internal data structure to implement DataFrame Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a lightweight blocked set of labeled data to be manipulated by the DataFrame public API class Attributes ---------- shape ndim axes values items Methods ------- set_axis(axis, new_labels) copy(deep=True) get_dtype_counts get_ftype_counts get_dtypes get_ftypes apply(func, axes, block_filter_fn) get_bool_data get_numeric_data get_slice(slice_like, axis) get(label) iget(loc) get_scalar(label_tup) take(indexer, axis) reindex_axis(new_labels, axis) reindex_indexer(new_labels, indexer, axis) delete(label) insert(loc, label, value) set(label, value) Parameters ---------- Notes ----- This is *not* a public API class """ __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', '_is_consolidated', '_blknos', '_blklocs'] def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True): self.axes = [_ensure_index(ax) for ax in axes] self.blocks = tuple(blocks) for block in blocks: if block.is_sparse: if len(block.mgr_locs) != 1: raise AssertionError("Sparse block refers to multiple items") else: if self.ndim != block.ndim: raise AssertionError(('Number of Block dimensions (%d) must ' 'equal number of axes (%d)') % (block.ndim, self.ndim)) if do_integrity_check: self._verify_integrity() self._consolidate_check() self._rebuild_blknos_and_blklocs() def make_empty(self, axes=None): """ return an empty BlockManager with the items axis of len 0 """ if axes is None: axes = [_ensure_index([])] + [ _ensure_index(a) for a in self.axes[1:] ] # preserve dtype if possible if self.ndim == 1: blocks = np.array([], dtype=self.array_dtype) else: blocks = [] return self.__class__(blocks, axes) def __nonzero__(self): return True # Python3 compat __bool__ = __nonzero__ @property def shape(self): return tuple(len(ax) for ax in self.axes) @property def ndim(self): return len(self.axes) def set_axis(self, axis, new_labels): new_labels = _ensure_index(new_labels) old_len = len(self.axes[axis]) new_len = len(new_labels) if new_len != old_len: raise ValueError('Length mismatch: Expected axis has %d elements, ' 'new values have %d elements' % (old_len, new_len)) self.axes[axis] = new_labels def rename_axis(self, mapper, axis, copy=True): """ Rename one of axes. 
Parameters ---------- mapper : unary callable axis : int copy : boolean, default True """ obj = self.copy(deep=copy) obj.set_axis(axis, _transform_index(self.axes[axis], mapper)) return obj def add_prefix(self, prefix): f = (str(prefix) + '%s').__mod__ return self.rename_axis(f, axis=0) def add_suffix(self, suffix): f = ('%s' + str(suffix)).__mod__ return self.rename_axis(f, axis=0) @property def _is_single_block(self): if self.ndim == 1: return True if len(self.blocks) != 1: return False blk = self.blocks[0] return (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(0, len(self), 1)) def _rebuild_blknos_and_blklocs(self): """ Update mgr._blknos / mgr._blklocs. """ new_blknos = np.empty(self.shape[0], dtype=np.int64) new_blklocs = np.empty(self.shape[0], dtype=np.int64) new_blknos.fill(-1) new_blklocs.fill(-1) for blkno, blk in enumerate(self.blocks): rl = blk.mgr_locs new_blknos[rl.indexer] = blkno new_blklocs[rl.indexer] = np.arange(len(rl)) if (new_blknos == -1).any(): raise AssertionError("Gaps in blk ref_locs") self._blknos = new_blknos self._blklocs = new_blklocs # make items read only for now def _get_items(self): return self.axes[0] items = property(fget=_get_items) def _get_counts(self, f): """ return a dict of the counts of the function in BlockManager """ self._consolidate_inplace() counts = dict() for b in self.blocks: v = f(b) counts[v] = counts.get(v, 0) + b.shape[0] return counts def get_dtype_counts(self): return self._get_counts(lambda b: b.dtype.name) def get_ftype_counts(self): return self._get_counts(lambda b: b.ftype) def get_dtypes(self): dtypes = np.array([blk.dtype for blk in self.blocks]) return com.take_1d(dtypes, self._blknos, allow_fill=False) def get_ftypes(self): ftypes = np.array([blk.ftype for blk in self.blocks]) return com.take_1d(ftypes, self._blknos, allow_fill=False) def __getstate__(self): block_values = [b.values for b in self.blocks] block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] axes_array = [ax for ax in self.axes] extra_state = { '0.14.1': { 'axes': axes_array, 'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer) for b in self.blocks] } } # First three elements of the state are to maintain forward # compatibility with 0.13.1. return axes_array, block_values, block_items, extra_state def __setstate__(self, state): def unpickle_block(values, mgr_locs): # numpy < 1.7 pickle compat if values.dtype == 'M8[us]': values = values.astype('M8[ns]') return make_block(values, placement=mgr_locs) if (isinstance(state, tuple) and len(state) >= 4 and '0.14.1' in state[3]): state = state[3]['0.14.1'] self.axes = [_ensure_index(ax) for ax in state['axes']] self.blocks = tuple( unpickle_block(b['values'], b['mgr_locs']) for b in state['blocks']) else: # discard anything after 3rd, support beta pickling format for a # little while longer ax_arrays, bvalues, bitems = state[:3] self.axes = [_ensure_index(ax) for ax in ax_arrays] if len(bitems) == 1 and self.axes[0].equals(bitems[0]): # This is a workaround for pre-0.14.1 pickles that didn't # support unpickling multi-block frames/panels with non-unique # columns/items, because given a manager with items ["a", "b", # "a"] there's no way of knowing which block's "a" is where. # # Single-block case can be supported under the assumption that # block items corresponded to manager items 1-to-1. 
all_mgr_locs = [slice(0, len(bitems[0]))] else: all_mgr_locs = [self.axes[0].get_indexer(blk_items) for blk_items in bitems] self.blocks = tuple( unpickle_block(values, mgr_locs) for values, mgr_locs in zip(bvalues, all_mgr_locs)) self._post_setstate() def _post_setstate(self): self._is_consolidated = False self._known_consolidated = False self._rebuild_blknos_and_blklocs() def __len__(self): return len(self.items) def __unicode__(self): output = com.pprint_thing(self.__class__.__name__) for i, ax in enumerate(self.axes): if i == 0: output += u('\nItems: %s') % ax else: output += u('\nAxis %d: %s') % (i, ax) for block in self.blocks: output += u('\n%s') % com.pprint_thing(block) return output def _verify_integrity(self): mgr_shape = self.shape tot_items = sum(len(x.mgr_locs) for x in self.blocks) for block in self.blocks: if not block.is_sparse and block.shape[1:] != mgr_shape[1:]: construction_error(tot_items, block.shape[1:], self.axes) if len(self.items) != tot_items: raise AssertionError('Number of manager items must equal union of ' 'block items\n# manager items: {0}, # ' 'tot_items: {1}'.format(len(self.items), tot_items)) def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs): """ iterate over the blocks, collect and create a new block manager Parameters ---------- f : the callable or function name to operate on at the block level axes : optional (if not supplied, use self.axes) filter : list, if supplied, only call the block if the filter is in the block do_integrity_check : boolean, default False. Do the block manager integrity check Returns ------- Block Manager (new object) """ result_blocks = [] # filter kwarg is used in replace-* family of methods if filter is not None: filter_locs = set(self.items.get_indexer_for(filter)) if len(filter_locs) == len(self.items): # All items are included, as if there were no filtering filter = None else: kwargs['filter'] = filter_locs if f == 'where' and kwargs.get('align', True): align_copy = True align_keys = ['other', 'cond'] elif f == 'putmask' and kwargs.get('align', True): align_copy = False align_keys = ['new', 'mask'] elif f == 'eval': align_copy = False align_keys = ['other'] elif f == 'fillna': # fillna internally does putmask, maybe it's better to do this # at mgr, not block level? 
align_copy = False align_keys = ['value'] else: align_keys = [] aligned_args = dict((k, kwargs[k]) for k in align_keys if hasattr(kwargs[k], 'reindex_axis')) for b in self.blocks: if filter is not None: if not b.mgr_locs.isin(filter_locs).any(): result_blocks.append(b) continue if aligned_args: b_items = self.items[b.mgr_locs.indexer] for k, obj in aligned_args.items(): axis = getattr(obj, '_info_axis_number', 0) kwargs[k] = obj.reindex_axis(b_items, axis=axis, copy=align_copy) applied = getattr(b, f)(**kwargs) if isinstance(applied, list): result_blocks.extend(applied) else: result_blocks.append(applied) if len(result_blocks) == 0: return self.make_empty(axes or self.axes) bm = self.__class__(result_blocks, axes or self.axes, do_integrity_check=do_integrity_check) bm._consolidate_inplace() return bm def isnull(self, **kwargs): return self.apply('apply', **kwargs) def where(self, **kwargs): return self.apply('where', **kwargs) def eval(self, **kwargs): return self.apply('eval', **kwargs) def setitem(self, **kwargs): return self.apply('setitem', **kwargs) def putmask(self, **kwargs): return self.apply('putmask', **kwargs) def diff(self, **kwargs): return self.apply('diff', **kwargs) def interpolate(self, **kwargs): return self.apply('interpolate', **kwargs) def shift(self, **kwargs): return self.apply('shift', **kwargs) def fillna(self, **kwargs): return self.apply('fillna', **kwargs) def downcast(self, **kwargs): return self.apply('downcast', **kwargs) def astype(self, dtype, **kwargs): return self.apply('astype', dtype=dtype, **kwargs) def convert(self, **kwargs): return self.apply('convert', **kwargs) def replace(self, **kwargs): return self.apply('replace', **kwargs) def replace_list(self, src_list, dest_list, inplace=False, regex=False): """ do a list replace """ # figure out our mask a-priori to avoid repeated replacements values = self.as_matrix() def comp(s): if isnull(s): return isnull(values) return _possibly_compare(values, getattr(s, 'asm8', s), operator.eq) masks = [comp(s) for i, s in enumerate(src_list)] result_blocks = [] for blk in self.blocks: # its possible to get multiple result blocks here # replace ALWAYS will return a list rb = [blk if inplace else blk.copy()] for i, (s, d) in enumerate(zip(src_list, dest_list)): new_rb = [] for b in rb: if b.dtype == np.object_: result = b.replace(s, d, inplace=inplace, regex=regex) if isinstance(result, list): new_rb.extend(result) else: new_rb.append(result) else: # get our mask for this element, sized to this # particular block m = masks[i][b.mgr_locs.indexer] if m.any(): new_rb.extend(b.putmask(m, d, inplace=True)) else: new_rb.append(b) rb = new_rb result_blocks.extend(rb) bm = self.__class__(result_blocks, self.axes) bm._consolidate_inplace() return bm def reshape_nd(self, axes, **kwargs): """ a 2d-nd reshape operation on a BlockManager """ return self.apply('reshape_nd', axes=axes, **kwargs) def is_consolidated(self): """ Return True if more than one block with the same dtype """ if not self._known_consolidated: self._consolidate_check() return self._is_consolidated def _consolidate_check(self): ftypes = [blk.ftype for blk in self.blocks] self._is_consolidated = len(ftypes) == len(set(ftypes)) self._known_consolidated = True @property def is_mixed_type(self): # Warning, consolidation needs to get checked upstairs self._consolidate_inplace() return len(self.blocks) > 1 @property def is_numeric_mixed_type(self): # Warning, consolidation needs to get checked upstairs self._consolidate_inplace() return all([block.is_numeric for 
block in self.blocks]) @property def is_datelike_mixed_type(self): # Warning, consolidation needs to get checked upstairs self._consolidate_inplace() return any([block.is_datelike for block in self.blocks]) @property def is_view(self): """ return a boolean if we are a single block and are a view """ if len(self.blocks) == 1: return self.blocks[0].is_view # It is technically possible to figure out which blocks are views # e.g. [ b.values.base is not None for b in self.blocks ] # but then we have the case of possibly some blocks being a view # and some blocks not. setting in theory is possible on the non-view # blocks w/o causing a SettingWithCopy raise/warn. But this is a bit # complicated return False def get_bool_data(self, copy=False): """ Parameters ---------- copy : boolean, default False Whether to copy the blocks """ self._consolidate_inplace() return self.combine([b for b in self.blocks if b.is_bool], copy) def get_numeric_data(self, copy=False): """ Parameters ---------- copy : boolean, default False Whether to copy the blocks """ self._consolidate_inplace() return self.combine([b for b in self.blocks if b.is_numeric], copy) def combine(self, blocks, copy=True): """ return a new manager with the blocks """ if len(blocks) == 0: return self.make_empty() # FIXME: optimization potential indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks])) inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) new_items = self.items.take(indexer) new_blocks = [] for b in blocks: b = b.copy(deep=copy) b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0, allow_fill=False) new_blocks.append(b) new_axes = list(self.axes) new_axes[0] = new_items return self.__class__(new_blocks, new_axes, do_integrity_check=False) def get_slice(self, slobj, axis=0): if axis >= self.ndim: raise IndexError("Requested axis not found in manager") if axis == 0: new_blocks = self._slice_take_blocks_ax0(slobj) else: slicer = [slice(None)] * (axis + 1) slicer[axis] = slobj slicer = tuple(slicer) new_blocks = [blk.getitem_block(slicer) for blk in self.blocks] new_axes = list(self.axes) new_axes[axis] = new_axes[axis][slobj] bm = self.__class__(new_blocks, new_axes, do_integrity_check=False, fastpath=True) bm._consolidate_inplace() return bm def __contains__(self, item): return item in self.items @property def nblocks(self): return len(self.blocks) def copy(self, deep=True): """ Make deep or shallow copy of BlockManager Parameters ---------- deep : boolean o rstring, default True If False, return shallow copy (do not copy data) If 'all', copy data and a deep copy of the index Returns ------- copy : BlockManager """ # this preserves the notion of view copying of axes if deep: if deep == 'all': copy = lambda ax: ax.copy(deep=True) else: copy = lambda ax: ax.view() new_axes = [ copy(ax) for ax in self.axes] else: new_axes = list(self.axes) return self.apply('copy', axes=new_axes, deep=deep, do_integrity_check=False) def as_matrix(self, items=None): if len(self.blocks) == 0: return np.empty(self.shape, dtype=float) if items is not None: mgr = self.reindex_axis(items, axis=0) else: mgr = self if self._is_single_block or not self.is_mixed_type: return mgr.blocks[0].get_values() else: return mgr._interleave() def _interleave(self): """ Return ndarray from blocks with specified item order Items must be contained in the blocks """ dtype = _interleaved_dtype(self.blocks) result = np.empty(self.shape, dtype=dtype) if result.shape[0] == 0: # Workaround for numpy 1.7 bug: # # >>> a = np.empty((0,10)) # >>> 
a[slice(0,0)] # array([], shape=(0, 10), dtype=float64) # >>> a[[]] # Traceback (most recent call last): # File "<stdin>", line 1, in <module> # IndexError: index 0 is out of bounds for axis 0 with size 0 return result itemmask = np.zeros(self.shape[0]) for blk in self.blocks: rl = blk.mgr_locs result[rl.indexer] = blk.get_values(dtype) itemmask[rl.indexer] = 1 if not itemmask.all(): raise AssertionError('Some items were not contained in blocks') return result def xs(self, key, axis=1, copy=True, takeable=False): if axis < 1: raise AssertionError('Can only take xs across axis >= 1, got %d' % axis) # take by position if takeable: loc = key else: loc = self.axes[axis].get_loc(key) slicer = [slice(None, None) for _ in range(self.ndim)] slicer[axis] = loc slicer = tuple(slicer) new_axes = list(self.axes) # could be an array indexer! if isinstance(loc, (slice, np.ndarray)): new_axes[axis] = new_axes[axis][loc] else: new_axes.pop(axis) new_blocks = [] if len(self.blocks) > 1: # we must copy here as we are mixed type for blk in self.blocks: newb = make_block(values=blk.values[slicer], klass=blk.__class__, fastpath=True, placement=blk.mgr_locs) new_blocks.append(newb) elif len(self.blocks) == 1: block = self.blocks[0] vals = block.values[slicer] if copy: vals = vals.copy() new_blocks = [make_block(values=vals, placement=block.mgr_locs, klass=block.__class__, fastpath=True,)] return self.__class__(new_blocks, new_axes) def fast_xs(self, loc): """ get a cross sectional for a given location in the items ; handle dups return the result, is *could* be a view in the case of a single block """ if len(self.blocks) == 1: return self.blocks[0].values[:, loc] items = self.items # non-unique (GH4726) if not items.is_unique: result = self._interleave() if self.ndim == 2: result = result.T return result[loc] # unique dtype = _interleaved_dtype(self.blocks) n = len(items) result = np.empty(n, dtype=dtype) for blk in self.blocks: # Such assignment may incorrectly coerce NaT to None # result[blk.mgr_locs] = blk._slice((slice(None), loc)) for i, rl in enumerate(blk.mgr_locs): result[rl] = blk._try_coerce_result(blk.iget((i, loc))) return result def consolidate(self): """ Join together blocks having same dtype Returns ------- y : BlockManager """ if self.is_consolidated(): return self bm = self.__class__(self.blocks, self.axes) bm._is_consolidated = False bm._consolidate_inplace() return bm def _consolidate_inplace(self): if not self.is_consolidated(): self.blocks = tuple(_consolidate(self.blocks)) self._is_consolidated = True self._known_consolidated = True self._rebuild_blknos_and_blklocs() def get(self, item, fastpath=True): """ Return values for selected item (ndarray or BlockManager). 
""" if self.items.is_unique: if not isnull(item): loc = self.items.get_loc(item) else: indexer = np.arange(len(self.items))[isnull(self.items)] # allow a single nan location indexer if not np.isscalar(indexer): if len(indexer) == 1: loc = indexer.item() else: raise ValueError("cannot label index with a null key") return self.iget(loc, fastpath=fastpath) else: if isnull(item): raise ValueError("cannot label index with a null key") indexer = self.items.get_indexer_for([item]) return self.reindex_indexer(new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True) def iget(self, i, fastpath=True): """ Return the data as a SingleBlockManager if fastpath=True and possible Otherwise return as a ndarray """ block = self.blocks[self._blknos[i]] values = block.iget(self._blklocs[i]) if not fastpath or block.is_sparse or values.ndim != 1: return values # fastpath shortcut for select a single-dim from a 2-dim BM return SingleBlockManager([ block.make_block_same_class(values, placement=slice(0, len(values)), ndim=1, fastpath=True) ], self.axes[1]) def get_scalar(self, tup): """ Retrieve single item """ full_loc = list(ax.get_loc(x) for ax, x in zip(self.axes, tup)) blk = self.blocks[self._blknos[full_loc[0]]] full_loc[0] = self._blklocs[full_loc[0]] # FIXME: this may return non-upcasted types? return blk.values[tuple(full_loc)] def delete(self, item): """ Delete selected item (items if non-unique) in-place. """ indexer = self.items.get_loc(item) is_deleted = np.zeros(self.shape[0], dtype=np.bool_) is_deleted[indexer] = True ref_loc_offset = -is_deleted.cumsum() is_blk_deleted = [False] * len(self.blocks) if isinstance(indexer, int): affected_start = indexer else: affected_start = is_deleted.nonzero()[0][0] for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]): blk = self.blocks[blkno] bml = blk.mgr_locs blk_del = is_deleted[bml.indexer].nonzero()[0] if len(blk_del) == len(bml): is_blk_deleted[blkno] = True continue elif len(blk_del) != 0: blk.delete(blk_del) bml = blk.mgr_locs blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer]) # FIXME: use Index.delete as soon as it uses fastpath=True self.axes[0] = self.items[~is_deleted] self.blocks = tuple(b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]) self._shape = None self._rebuild_blknos_and_blklocs() def set(self, item, value, check=False): """ Set new item in-place. Does not consolidate. 
Adds new Block if not contained in the current set of items if check, then validate that we are not setting the same data in-place """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical value_is_sparse = isinstance(value, SparseArray) value_is_cat = is_categorical(value) value_is_nonconsolidatable = value_is_sparse or value_is_cat if value_is_sparse: # sparse assert self.ndim == 2 def value_getitem(placement): return value elif value_is_cat: # categorical def value_getitem(placement): return value else: if value.ndim == self.ndim - 1: value = value.reshape((1,) + value.shape) def value_getitem(placement): return value else: def value_getitem(placement): return value[placement.indexer] if value.shape[1:] != self.shape[1:]: raise AssertionError('Shape of new values must be compatible ' 'with manager shape') try: loc = self.items.get_loc(item) except KeyError: # This item wasn't present, just insert at end self.insert(len(self.items), item, value) return if isinstance(loc, int): loc = [loc] blknos = self._blknos[loc] blklocs = self._blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] removed_blknos = [] for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks), group=True): blk = self.blocks[blkno] blk_locs = blklocs[val_locs.indexer] if blk.should_store(value): blk.set(blk_locs, value_getitem(val_locs), check=check) else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) # If all block items are unfit, schedule the block for removal. if len(val_locs) == len(blk.mgr_locs): removed_blknos.append(blkno) else: self._blklocs[blk.mgr_locs.indexer] = -1 blk.delete(blk_locs) self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) if len(removed_blknos): # Remove blocks & update blknos accordingly is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True new_blknos = np.empty(self.nblocks, dtype=np.int64) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = com.take_1d(new_blknos, self._blknos, axis=0, allow_fill=False) self.blocks = tuple(blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)) if unfit_val_locs: unfit_mgr_locs = np.concatenate(unfit_mgr_locs) unfit_count = len(unfit_mgr_locs) new_blocks = [] if value_is_nonconsolidatable: # This code (ab-)uses the fact that sparse blocks contain only # one item. new_blocks.extend( make_block(values=value.copy(), ndim=self.ndim, placement=slice(mgr_loc, mgr_loc + 1)) for mgr_loc in unfit_mgr_locs) self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + len(self.blocks)) self._blklocs[unfit_mgr_locs] = 0 else: # unfit_val_locs contains BlockPlacement objects unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) new_blocks.append( make_block(values=value_getitem(unfit_val_items), ndim=self.ndim, placement=unfit_mgr_locs)) self._blknos[unfit_mgr_locs] = len(self.blocks) self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) self.blocks += tuple(new_blocks) # Newly created block's dtype may already be present. self._known_consolidated = False def insert(self, loc, item, value, allow_duplicates=False): """ Insert item at selected position. Parameters ---------- loc : int item : hashable value : array_like allow_duplicates: bool If False, trying to insert non-unique item will raise """ if not allow_duplicates and item in self.items: # Should this be a different kind of error?? 
raise ValueError('cannot insert %s, already exists' % item) if not isinstance(loc, int): raise TypeError("loc must be int") block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc+1)) for blkno, count in _fast_count_smallints(self._blknos[loc:]): blk = self.blocks[blkno] if count == len(blk.mgr_locs): blk.mgr_locs = blk.mgr_locs.add(1) else: new_mgr_locs = blk.mgr_locs.as_array.copy() new_mgr_locs[new_mgr_locs >= loc] += 1 blk.mgr_locs = new_mgr_locs if loc == self._blklocs.shape[0]: # np.append is a lot faster (at least in numpy 1.7.1), let's use it # if we can. self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) else: self._blklocs = np.insert(self._blklocs, loc, 0) self._blknos = np.insert(self._blknos, loc, len(self.blocks)) self.axes[0] = self.items.insert(loc, item) self.blocks += (block,) self._shape = None self._known_consolidated = False if len(self.blocks) > 100: self._consolidate_inplace() def reindex_axis(self, new_index, axis, method=None, limit=None, fill_value=None, copy=True): """ Conform block manager to new index. """ new_index = _ensure_index(new_index) new_index, indexer = self.axes[axis].reindex( new_index, method=method, limit=limit) return self.reindex_indexer(new_index, indexer, axis=axis, fill_value=fill_value, copy=copy) def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, allow_dups=False, copy=True): """ Parameters ---------- new_axis : Index indexer : ndarray of int64 or None axis : int fill_value : object allow_dups : bool pandas-indexer with -1's only. """ if indexer is None: if new_axis is self.axes[axis] and not copy: return self result = self.copy(deep=copy) result.axes = list(self.axes) result.axes[axis] = new_axis return result self._consolidate_inplace() # some axes don't allow reindexing with dups if not allow_dups: self.axes[axis]._can_reindex(indexer) if axis >= self.ndim: raise IndexError("Requested axis not found in manager") if axis == 0: new_blocks = self._slice_take_blocks_ax0( indexer, fill_tuple=(fill_value,)) else: new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=(fill_value if fill_value is not None else blk.fill_value,)) for blk in self.blocks] new_axes = list(self.axes) new_axes[axis] = new_axis return self.__class__(new_blocks, new_axes) def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): """ Slice/take blocks along axis=0. Overloaded for SingleBlock Returns ------- new_blocks : list of Block """ allow_fill = fill_tuple is not None sl_type, slobj, sllen = _preprocess_slice_or_indexer( slice_or_indexer, self.shape[0], allow_fill=allow_fill) if self._is_single_block: blk = self.blocks[0] if sl_type in ('slice', 'mask'): return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))] elif not allow_fill or self.ndim == 1: if allow_fill and fill_tuple[0] is None: _, fill_value = com._maybe_promote(blk.dtype) fill_tuple = (fill_value,) return [blk.take_nd(slobj, axis=0, new_mgr_locs=slice(0, sllen), fill_tuple=fill_tuple)] if sl_type in ('slice', 'mask'): blknos = self._blknos[slobj] blklocs = self._blklocs[slobj] else: blknos = com.take_1d(self._blknos, slobj, fill_value=-1, allow_fill=allow_fill) blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1, allow_fill=allow_fill) # When filling blknos, make sure blknos is updated before appending to # blocks list, that way new blkno is exactly len(blocks). # # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, # pytables serialization will break otherwise. 
blocks = [] for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks), group=True): if blkno == -1: # If we've got here, fill_tuple was not None. fill_value = fill_tuple[0] blocks.append(self._make_na_block( placement=mgr_locs, fill_value=fill_value)) else: blk = self.blocks[blkno] # Otherwise, slicing along items axis is necessary. if not blk._can_consolidate: # A non-consolidatable block, it's easy, because there's only one item # and each mgr loc is a copy of that single item. for mgr_loc in mgr_locs: newblk = blk.copy(deep=True) newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1) blocks.append(newblk) else: blocks.append(blk.take_nd( blklocs[mgr_locs.indexer], axis=0, new_mgr_locs=mgr_locs, fill_tuple=None)) return blocks def _make_na_block(self, placement, fill_value=None): # TODO: infer dtypes other than float64 from fill_value if fill_value is None: fill_value = np.nan block_shape = list(self.shape) block_shape[0] = len(placement) dtype, fill_value = com._infer_dtype_from_scalar(fill_value) block_values = np.empty(block_shape, dtype=dtype) block_values.fill(fill_value) return make_block(block_values, placement=placement) def take(self, indexer, axis=1, verify=True, convert=True): """ Take items along any axis. """ self._consolidate_inplace() indexer = np.arange(indexer.start, indexer.stop, indexer.step, dtype='int64') if isinstance(indexer, slice) \ else np.asanyarray(indexer, dtype='int64') n = self.shape[axis] if convert: indexer = maybe_convert_indices(indexer, n) if verify: if ((indexer == -1) | (indexer >= n)).any(): raise Exception('Indices must be nonzero and less than ' 'the axis length') new_labels = self.axes[axis].take(indexer) return self.reindex_indexer(new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True) def merge(self, other, lsuffix='', rsuffix=''): if not self._is_indexed_like(other): raise AssertionError('Must have same axes to merge managers') l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, right=other.items, rsuffix=rsuffix) new_items = _concat_indexes([l, r]) new_blocks = [blk.copy(deep=False) for blk in self.blocks] offset = self.shape[0] for blk in other.blocks: blk = blk.copy(deep=False) blk.mgr_locs = blk.mgr_locs.add(offset) new_blocks.append(blk) new_axes = list(self.axes) new_axes[0] = new_items return self.__class__(_consolidate(new_blocks), new_axes) def _is_indexed_like(self, other): """ Check all axes except items """ if self.ndim != other.ndim: raise AssertionError(('Number of dimensions must agree ' 'got %d and %d') % (self.ndim, other.ndim)) for ax, oax in zip(self.axes[1:], other.axes[1:]): if not ax.equals(oax): return False return True def equals(self, other): self_axes, other_axes = self.axes, other.axes if len(self_axes) != len(other_axes): return False if not all (ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): return False self._consolidate_inplace() other._consolidate_inplace() if len(self.blocks) != len(other.blocks): return False # canonicalize block order, using a tuple combining the type # name and then mgr_locs because there might be unconsolidated # blocks (say, Categorical) which can only be distinguished by # the iteration order def canonicalize(block): return (block.dtype.name, block.mgr_locs.as_array.tolist()) self_blocks = sorted(self.blocks, key=canonicalize) other_blocks = sorted(other.blocks, key=canonicalize) return all(block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks)) class SingleBlockManager(BlockManager): """ manage a single block with """ ndim = 1 
_is_consolidated = True _known_consolidated = True __slots__ = () def __init__(self, block, axis, do_integrity_check=False, fastpath=False): if isinstance(axis, list): if len(axis) != 1: raise ValueError( "cannot create SingleBlockManager with more than 1 axis") axis = axis[0] # passed from constructor, single block, single axis if fastpath: self.axes = [axis] if isinstance(block, list): # empty block if len(block) == 0: block = [np.array([])] elif len(block) != 1: raise ValueError('Cannot create SingleBlockManager with ' 'more than 1 block') block = block[0] else: self.axes = [_ensure_index(axis)] # create the block here if isinstance(block, list): # provide consolidation to the interleaved_dtype if len(block) > 1: dtype = _interleaved_dtype(block) block = [b.astype(dtype) for b in block] block = _consolidate(block) if len(block) != 1: raise ValueError('Cannot create SingleBlockManager with ' 'more than 1 block') block = block[0] if not isinstance(block, Block): block = make_block(block, placement=slice(0, len(axis)), ndim=1, fastpath=True) self.blocks = [block] def _post_setstate(self): pass @property def _block(self): return self.blocks[0] @property def _values(self): return self._block.values def reindex(self, new_axis, indexer=None, method=None, fill_value=None, limit=None, copy=True): # if we are the same and don't copy, just return if self.index.equals(new_axis): if copy: return self.copy(deep=True) else: return self values = self._block.get_values() if indexer is None: indexer = self.items.get_indexer_for(new_axis) if fill_value is None: # FIXME: is fill_value used correctly in sparse blocks? if not self._block.is_sparse: fill_value = self._block.fill_value else: fill_value = np.nan new_values = com.take_1d(values, indexer, fill_value=fill_value) # fill if needed if method is not None or limit is not None: new_values = com.interpolate_2d(new_values, method=method, limit=limit, fill_value=fill_value) if self._block.is_sparse: make_block = self._block.make_block_same_class block = make_block(new_values, copy=copy, placement=slice(0, len(new_axis))) mgr = SingleBlockManager(block, new_axis) mgr._consolidate_inplace() return mgr def get_slice(self, slobj, axis=0): if axis >= self.ndim: raise IndexError("Requested axis not found in manager") return self.__class__(self._block._slice(slobj), self.index[slobj], fastpath=True) @property def index(self): return self.axes[0] def convert(self, **kwargs): """ convert the whole block as one """ kwargs['by_item'] = False return self.apply('convert', **kwargs) @property def dtype(self): return self._values.dtype @property def array_dtype(self): return self._block.array_dtype @property def ftype(self): return self._block.ftype def get_dtype_counts(self): return {self.dtype.name: 1} def get_ftype_counts(self): return {self.ftype: 1} def get_dtypes(self): return np.array([self._block.dtype]) def get_ftypes(self): return np.array([self._block.ftype]) @property def values(self): return self._values.view() def get_values(self): """ return a dense type view """ return np.array(self._block.to_dense(),copy=False) @property def itemsize(self): return self._values.itemsize @property def _can_hold_na(self): return self._block._can_hold_na def is_consolidated(self): return True def _consolidate_check(self): pass def _consolidate_inplace(self): pass def delete(self, item): """ Delete single item from SingleBlockManager. Ensures that self.blocks doesn't become empty. 
""" loc = self.items.get_loc(item) self._block.delete(loc) self.axes[0] = self.axes[0].delete(loc) def fast_xs(self, loc): """ fast path for getting a cross-section return a view of the data """ return self._block.values[loc] def construction_error(tot_items, block_shape, axes, e=None): """ raise a helpful message about our construction """ passed = tuple(map(int, [tot_items] + list(block_shape))) implied = tuple(map(int, [len(ax) for ax in axes])) if passed == implied and e is not None: raise e raise ValueError("Shape of passed values is {0}, indices imply {1}".format( passed,implied)) def create_block_manager_from_blocks(blocks, axes): try: if len(blocks) == 1 and not isinstance(blocks[0], Block): # if blocks[0] is of length 0, return empty blocks if not len(blocks[0]): blocks = [] else: # It's OK if a single block is passed as values, its placement is # basically "all items", but if there're many, don't bother # converting, it's an error anyway. blocks = [make_block(values=blocks[0], placement=slice(0, len(axes[0])))] mgr = BlockManager(blocks, axes) mgr._consolidate_inplace() return mgr except (ValueError) as e: blocks = [getattr(b, 'values', b) for b in blocks] tot_items = sum(b.shape[0] for b in blocks) construction_error(tot_items, blocks[0].shape[1:], axes, e) def create_block_manager_from_arrays(arrays, names, axes): try: blocks = form_blocks(arrays, names, axes) mgr = BlockManager(blocks, axes) mgr._consolidate_inplace() return mgr except (ValueError) as e: construction_error(len(arrays), arrays[0].shape, axes, e) def form_blocks(arrays, names, axes): # put "leftover" items in float bucket, where else? # generalize? float_items = [] complex_items = [] int_items = [] bool_items = [] object_items = [] sparse_items = [] datetime_items = [] cat_items = [] extra_locs = [] names_idx = Index(names) if names_idx.equals(axes[0]): names_indexer = np.arange(len(names_idx)) else: assert names_idx.intersection(axes[0]).is_unique names_indexer = names_idx.get_indexer_for(axes[0]) for i, name_idx in enumerate(names_indexer): if name_idx == -1: extra_locs.append(i) continue k = names[name_idx] v = arrays[name_idx] if isinstance(v, (SparseArray, ABCSparseSeries)): sparse_items.append((i, k, v)) elif issubclass(v.dtype.type, np.floating): float_items.append((i, k, v)) elif issubclass(v.dtype.type, np.complexfloating): complex_items.append((i, k, v)) elif issubclass(v.dtype.type, np.datetime64): if v.dtype != _NS_DTYPE: v = tslib.cast_to_nanoseconds(v) if hasattr(v, 'tz') and v.tz is not None: object_items.append((i, k, v)) else: datetime_items.append((i, k, v)) elif issubclass(v.dtype.type, np.integer): if v.dtype == np.uint64: # HACK #2355 definite overflow if (v > 2 ** 63 - 1).any(): object_items.append((i, k, v)) continue int_items.append((i, k, v)) elif v.dtype == np.bool_: bool_items.append((i, k, v)) elif is_categorical(v): cat_items.append((i, k, v)) else: object_items.append((i, k, v)) blocks = [] if len(float_items): float_blocks = _multi_blockify(float_items) blocks.extend(float_blocks) if len(complex_items): complex_blocks = _simple_blockify( complex_items, np.complex128) blocks.extend(complex_blocks) if len(int_items): int_blocks = _multi_blockify(int_items) blocks.extend(int_blocks) if len(datetime_items): datetime_blocks = _simple_blockify( datetime_items, _NS_DTYPE) blocks.extend(datetime_blocks) if len(bool_items): bool_blocks = _simple_blockify( bool_items, np.bool_) blocks.extend(bool_blocks) if len(object_items) > 0: object_blocks = _simple_blockify( object_items, np.object_) 
blocks.extend(object_blocks) if len(sparse_items) > 0: sparse_blocks = _sparse_blockify(sparse_items) blocks.extend(sparse_blocks) if len(cat_items) > 0: cat_blocks = [ make_block(array, klass=CategoricalBlock, fastpath=True, placement=[i] ) for i, names, array in cat_items ] blocks.extend(cat_blocks) if len(extra_locs): shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) # empty items -> dtype object block_values = np.empty(shape, dtype=object) block_values.fill(np.nan) na_block = make_block(block_values, placement=extra_locs) blocks.append(na_block) return blocks def _simple_blockify(tuples, dtype): """ return a single array of a block that has a single dtype; if dtype is not None, coerce to this dtype """ values, placement = _stack_arrays(tuples, dtype) # CHECK DTYPE? if dtype is not None and values.dtype != dtype: # pragma: no cover values = values.astype(dtype) block = make_block(values, placement=placement) return [block] def _multi_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes """ # group by dtype grouper = itertools.groupby(tuples, lambda x: x[2].dtype) new_blocks = [] for dtype, tup_block in grouper: values, placement = _stack_arrays( list(tup_block), dtype) block = make_block(values, placement=placement) new_blocks.append(block) return new_blocks def _sparse_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes (and are sparse) """ new_blocks = [] for i, names, array in tuples: array = _maybe_to_sparse(array) block = make_block( array, klass=SparseBlock, fastpath=True, placement=[i]) new_blocks.append(block) return new_blocks def _stack_arrays(tuples, dtype): # fml def _asarray_compat(x): if isinstance(x, ABCSeries): return x.values else: return np.asarray(x) def _shape_compat(x): if isinstance(x, ABCSeries): return len(x), else: return x.shape placement, names, arrays = zip(*tuples) first = arrays[0] shape = (len(arrays),) + _shape_compat(first) stacked = np.empty(shape, dtype=dtype) for i, arr in enumerate(arrays): stacked[i] = _asarray_compat(arr) return stacked, placement def _interleaved_dtype(blocks): if not len(blocks): return None counts = defaultdict(lambda: []) for x in blocks: counts[type(x)].append(x) def _lcd_dtype(l): """ find the lowest dtype that can accomodate the given types """ m = l[0].dtype for x in l[1:]: if x.dtype.itemsize > m.itemsize: m = x.dtype return m have_int = len(counts[IntBlock]) > 0 have_bool = len(counts[BoolBlock]) > 0 have_object = len(counts[ObjectBlock]) > 0 have_float = len(counts[FloatBlock]) > 0 have_complex = len(counts[ComplexBlock]) > 0 have_dt64 = len(counts[DatetimeBlock]) > 0 have_td64 = len(counts[TimeDeltaBlock]) > 0 have_cat = len(counts[CategoricalBlock]) > 0 have_sparse = len(counts[SparseBlock]) > 0 have_numeric = have_float or have_complex or have_int has_non_numeric = have_dt64 or have_td64 or have_cat if (have_object or (have_bool and (have_numeric or have_dt64 or have_td64)) or (have_numeric and has_non_numeric) or have_cat or have_dt64 or have_td64): return np.dtype(object) elif have_bool: return np.dtype(bool) elif have_int and not have_float and not have_complex: # if we are mixing unsigned and signed, then return # the next biggest int type (if we can) lcd = _lcd_dtype(counts[IntBlock]) kinds = set([i.dtype.kind for i in counts[IntBlock]]) if len(kinds) == 1: return lcd if lcd == 'uint64' or lcd == 'int64': return np.dtype('int64') # return 1 bigger on the itemsize if unsinged if lcd.kind == 'u': return 
np.dtype('int%s' % (lcd.itemsize * 8 * 2)) return lcd elif have_complex: return np.dtype('c16') else: return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock]) def _consolidate(blocks): """ Merge blocks having same dtype, exclude non-consolidating blocks """ # sort by _can_consolidate, dtype gkey = lambda x: x._consolidate_key grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) new_blocks = [] for (_can_consolidate, dtype), group_blocks in grouper: merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, _can_consolidate=_can_consolidate) if isinstance(merged_blocks, list): new_blocks.extend(merged_blocks) else: new_blocks.append(merged_blocks) return new_blocks def _merge_blocks(blocks, dtype=None, _can_consolidate=True): if len(blocks) == 1: return blocks[0] if _can_consolidate: if dtype is None: if len(set([b.dtype for b in blocks])) != 1: raise AssertionError("_merge_blocks are invalid!") dtype = blocks[0].dtype # FIXME: optimization potential in case all mgrs contain slices and # combination of those slices is a slice, too. new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) new_values = _vstack([b.values for b in blocks], dtype) argsort = np.argsort(new_mgr_locs) new_values = new_values[argsort] new_mgr_locs = new_mgr_locs[argsort] return make_block(new_values, fastpath=True, placement=new_mgr_locs) # no merge return blocks def _block_shape(values, ndim=1, shape=None): """ guarantee the shape of the values to be at least 1 d """ if values.ndim <= ndim: if shape is None: shape = values.shape values = values.reshape(tuple((1,) + shape)) return values def _vstack(to_stack, dtype): # work around NumPy 1.6 bug if dtype == _NS_DTYPE or dtype == _TD_DTYPE: new_values = np.vstack([x.view('i8') for x in to_stack]) return new_values.view(dtype) else: return np.vstack(to_stack) def _possibly_compare(a, b, op): res = op(a, b) is_a_array = isinstance(a, np.ndarray) is_b_array = isinstance(b, np.ndarray) if np.isscalar(res) and (is_a_array or is_b_array): type_names = [type(a).__name__, type(b).__name__] if is_a_array: type_names[0] = 'ndarray(dtype=%s)' % a.dtype if is_b_array: type_names[1] = 'ndarray(dtype=%s)' % b.dtype raise TypeError("Cannot compare types %r and %r" % tuple(type_names)) return res def _concat_indexes(indexes): return indexes[0].append(indexes[1:]) def _block2d_to_blocknd(values, placement, shape, labels, ref_items): """ pivot to the labels shape """ from pandas.core.internals import make_block panel_shape = (len(placement),) + shape # TODO: lexsort depth needs to be 2!! # Create observation selection vector using major and minor # labels, for converting to panel format. 
selector = _factor_indexer(shape[1:], labels) mask = np.zeros(np.prod(shape), dtype=bool) mask.put(selector, True) if mask.all(): pvalues = np.empty(panel_shape, dtype=values.dtype) else: dtype, fill_value = _maybe_promote(values.dtype) pvalues = np.empty(panel_shape, dtype=dtype) pvalues.fill(fill_value) values = values for i in range(len(placement)): pvalues[i].flat[mask] = values[:, i] return make_block(pvalues, placement=placement) def _factor_indexer(shape, labels): """ given a tuple of shape and a list of Categorical labels, return the expanded label indexer """ mult = np.array(shape)[::-1].cumprod()[::-1] return com._ensure_platform_int( np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T) def _get_blkno_placements(blknos, blk_count, group=True): """ Parameters ---------- blknos : array of int64 blk_count : int group : bool Returns ------- iterator yield (BlockPlacement, blkno) """ blknos = com._ensure_int64(blknos) # FIXME: blk_count is unused, but it may avoid the use of dicts in cython for blkno, indexer in lib.get_blkno_indexers(blknos, group): yield blkno, BlockPlacement(indexer) def items_overlap_with_suffix(left, lsuffix, right, rsuffix): """ If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. """ to_rename = left.intersection(right) if len(to_rename) == 0: return left, right else: if not lsuffix and not rsuffix: raise ValueError('columns overlap but no suffix specified: %s' % to_rename) def lrenamer(x): if x in to_rename: return '%s%s' % (x, lsuffix) return x def rrenamer(x): if x in to_rename: return '%s%s' % (x, rsuffix) return x return (_transform_index(left, lrenamer), _transform_index(right, rrenamer)) def _transform_index(index, func): """ Apply function to all values found in index. This includes transforming multiindex entries separately. """ if isinstance(index, MultiIndex): items = [tuple(func(y) for y in x) for x in index] return MultiIndex.from_tuples(items, names=index.names) else: items = [func(x) for x in index] return Index(items, name=index.name) def _putmask_smart(v, m, n): """ Return a new block, try to preserve dtype if possible. Parameters ---------- v : `values`, updated in-place (array like) m : `mask`, applies to both sides (array like) n : `new values` either scalar or an array like aligned with `values` """ # n should be the length of the mask or a scalar here if not is_list_like(n): n = np.array([n] * len(m)) elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar n = np.repeat(np.array(n, ndmin=1), len(m)) # see if we are only masking values that if putted # will work in the current dtype try: nn = n[m] nn_at = nn.astype(v.dtype) comp = (nn == nn_at) if is_list_like(comp) and comp.all(): nv = v.copy() nv[m] = nn_at return nv except (ValueError, IndexError, TypeError): pass # change the dtype dtype, _ = com._maybe_promote(n.dtype) nv = v.astype(dtype) try: nv[m] = n[m] except ValueError: idx, = np.where(np.squeeze(m)) for mask_index, new_val in zip(idx, n[m]): nv[mask_index] = new_val return nv def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): """ Concatenate block managers into one. 
Parameters ---------- mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples axes : list of Index concat_axis : int copy : bool """ concat_plan = combine_concat_plans([get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers], concat_axis) blocks = [make_block(concatenate_join_units(join_units, concat_axis, copy=copy), placement=placement) for placement, join_units in concat_plan] return BlockManager(blocks, axes) def get_empty_dtype_and_na(join_units): """ Return dtype and N/A values to use when concatenating specified units. Returned N/A value may be None which means there was no casting involved. Returns ------- dtype na """ if len(join_units) == 1: blk = join_units[0].block if blk is None: return np.float64, np.nan has_none_blocks = False dtypes = [None] * len(join_units) for i, unit in enumerate(join_units): if unit.block is None: has_none_blocks = True else: dtypes[i] = unit.dtype # dtypes = set() upcast_classes = set() null_upcast_classes = set() for dtype, unit in zip(dtypes, join_units): if dtype is None: continue if com.is_categorical_dtype(dtype): upcast_cls = 'category' elif issubclass(dtype.type, np.bool_): upcast_cls = 'bool' elif issubclass(dtype.type, np.object_): upcast_cls = 'object' elif is_datetime64_dtype(dtype): upcast_cls = 'datetime' elif is_timedelta64_dtype(dtype): upcast_cls = 'timedelta' else: upcast_cls = 'float' # Null blocks should not influence upcast class selection, unless there # are only null blocks, when same upcasting rules must be applied to # null upcast classes. if unit.is_null: null_upcast_classes.add(upcast_cls) else: upcast_classes.add(upcast_cls) if not upcast_classes: upcast_classes = null_upcast_classes # create the result if 'object' in upcast_classes: return np.dtype(np.object_), np.nan elif 'bool' in upcast_classes: if has_none_blocks: return np.dtype(np.object_), np.nan else: return np.dtype(np.bool_), None elif 'category' in upcast_classes: return com.CategoricalDtype(), np.nan elif 'float' in upcast_classes: return np.dtype(np.float64), np.nan elif 'datetime' in upcast_classes: return np.dtype('M8[ns]'), tslib.iNaT elif 'timedelta' in upcast_classes: return np.dtype('m8[ns]'), tslib.iNaT else: # pragma raise AssertionError("invalid dtype determination in get_concat_dtype") def concatenate_join_units(join_units, concat_axis, copy): """ Concatenate values from several join units along selected axis. """ if concat_axis == 0 and len(join_units) > 1: # Concatenating join units along ax0 is handled in _merge_blocks. raise AssertionError("Concatenating join units along axis0") empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units) to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na) for ju in join_units] if len(to_concat) == 1: # Only one block, nothing to concatenate. concat_values = to_concat[0] if copy and concat_values.base is not None: concat_values = concat_values.copy() else: concat_values = com._concat_compat(to_concat, axis=concat_axis) return concat_values def get_mgr_concatenation_plan(mgr, indexers): """ Construct concatenation plan for given block manager and indexers. Parameters ---------- mgr : BlockManager indexers : dict of {axis: indexer} Returns ------- plan : list of (BlockPlacement, JoinUnit) tuples """ # Calculate post-reindex shape , save for item axis which will be separate # for each block anyway. 
mgr_shape = list(mgr.shape) for ax, indexer in indexers.items(): mgr_shape[ax] = len(indexer) mgr_shape = tuple(mgr_shape) if 0 in indexers: ax0_indexer = indexers.pop(0) blknos = com.take_1d(mgr._blknos, ax0_indexer, fill_value=-1) blklocs = com.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1) else: if mgr._is_single_block: blk = mgr.blocks[0] return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] ax0_indexer = None blknos = mgr._blknos blklocs = mgr._blklocs plan = [] for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks), group=False): assert placements.is_slice_like join_unit_indexers = indexers.copy() shape = list(mgr_shape) shape[0] = len(placements) shape = tuple(shape) if blkno == -1: unit = JoinUnit(None, shape) else: blk = mgr.blocks[blkno] ax0_blk_indexer = blklocs[placements.indexer] unit_no_ax0_reindexing = ( len(placements) == len(blk.mgr_locs) and # Fastpath detection of join unit not needing to reindex its # block: no ax0 reindexing took place and block placement was # sequential before. ((ax0_indexer is None and blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1) or # Slow-ish detection: all indexer locs are sequential (and # length match is checked above). (np.diff(ax0_blk_indexer) == 1).all())) # Omit indexer if no item reindexing is required. if unit_no_ax0_reindexing: join_unit_indexers.pop(0, None) else: join_unit_indexers[0] = ax0_blk_indexer unit = JoinUnit(blk, shape, join_unit_indexers) plan.append((placements, unit)) return plan def combine_concat_plans(plans, concat_axis): """ Combine multiple concatenation plans into one. existing_plan is updated in-place. """ if len(plans) == 1: for p in plans[0]: yield p[0], [p[1]] elif concat_axis == 0: offset = 0 for plan in plans: last_plc = None for plc, unit in plan: yield plc.add(offset), [unit] last_plc = plc if last_plc is not None: offset += last_plc.as_slice.stop else: num_ended = [0] def _next_or_none(seq): retval = next(seq, None) if retval is None: num_ended[0] += 1 return retval plans = list(map(iter, plans)) next_items = list(map(_next_or_none, plans)) while num_ended[0] != len(next_items): if num_ended[0] > 0: raise ValueError("Plan shapes are not aligned") placements, units = zip(*next_items) lengths = list(map(len, placements)) min_len, max_len = min(lengths), max(lengths) if min_len == max_len: yield placements[0], units next_items[:] = map(_next_or_none, plans) else: yielded_placement = None yielded_units = [None] * len(next_items) for i, (plc, unit) in enumerate(next_items): yielded_units[i] = unit if len(plc) > min_len: # trim_join_unit updates unit in place, so only # placement needs to be sliced to skip min_len. next_items[i] = (plc[min_len:], trim_join_unit(unit, min_len)) else: yielded_placement = plc next_items[i] = _next_or_none(plans[i]) yield yielded_placement, yielded_units def trim_join_unit(join_unit, length): """ Reduce join_unit's shape along item axis to length. Extra items that didn't fit are returned as a separate block. 
""" if 0 not in join_unit.indexers: extra_indexers = join_unit.indexers if join_unit.block is None: extra_block = None else: extra_block = join_unit.block.getitem_block(slice(length, None)) join_unit.block = join_unit.block.getitem_block(slice(length)) else: extra_block = join_unit.block extra_indexers = copy.copy(join_unit.indexers) extra_indexers[0] = extra_indexers[0][length:] join_unit.indexers[0] = join_unit.indexers[0][:length] extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:] join_unit.shape = (length,) + join_unit.shape[1:] return JoinUnit(block=extra_block, indexers=extra_indexers, shape=extra_shape) class JoinUnit(object): def __init__(self, block, shape, indexers={}): # Passing shape explicitly is required for cases when block is None. self.block = block self.indexers = indexers self.shape = shape def __repr__(self): return '%s(%r, %s)' % (self.__class__.__name__, self.block, self.indexers) @cache_readonly def needs_filling(self): for indexer in self.indexers.values(): # FIXME: cache results of indexer == -1 checks. if (indexer == -1).any(): return True return False @cache_readonly def dtype(self): if self.block is None: raise AssertionError("Block is None, no dtype") if not self.needs_filling: return self.block.dtype else: return com._get_dtype(com._maybe_promote(self.block.dtype, self.block.fill_value)[0]) return self._dtype @cache_readonly def is_null(self): if self.block is None: return True if not self.block._can_hold_na: return False # Usually it's enough to check but a small fraction of values to see if # a block is NOT null, chunks should help in such cases. 1000 value # was chosen rather arbitrarily. values_flat = self.block.values.ravel() total_len = values_flat.shape[0] chunk_len = max(total_len // 40, 1000) for i in range(0, total_len, chunk_len): if not isnull(values_flat[i: i + chunk_len]).all(): return False return True @cache_readonly def needs_block_conversion(self): """ we might need to convert the joined values to a suitable block repr """ block = self.block return block is not None and (block.is_sparse or block.is_categorical) def get_reindexed_values(self, empty_dtype, upcasted_na): if upcasted_na is None: # No upcasting is necessary fill_value = self.block.fill_value values = self.block.get_values() else: fill_value = upcasted_na if self.is_null and not getattr(self.block,'is_categorical',None): missing_arr = np.empty(self.shape, dtype=empty_dtype) if np.prod(self.shape): # NumPy 1.6 workaround: this statement gets strange if all # blocks are of same dtype and some of them are empty: # empty one are considered "null" so they must be filled, # but no dtype upcasting happens and the dtype may not # allow NaNs. # # In general, no one should get hurt when one tries to put # incorrect values into empty array, but numpy 1.6 is # strict about that. missing_arr.fill(fill_value) return missing_arr if not self.indexers: if self.block.is_categorical: # preserve the categoricals for validation in _concat_compat return self.block.values elif self.block.is_sparse: # preserve the sparse array for validation in _concat_compat return self.block.values if self.block.is_bool: # External code requested filling/upcasting, bool values must # be upcasted to object to avoid being upcasted to numeric. values = self.block.astype(np.object_).values else: # No dtype upcasting is done here, it will be performed during # concatenation itself. 
values = self.block.get_values() if not self.indexers: # If there's no indexing to be done, we want to signal outside # code that this array must be copied explicitly. This is done # by returning a view and checking `retval.base`. values = values.view() else: for ax, indexer in self.indexers.items(): values = com.take_nd(values, indexer, axis=ax, fill_value=fill_value) return values def _fast_count_smallints(arr): """Faster version of set(arr) for sequences of small numbers.""" if len(arr) == 0: # Handle empty arr case separately: numpy 1.6 chokes on that. return np.empty((0, 2), dtype=arr.dtype) else: counts = np.bincount(arr.astype(np.int_)) nz = counts.nonzero()[0] return np.c_[nz, counts[nz]] def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): if isinstance(slice_or_indexer, slice): return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer, length) elif (isinstance(slice_or_indexer, np.ndarray) and slice_or_indexer.dtype == np.bool_): return 'mask', slice_or_indexer, slice_or_indexer.sum() else: indexer = np.asanyarray(slice_or_indexer, dtype=np.int64) if not allow_fill: indexer = maybe_convert_indices(indexer, length) return 'fancy', indexer, len(indexer)
mit
1,726,036,148,876,224,800
32.963327
134
0.537957
false
4.163943
false
false
false
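The pandas source in the record above implements the BlockManager that backs DataFrame storage: columns of the same dtype are consolidated into 2-D blocks, and _blknos/_blklocs map each column back to its block. Below is a minimal sketch of that layout, assuming a pandas build contemporary with this source (~0.15/0.16) where the manager is reachable through the private DataFrame._data attribute; the attribute name and the block internals are not part of any public API and may differ between versions.

import numpy as np
import pandas as pd

# Mixed-dtype frame: two int64 columns, one float64 column, one object column.
df = pd.DataFrame({
    'a': np.arange(3),
    'b': np.arange(3) * 1.5,
    'c': list('xyz'),
    'd': np.arange(3) * 2,
})

mgr = df._data                      # BlockManager (private attribute, assumed name)
print(mgr.get_dtype_counts())       # per-dtype column counts, e.g. int64: 2

# After consolidation the two int64 columns share one block;
# mgr_locs records which DataFrame columns each block holds.
for blk in mgr.blocks:
    print(blk.dtype, blk.mgr_locs.as_array)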
ludbb/secp256k1-py
tests/test_schnorr.py
1
1732
import pytest
import secp256k1


def test_schnorr_simple():
    if not secp256k1.HAS_SCHNORR:
        pytest.skip('secp256k1_schnorr not enabled, skipping')
        return

    inst = secp256k1.PrivateKey()

    raw_sig = inst.schnorr_sign(b'hello')
    assert inst.pubkey.schnorr_verify(b'hello', raw_sig)
    key2 = secp256k1.PrivateKey()
    assert not key2.pubkey.schnorr_verify(b'hello', raw_sig)

    blank = secp256k1.PublicKey()
    pubkey = blank.schnorr_recover(b'hello', raw_sig)
    pub = secp256k1.PublicKey(pubkey)
    assert pub.serialize() == inst.pubkey.serialize()


def test_schnorr_partial():
    if not secp256k1.HAS_SCHNORR:
        pytest.skip('secp256k1_schnorr not enabled, skipping')
        return

    signer1 = secp256k1.PrivateKey()
    pubnonce1, privnonce1 = signer1.schnorr_generate_nonce_pair(b'hello')
    signer2 = secp256k1.PrivateKey()
    pubnonce2, privnonce2 = signer2.schnorr_generate_nonce_pair(b'hello')

    # First test partial signatures with only two signers.
    partial1 = signer1.schnorr_partial_sign(b'hello', privnonce1, pubnonce2)
    partial2 = signer2.schnorr_partial_sign(b'hello', privnonce2, pubnonce1)
    blank = secp256k1.PublicKey(flags=secp256k1.NO_FLAGS)
    sig = blank.schnorr_partial_combine([partial1, partial2])

    # Recover the public key from the combined signature.
    pubkey = secp256k1.PublicKey().schnorr_recover(b'hello', sig)

    assert blank.public_key is None

    # Check that the combined public keys from signer1 and signer2
    # match the recovered public key.
    blank.combine(
        [signer1.pubkey.public_key, signer2.pubkey.public_key])
    assert blank.public_key
    assert secp256k1.PublicKey(pubkey).serialize() == blank.serialize()
mit
3,662,207,017,736,819,700
35.083333
76
0.711894
false
3.166362
true
false
false
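A usage sketch distilled from tests/test_schnorr.py above; it assumes the secp256k1 bindings were built with the experimental schnorr module enabled (secp256k1.HAS_SCHNORR is True).

import secp256k1

if secp256k1.HAS_SCHNORR:
    key = secp256k1.PrivateKey()
    sig = key.schnorr_sign(b'hello')
    # Verification against the matching public key should succeed.
    assert key.pubkey.schnorr_verify(b'hello', sig)
    # The public key can also be recovered from the message and signature.
    raw_pub = secp256k1.PublicKey().schnorr_recover(b'hello', sig)
    assert secp256k1.PublicKey(raw_pub).serialize() == key.pubkey.serialize()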
Youwotma/portia
slybot/slybot/pageactions.py
1
1528
import json import re LUA_SOURCE = """ function main(splash) assert(splash:go(splash.args.url)) assert(splash:runjs(splash.args.js_source)) assert(splash:wait_for_resume(splash.args.slybot_actions_source)) splash:set_result_content_type("text/html") return splash.html() end """ JS_SOURCE = """ function main(splash) { var events = (%s); try{ __slybot__performEvents(events, function(){ splash.resume(); }); }catch(e){ splash.error(e); } } """ def filter_for_url(url): def _filter(page_action): accept = page_action.get('accept') reject = page_action.get('reject') if reject and re.search(reject, url): return False if accept and not re.search(accept, url): return False return True return _filter class PageActionsMiddleware(object): def process_request(self, request, spider): splash_options = request.meta.get('splash', None) if not splash_options: # Already processed or JS disabled return splash_args = splash_options.get('args', {}) events = spider.page_actions url = splash_args['url'] events = filter(filter_for_url(url), events) if len(events): splash_options['endpoint'] = 'execute' splash_args.update({ "lua_source": LUA_SOURCE, "slybot_actions_source": (JS_SOURCE % json.dumps(events)), }) __all__ = ['PageActionsMiddleware']
bsd-3-clause
-9,078,013,978,702,002,000
26.781818
74
0.590314
false
3.570093
false
false
false
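A small self-contained illustration of the accept/reject filtering used by slybot/slybot/pageactions.py above. The helper is restated verbatim so it runs without Scrapy or Splash; the action dicts and URLs are invented examples.

import re

def filter_for_url(url):
    def _filter(page_action):
        accept = page_action.get('accept')
        reject = page_action.get('reject')
        if reject and re.search(reject, url):
            return False
        if accept and not re.search(accept, url):
            return False
        return True
    return _filter

actions = [
    {'type': 'click', 'accept': r'example\.com/products'},
    {'type': 'scroll', 'reject': r'/login'},
]
for url in ('http://example.com/products/1', 'http://example.com/login'):
    kept = [a['type'] for a in actions if filter_for_url(url)(a)]
    print(url, kept)
# http://example.com/products/1 ['click', 'scroll']
# http://example.com/login []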
jimmycallin/master-thesis
architectures/nn_discourse_parser/nets/data_reader.py
1
6857
import json import codecs class DRelation(object): """Implicit discourse relation object The object is created from the CoNLL-json formatted data. The format can be a bit clunky to get certain information. So convenient methods should be implemented here mostly to be used by the feature functions """ def __init__(self, relation_dict, parse): self.relation_dict = relation_dict self.parse = parse self._arg_tokens = {} self._arg_tokens[1] = None self._arg_tokens[2] = None self._arg_words = {} self._arg_words[1] = None self._arg_words[2] = None self._arg_tree = {} self._arg_tree[1] = None self._arg_tree[2] = None self._arg1_tree = None self._arg1_tree_token_indices = None self._arg2_tree = None self._arg2_tree_token_indices = None @property def senses(self): return self.relation_dict['Sense'] def arg_words(self, arg_pos): """Returns a list of Word objects""" assert(arg_pos == 1 or arg_pos == 2) if self._arg_words[arg_pos] is None: key = 'Arg%s' % arg_pos word_list = self.relation_dict[key]['TokenList'] self._arg_words[arg_pos] = [Word(x, self.parse[self.doc_id]) for x in word_list] return self._arg_words[arg_pos] def arg_tree(self, arg_pos): """Extract the tree for the argument One tree only. Truncated as needed Returns: 1) tree string 2) token indices (not address tuples) of that tree. """ assert(arg_pos == 1 or arg_pos == 2) if self._arg_tree[arg_pos] is None: trees, sentence_indices = self.arg_trees(arg_pos) if arg_pos == 1: tree = trees[-1] sentence_index = sentence_indices[-1] elif arg_pos == 2: tree = trees[0] sentence_index = sentence_indices[0] key = 'Arg%s' % arg_pos token_indices = [x[4] for x in self.relation_dict[key]['TokenList'] if x[3] == sentence_index] self._arg_tree[arg_pos] = (tree, token_indices) return self._arg_tree[arg_pos] def arg_dtree_rule_list(self, arg_pos): """Returns a list of arcs in the dependency tree(s) for the arg """ assert(arg_pos == 1 or arg_pos == 2) token_list = self.arg_token_addresses(arg_pos) sentence_indices = set([x[3] for x in token_list]) sentence_index_to_dependency_tree = {} for sentence_index in sentence_indices: dependencies = \ self.parse[self.doc_id]['sentences'][sentence_index]['dependencies'] index_to_dependency = {} # a dependency looks like this [u'prep', u'reported-8', u'In-1'] for dep in dependencies: rel_type = dep[0] head, _ = dep[1].rsplit('-', 1) dependent, index = dep[2].rsplit('-', 1) index_to_dependency[int(index)] = [rel_type, head, dependent] sentence_index_to_dependency_tree[sentence_index] = index_to_dependency rule_list = [] for token_address in token_list: _, _, _, sentence_index, token_index = token_address dtree = sentence_index_to_dependency_tree[sentence_index] if token_index in dtree: rule_list.append('_'.join(dtree[token_index])) return rule_list def arg_token_addresses(self, arg_pos): assert(arg_pos == 1 or arg_pos == 2) key = 'Arg%s' % arg_pos return self.relation_dict[key]['TokenList'] @property def doc_id(self): return self.relation_dict['DocID'] @property def relation_id(self): return self.relation_dict['ID'] @property def relation_type(self): return self.relation_dict['Type'] @property def doc_relation_id(self): return '%s_%s' % (self.doc_id, self.relation_id) def arg_tokens(self, arg_pos): """Returns a list of raw tokens""" assert(arg_pos == 1 or arg_pos == 2) if self._arg_tokens[arg_pos] is None: key = 'Arg%s' % arg_pos token_list = self.relation_dict[key]['TokenList'] self._arg_tokens[arg_pos] = [self.parse[self.doc_id]['sentences'][x[3]]['words'][x[4]][0] for x in token_list] return 
self._arg_tokens[arg_pos] def arg_trees(self, arg_pos): key = 'Arg%s' % arg_pos token_list = self.relation_dict[key]['TokenList'] sentence_indices = set([x[3] for x in token_list]) return [self.parse[self.doc_id]['sentences'][x]['parsetree'] for x in sentence_indices], list(sentence_indices) def __repr__(self): return self.relation_dict.__repr__() def __str__(self): return self.relation_dict.__str__() class Word(object): """Word class wrapper [u"'ve", {u'CharacterOffsetBegin':2449, u'CharacterOffsetEnd':2452, u'Linkers':[u'arg2_15006',u'arg1_15008'], u'PartOfSpeech':u'VBP'}] """ def __init__(self, word_address, parse): self.word_address = word_address self.word_token, self.word_info = parse['sentences'][word_address[3]]['words'][word_address[4]] @property def pos(self): return self.word_info['PartOfSpeech'] @property def lemma(self): return self.word_info['Lemma'] @property def sentence_index(self): return self.word_address[3] def extract_implicit_relations(data_folder, label_function=None): #parse_file = '%s/pdtb-parses-plus.json' % data_folder #parse_file = '%s/pdtb-parses.json' % data_folder parse_file = '%s/parses.json' % data_folder parse = json.load(codecs.open(parse_file, encoding='utf8')) #relation_file = '%s/pdtb-data-plus.json' % data_folder #relation_file = '%s/pdtb-data.json' % data_folder relation_file = '%s/relations.json' % data_folder relation_dicts = [json.loads(x) for x in open(relation_file)] relations = [DRelation(x, parse) for x in relation_dicts if x['Type'] == 'Implicit'] if label_function is not None: relations = [x for x in relations if label_function.label(x) is not None] return relations def extract_non_explicit_relations(data_folder, label_function=None): parse_file = '%s/pdtb-parses.json' % data_folder parse = json.load(codecs.open(parse_file, encoding='utf8')) relation_file = '%s/pdtb-data.json' % data_folder relation_dicts = [json.loads(x) for x in open(relation_file)] relations = [DRelation(x, parse) for x in relation_dicts if x['Type'] != 'Explicit'] if label_function is not None: relations = [x for x in relations if label_function.label(x) is not None] return relations
mit
4,789,138,021,986,704,000
35.473404
122
0.589616
false
3.503832
false
false
false
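A hedged usage sketch for the data_reader module above. It expects a CoNLL-shared-task-style folder containing parses.json and relations.json; the folder name and the import path below are assumptions, not something the file itself specifies.

from nets.data_reader import extract_implicit_relations   # assumed import path

relations = extract_implicit_relations('conll-data/train')  # hypothetical data folder
for rel in relations[:3]:
    print('%s %s' % (rel.doc_relation_id, rel.senses))
    print(' '.join(rel.arg_tokens(1)))
    print(' '.join(rel.arg_tokens(2)))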
rwl/openpowersystem
cdpsm/iec61970/core/voltage_level.py
1
2591
#------------------------------------------------------------------------------ # Copyright (C) 2009 Richard Lincoln # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation; version 2 dated June, 1991. # # This software is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License # along with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #------------------------------------------------------------------------------ """ A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these. """ # <<< imports # @generated from cdpsm.iec61970.core.equipment_container import EquipmentContainer from cdpsm.iec61970.core.base_voltage import BaseVoltage from cdpsm.iec61970.core.substation import Substation from cdpsm.iec61970.domain import Voltage from google.appengine.ext import db # >>> imports class VoltageLevel(EquipmentContainer): """ A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these. """ # <<< voltage_level.attributes # @generated # The bus bar's low voltage limit low_voltage_limit = Voltage # The bus bar's high voltage limit high_voltage_limit = Voltage # >>> voltage_level.attributes # <<< voltage_level.references # @generated # The base voltage used for all equipment within the VoltageLevel. base_voltage = db.ReferenceProperty(BaseVoltage, collection_name="voltage_level") # Virtual property. The association is used in the naming hierarchy. pass # bays # The association is used in the naming hierarchy. substation = db.ReferenceProperty(Substation, collection_name="voltage_levels") # >>> voltage_level.references # <<< voltage_level.operations # @generated # >>> voltage_level.operations # EOF -------------------------------------------------------------------------
agpl-3.0
-1,730,218,190,851,964,200
38.861538
235
0.677345
false
4.421502
false
false
false
ganga-devs/ganga
ganga/GangaDirac/Lib/Server/DiracCommands.py
1
18300
# Dirac commands #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ @diracCommand def getJobGroupJobs(jg): ''' Return jobs in a group''' return dirac.selectJobs(jobGroup=jg) @diracCommand def kill(id): ''' Kill a given DIRAC Job ID within DIRAC ''' return dirac.deleteJob(id) @diracCommand def peek(id): ''' Peek at the DIRAC Job id and return what we saw ''' return dirac.peekJob(id) @diracCommand def getJobCPUTime(id): ''' Get the amount of CPU time taken by the DIRAC Job id''' return dirac.getJobCPUTime(id) @diracCommand def reschedule(id): ''' Reschedule within DIRAC a given DIRAC Job id''' return dirac.reschedule(id) @diracCommand def submit(djob, mode='wms'): ''' Submit a DIRAC job given by the jdl:djob with a given mode ''' return dirac.submitJob(djob, mode=mode) @diracCommand def ping(system, service): ''' Ping a given service on a given system running DIRAC ''' return dirac.ping(system, service) @diracCommand def removeFile(lfn): ''' Remove a given LFN from the DFC''' ret = {} if type(lfn) is list: for l in lfn: ret.update(dirac.removeFile(l)) else: ret.update(dirac.removeFile(lfn)) return ret @diracCommand def getMetadata(lfn): ''' Return the metadata associated with a given :DN''' return dirac.getLfnMetadata(lfn) @diracCommand def getReplicas(lfns): ''' Return the locations of the replicas of a given LFN in a dict format, SE: location ''' return dirac.getReplicas(lfns, active=True, preferDisk = True) @diracCommand def getReplicasForJobs(lfns): ''' Return the locations of the replicas of a given LFN in a dict format, SE: location. This is for use in the splitter to negate copies at SEs that are not to be used for user jobs ''' return dirac.getReplicasForJobs(lfns) @diracCommand def getAccessURL(lfn, SE, protocol=False): ''' Return the access URL for the given LFN, storage element and protocol. The protocol should be in the form of a list ''' return dirac.getAccessURL(lfn, SE, False, protocol) @diracCommand def getFile(lfns, destDir=''): ''' Put the physical file behind the LFN in the destDir path''' return dirac.getFile(lfns, destDir=destDir) @diracCommand def replicateFile(lfn, destSE, srcSE='', locCache=''): ''' Replicate a given LFN from a srcSE to a destSE''' res = dirac.replicateFile(lfn, destSE, srcSE, locCache) return res @diracCommand def removeReplica(lfn, sE): ''' Remove the physical files and LFN from the DFC''' return dirac.removeReplica(lfn, sE) @diracCommand def getOutputData(id, outputFiles='', destinationDir=''): ''' Return output data of a requeted DIRAC Job id, place outputFiles in a given destinationDir') ''' return dirac.getJobOutputData(id, outputFiles, destinationDir) @diracCommand def splitInputData(files, files_per_job): ''' Split list of files ito a list of list of smaller files (below files_per_job in length) and return the list of lists''' return dirac.splitInputData(files, files_per_job) @diracCommand def getInputDataCatalog(lfns, site, xml_file): ''' Get the XML describing the given LFNs at a given site''' return dirac.getInputDataCatalog(lfns, site, xml_file) @diracCommand def uploadFile(lfn, file, diracSEs, guid=None): ''' Upload a given file to an lfn with 1 replica places at each element in diracSEs. 
Use a given guid if given''' outerr = {} for se in diracSEs: result = dirac.addFile(lfn, file, se, guid) if result.get('OK', False) and lfn in result.get('Value', {'Successful': {}})['Successful']: result['Value']['Successful'][lfn].update({'DiracSE': se}) md = dirac.getLfnMetadata(lfn) if md.get('OK', False) and lfn in md.get('Value', {'Successful': {}})['Successful']: guid = md['Value']['Successful'][lfn]['GUID'] result['Value']['Successful'][lfn].update({'GUID': guid}) return result outerr.update({se: result}) return outerr @diracCommand def addFile(lfn, file, diracSE, guid): ''' Upload a given file to an lfn with 1 replica places at each element in diracSEs. Use a given guid if given''' return dirac.addFile(lfn, file, diracSE, guid) @diracCommand def getOutputSandbox(id, outputDir=os.getcwd(), unpack=True, oversized=True, noJobDir=True, pipe_out=True): ''' Get the outputsandbox and return the output from Dirac to the calling function id: the DIRAC jobid of interest outputDir: output directory locall on disk to use oversized: is this output sandbox oversized this will be modified noJobDir: should we create a folder with the DIRAC job ID? output: should I output the Dirac output or should I return a python object (False) unpack: should the sandbox be untarred when downloaded''' result = dirac.getOutputSandbox(id, outputDir, oversized, noJobDir, unpack) if result is not None and result.get('OK', False): if not noJobDir: tmpdir = os.path.join(outputDir, str(id)) os.system('mv -f %s/* %s/. ; rm -rf %s' % (tmpdir, outputDir, tmpdir)) os.system('for file in $(ls %s/*Ganga_*.log); do ln -s ${file} %s/stdout; break; done' % (outputDir, outputDir)) #So the download failed. Maybe the sandbox was oversized and stored on the grid. Check in the job parameters and download it else: parameters = dirac.getJobParameters(id) if parameters is not None and parameters.get('OK', False): parameters = parameters['Value'] if 'OutputSandboxLFN' in parameters: result = dirac.getFile(parameters['OutputSandboxLFN'], destDir=outputDir) dirac.removeFile(parameters['OutputSandboxLFN']) return result @diracCommand def getOutputDataInfo(id, pipe_out=True): ''' Get information on the output data generated by a job of ID and pipe it out or return it''' ret = {} result = getOutputDataLFNs(id, pipe_out=False) if result.get('OK', False) and 'Value' in result: for lfn in result.get('Value', []): file_name = os.path.basename(lfn) ret[file_name] = {} ret[file_name]['LFN'] = lfn md = dirac.getLfnMetadata(lfn) if md.get('OK', False) and lfn in md.get('Value', {'Successful': {}})['Successful']: ret[file_name]['GUID'] = md['Value']['Successful'][lfn]['GUID'] # this catches if fail upload, note lfn still exists in list as # dirac tried it elif md.get('OK', False) and lfn in md.get('Value', {'Failed': {}})['Failed']: ret[file_name]['LFN'] = '###FAILED###' ret[file_name]['LOCATIONS'] = md['Value']['Failed'][lfn] ret[file_name]['GUID'] = 'NotAvailable' continue rp = dirac.getReplicas(lfn) if rp.get('OK', False) and lfn in rp.get('Value', {'Successful': {}})['Successful']: ret[file_name]['LOCATIONS'] = rp['Value']['Successful'][lfn].keys() return ret # could shrink this with dirac.getJobOutputLFNs from ##dirac @diracCommand def getOutputDataLFNs(id, pipe_out=True): ''' Get the outputDataLFN which have been generated by a Dirac job of ID and pipe it out or return it''' parameters = dirac.getJobParameters(id) lfns = [] ok = False message = 'The outputdata LFNs could not be found.' 
if parameters is not None and parameters.get('OK', False): parameters = parameters['Value'] # remove the sandbox if it has been uploaded sandbox = None if 'OutputSandboxLFN' in parameters: sandbox = parameters['OutputSandboxLFN'] # now find out about the outputdata if 'UploadedOutputData' in parameters: lfn_list = parameters['UploadedOutputData'] import re lfns = re.split(',\s*', lfn_list) if sandbox is not None and sandbox in lfns: lfns.remove(sandbox) ok = True elif parameters is not None and 'Message' in parameters: message = parameters['Message'] result = {'OK': ok} if ok: result['Value'] = lfns else: result['Message'] = message return result @diracCommand def normCPUTime(id, pipe_out=True): ''' Get the normalised CPU time that has been used by a DIRAC job of ID and pipe it out or return it''' parameters = dirac.getJobParameters(id) ncput = None if parameters is not None and parameters.get('OK', False): parameters = parameters['Value'] if 'NormCPUTime(s)' in parameters: ncput = parameters['NormCPUTime(s)'] return ncput @diracCommand def finished_job(id, outputDir=os.getcwd(), unpack=True, oversized=True, noJobDir=True, downloadSandbox = True): ''' Nesting function to reduce number of calls made against DIRAC when finalising a job, takes arguments such as getOutputSandbox Returns the CPU time of the job as a dict, the output sandbox information in another dict and a dict of the LFN of any uploaded data''' out_cpuTime = normCPUTime(id, pipe_out=False) if downloadSandbox: out_sandbox = getOutputSandbox(id, outputDir, unpack, oversized, noJobDir, pipe_out=False) else: out_sandbox = None out_dataInfo = getOutputDataInfo(id, pipe_out=False) outStateTime = {'completed' : getStateTime(id, 'completed', pipe_out=False)} return (out_cpuTime, out_sandbox, out_dataInfo, outStateTime) @diracCommand def finaliseJobs(inputDict, statusmapping, downloadSandbox=True, oversized=True, noJobDir=True): ''' A function to get the necessary information to finalise a whole bunch of jobs. 
Returns a dict of job information and a dict of stati.''' returnDict = {} statusList = dirac.getJobStatus(list(inputDict)) for diracID in inputDict: returnDict[diracID] = {} returnDict[diracID]['cpuTime'] = normCPUTime(diracID, pipe_out=False) if downloadSandbox: returnDict[diracID]['outSandbox'] = getOutputSandbox(diracID, inputDict[diracID], oversized, noJobDir, pipe_out=False) else: returnDict[diracID]['outSandbox'] = None returnDict[diracID]['outDataInfo'] = getOutputDataInfo(diracID, pipe_out=False) returnDict[diracID]['outStateTime'] = {'completed' : getStateTime(diracID, 'completed', pipe_out=False)} return returnDict, statusList @diracCommand def status(job_ids, statusmapping, pipe_out=True): '''Function to check the statuses and return the Ganga status of a job after looking it's DIRAC status against a Ganga one''' # Translate between the many statuses in DIRAC and the few in Ganga #return {'OK':True, 'Value':[['WIP', 'WIP', 'WIP', 'WIP', 'WIP']]} result = dirac.getJobStatus(job_ids) if not result['OK']: return result status_list = [] bulk_status = result['Value'] for _id in job_ids: job_status = bulk_status.get(_id, {}) minor_status = job_status.get('MinorStatus', None) dirac_status = job_status.get('Status', None) dirac_site = job_status.get('Site', None) ganga_status = statusmapping.get(dirac_status, None) if ganga_status is None: ganga_status = 'failed' dirac_status = 'Unknown: No status for Job' #if dirac_status == 'Completed' and (minor_status not in ['Pending Requests']): # ganga_status = 'running' if minor_status in ['Uploading Output Data']: ganga_status = 'running' try: from DIRAC.Core.DISET.RPCClient import RPCClient monitoring = RPCClient('WorkloadManagement/JobMonitoring') app_status = monitoring.getJobAttributes(_id)['Value']['ApplicationStatus'] except: app_status = "unknown ApplicationStatus" status_list.append([minor_status, dirac_status, dirac_site, ganga_status, app_status]) return status_list @diracCommand def getStateTime(id, status, pipe_out=True): ''' Return the state time from DIRAC corresponding to DIRACJob tranasitions''' log = dirac.getJobLoggingInfo(id) if 'Value' not in log: return None L = log['Value'] checkstr = '' if status == 'running': checkstr = 'Running' elif status == 'completed': checkstr = 'Done' elif status == 'completing': checkstr = 'Completed' elif status == 'failed': checkstr = 'Failed' else: checkstr = '' if checkstr == '': print("%s" % None) return for l in L: if checkstr in l[0]: T = datetime.datetime(*(time.strptime(l[3], "%Y-%m-%d %H:%M:%S")[0:6])) return T return None @diracCommand def getBulkStateTime(job_ids, status, pipe_out=True): ''' Function to repeatedly call getStateTime for multiple Dirac Job id and return the result in a dictionary ''' result = {} for this_id in job_ids: result[this_id] = getStateTime(this_id, status, pipe_out=False) return result @diracCommand def monitorJobs(job_ids, status_mapping, pipe_out=True): ''' This combines 'status' and 'getBulkStateTime' into 1 function call for monitoring ''' status_info = status(job_ids, status_mapping, pipe_out=False) state_job_status = {} for job_id, this_stat_info in zip(job_ids, status_info): if this_stat_info: update_status = this_stat_info[3] if update_status not in state_job_status: state_job_status[update_status] = [] state_job_status[update_status].append(job_id) state_info = {} for this_status, these_jobs in state_job_status.items(): state_info[this_status] = getBulkStateTime(these_jobs, this_status, pipe_out=False) return (status_info, state_info) @diracCommand 
def timedetails(id): ''' Function to return the getJobLoggingInfo for a DIRAC Job of id''' log = dirac.getJobLoggingInfo(id) d = {} for i in range(0, len(log['Value'])): d[i] = log['Value'][i] return d # DiracAdmin commands #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ @diracCommand def getJobPilotOutput(id, dir): ''' Get the output of the DIRAC pilot that this job was running on and place it in dir''' pwd = os.getcwd() try: os.chdir(dir) os.system('rm -f pilot_%d/std.out && rmdir pilot_%d ' % (id, id)) result = DiracAdmin().getJobPilotOutput(id) finally: os.chdir(pwd) return result @diracCommand def getServicePorts(): ''' Get the service ports from the DiracAdmin based upon the Dirac config''' return DiracAdmin().getServicePorts() @diracCommand def isSEArchive(se): ''' Ask if the specified SE is for archive ''' from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers return DMSHelpers().isSEArchive(se) @diracCommand def getSitesForSE(se): ''' Get the Sites associated with this SE''' from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE result = getSitesForSE(storageElement=se) return result @diracCommand def getSEsForSite(site): ''' Get the list of SE associated with this site''' from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite result = getSEsForSite(site) return result @diracCommand def getSESiteMapping(): '''Get the mapping of SEs and sites''' from DIRAC.Core.Utilities.SiteSEMapping import getSESiteMapping result = getSESiteMapping() return result @diracCommand def checkSEStatus(se, access = 'Write'): ''' returns the value of a certain SE status flag (access or other) param se: Storage Element name type se: string param access: type of access type access: string in ('Read', 'Write', 'Remove', 'Check') returns: True or False ''' result = dirac.checkSEAccess(se, access) return result @diracCommand def listFiles(baseDir, minAge = None): ''' Return a list of LFNs for files stored on the grid in the argument directory and its subdirectories param baseDir: Top directory to begin search type baseDir: string param minAge: minimum age of files to be returned type minAge: string format: "W:D:H" ''' from DIRAC.Resources.Catalog.FileCatalog import FileCatalog fc = FileCatalog() from datetime import datetime, timedelta withMetaData = False cutoffTime = datetime.utcnow() import re r = re.compile('\d:\d:\d') if r.match(minAge): withMetaData = True timeList = minAge.split(':') timeLimit = timedelta(weeks = int(timeList[0]), days = int(timeList[1]), hours = int(timeList[2])) cutoffTime = datetime.utcnow() - timeLimit baseDir = baseDir.rstrip('/') activeDirs = [baseDir] allFiles = [] emptyDirs = [] while len(activeDirs) > 0: currentDir = activeDirs.pop() res = fc.listDirectory(currentDir, withMetaData, timeout = 360) if not res['OK']: return "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] ) elif currentDir in res['Value']['Failed']: return "Error retrieving directory contents", "%s %s" % ( currentDir, res['Value']['Failed'][currentDir] ) else: dirContents = res['Value']['Successful'][currentDir] subdirs = dirContents['SubDirs'] files = dirContents['Files'] if not subdirs and not files: emptyDirs.append( currentDir ) else: for subdir in sorted( subdirs, reverse=True): if (not withMetaData) or subdirs[subdir]['CreationDate'] < cutoffTime: activeDirs.append(subdir) for filename in sorted(files): fileOK = False if (not withMetaData) or files[filename]['MetaData']['CreationDate'] < cutoffTime: fileOK = True if not 
fileOK: files.pop(filename) allFiles += sorted(files) return allFiles
gpl-2.0
-7,655,071,968,776,760,000
35.094675
139
0.640109
false
3.670277
false
false
false
rafaelvieiras/script.pseudotv.live
resources/lib/ChannelListThread.py
1
9795
# Copyright (C) 2011 Jason Anderson # # # This file is part of PseudoTV. # # PseudoTV is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PseudoTV is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with PseudoTV. If not, see <http://www.gnu.org/licenses/>. import xbmc, xbmcgui, xbmcaddon import subprocess, os import time, threading import datetime import sys, re import random, traceback from ChannelList import ChannelList from Channel import Channel from Globals import * from Artdownloader import * class ChannelListThread(threading.Thread): def __init__(self): threading.Thread.__init__(self) self.myOverlay = None sys.setcheckinterval(25) self.chanlist = ChannelList() self.paused = False self.fullUpdating = True self.Artdownloader = Artdownloader() def log(self, msg, level = xbmc.LOGDEBUG): log('ChannelListThread: ' + msg, level) def run(self): self.log("Starting") self.chanlist.exitThread = False self.chanlist.readConfig() self.chanlist.sleepTime = 0.1 if self.myOverlay == None: self.log("Overlay not defined. Exiting.") return self.chanlist.myOverlay = self.myOverlay self.fullUpdating = (self.myOverlay.backgroundUpdating == 0) validchannels = 0 for i in range(self.myOverlay.maxChannels): self.chanlist.channels.append(Channel()) if self.myOverlay.channels[i].isValid: validchannels += 1 # Don't load invalid channels if minimum threading mode is on if self.fullUpdating and self.myOverlay.isMaster: if validchannels < self.chanlist.enteredChannelCount: title = 'PseudoTV Live, Background Loading...' 
xbmc.executebuiltin('XBMC.Notification(%s, %s, %s)' % (title, 4000 , THUMB)) for i in range(self.myOverlay.maxChannels): if self.myOverlay.channels[i].isValid == False: while True: if self.myOverlay.isExiting: self.log("Closing thread") return time.sleep(1) if self.paused == False: break self.chanlist.channels[i].setAccessTime(self.myOverlay.channels[i].lastAccessTime) try: if self.chanlist.setupChannel(i + 1, True, True, False) == True: while self.paused: if self.myOverlay.isExiting: self.log("IsExiting") return time.sleep(1) self.myOverlay.channels[i] = self.chanlist.channels[i] if self.myOverlay.channels[i].isValid == True: title = "PseudoTV Live, Channel " + str(i + 1) + " Added" xbmc.executebuiltin('XBMC.Notification(%s, %s, %s)' % (title, 4000, THUMB)) except Exception,e: self.log("Unknown Channel Creation Exception", xbmc.LOGERROR) self.log(traceback.format_exc(), xbmc.LOGERROR) return REAL_SETTINGS.setSetting('ForceChannelReset', 'false') self.chanlist.sleepTime = 0.3 if REAL_SETTINGS.getSetting("ArtService_Enabled") == "true": InfoTimer = INFOBAR_TIMER[int(REAL_SETTINGS.getSetting('InfoTimer'))] self.ArtServiceThread = threading.Timer(float(InfoTimer), self.Artdownloader.ArtService) self.ArtServiceThread.name = "ArtServiceThread" self.ArtServiceThread.start() while True: for i in range(self.myOverlay.maxChannels): modified = True while modified == True and self.myOverlay.channels[i].getTotalDuration() < PREP_CHANNEL_TIME and self.myOverlay.channels[i].Playlist.size() < 16288: # If minimum updating is on, don't attempt to load invalid channels if self.fullUpdating == False and self.myOverlay.channels[i].isValid == False and self.myOverlay.isMaster: break modified = False if self.myOverlay.isExiting: self.log("Closing thread") return time.sleep(2) curtotal = self.myOverlay.channels[i].getTotalDuration() if self.myOverlay.isMaster: if curtotal > 0: # When appending, many of the channel variables aren't set, so copy them over. # This needs to be done before setup since a rule may use one of the values. # It also needs to be done after since one of them may have changed while being setup. 
self.chanlist.channels[i].playlistPosition = self.myOverlay.channels[i].playlistPosition self.chanlist.channels[i].showTimeOffset = self.myOverlay.channels[i].showTimeOffset self.chanlist.channels[i].lastAccessTime = self.myOverlay.channels[i].lastAccessTime self.chanlist.channels[i].totalTimePlayed = self.myOverlay.channels[i].totalTimePlayed self.chanlist.channels[i].isPaused = self.myOverlay.channels[i].isPaused self.chanlist.channels[i].mode = self.myOverlay.channels[i].mode # Only allow appending valid channels, don't allow erasing them try: self.chanlist.setupChannel(i + 1, True, False, True) except Exception,e: self.log("Unknown Channel Appending Exception", xbmc.LOGERROR) self.log(traceback.format_exc(), xbmc.LOGERROR) return self.chanlist.channels[i].playlistPosition = self.myOverlay.channels[i].playlistPosition self.chanlist.channels[i].showTimeOffset = self.myOverlay.channels[i].showTimeOffset self.chanlist.channels[i].lastAccessTime = self.myOverlay.channels[i].lastAccessTime self.chanlist.channels[i].totalTimePlayed = self.myOverlay.channels[i].totalTimePlayed self.chanlist.channels[i].isPaused = self.myOverlay.channels[i].isPaused self.chanlist.channels[i].mode = self.myOverlay.channels[i].mode else: try: self.chanlist.setupChannel(i + 1, True, True, False) except Exception,e: self.log("Unknown Channel Modification Exception", xbmc.LOGERROR) self.log(traceback.format_exc(), xbmc.LOGERROR) return else: try: # We're not master, so no modifications...just try and load the channel self.chanlist.setupChannel(i + 1, True, False, False) except Exception,e: self.log("Unknown Channel Loading Exception", xbmc.LOGERROR) self.log(traceback.format_exc(), xbmc.LOGERROR) return self.myOverlay.channels[i] = self.chanlist.channels[i] if self.myOverlay.isMaster: ADDON_SETTINGS.setSetting('Channel_' + str(i + 1) + '_time', str(self.myOverlay.channels[i].totalTimePlayed)) if self.myOverlay.channels[i].getTotalDuration() > curtotal and self.myOverlay.isMaster: modified = True # A do-while loop for the paused state while True: if self.myOverlay.isExiting: self.log("Closing thread") return time.sleep(2) if self.paused == False: break timeslept = 0 if self.fullUpdating == False and self.myOverlay.isMaster: return # If we're master, wait 30 minutes in between checks. If not, wait 5 minutes. while (timeslept < 1800 and self.myOverlay.isMaster == True) or (timeslept < 300 and self.myOverlay.isMaster == False): if self.myOverlay.isExiting: self.log("IsExiting") return time.sleep(2) timeslept += 2 self.log("All channels up to date. Exiting thread.") def pause(self): self.paused = True self.chanlist.threadPaused = True def unpause(self): self.paused = False self.chanlist.threadPaused = False
gpl-3.0
-5,893,234,249,640,738,000
44.347222
164
0.54099
false
4.709135
false
false
false
AnoopAlias/nDeploy
scripts/update_cluster_ipmap.py
1
1898
#!/usr/bin/env python import yaml import argparse import os __author__ = "Anoop P Alias" __copyright__ = "Copyright 2014, PiServe Technologies Pvt Ltd , India" __license__ = "GPL" __email__ = "anoop.alias@piserve.com" installation_path = "/opt/nDeploy" # Absolute Installation Path cluster_config_file = installation_path+"/conf/ndeploy_cluster.yaml" # Function defs def update_ip_map(server, iphere, ipthere): cluster_data_yaml = open(cluster_config_file, 'r') cluster_data_yaml_parsed = yaml.safe_load(cluster_data_yaml) cluster_data_yaml.close() if cluster_data_yaml_parsed: if server in cluster_data_yaml_parsed.keys(): connect_server_dict = cluster_data_yaml_parsed.get(server) ipmap_dict = connect_server_dict.get("ipmap") ipmap_dict[iphere] = ipthere with open(cluster_config_file, 'w') as yaml_file: yaml_file.write(yaml.dump(cluster_data_yaml_parsed, default_flow_style=False)) else: mydict = {server: {'ipmap': {iphere: ipthere}}} cluster_data_yaml_parsed.update(mydict) with open(cluster_config_file, 'w') as yaml_file: yaml_file.write(yaml.dump(cluster_data_yaml_parsed, default_flow_style=False)) else: print("Invalid cluster data") parser = argparse.ArgumentParser(description="create/update nDeploy-cluster ipmap") parser.add_argument("slave_hostname") parser.add_argument("ip_here") parser.add_argument("remote_ip") args = parser.parse_args() server_key = args.slave_hostname ip_here = args.ip_here remote_ip = args.remote_ip if os.path.isfile(cluster_config_file): update_ip_map(server_key, ip_here, remote_ip) else: mydict = {server_key: {'ipmap': {ip_here: remote_ip}}} with open(cluster_config_file, 'w') as cluster_conf: cluster_conf.write(yaml.dump(mydict, default_flow_style=False))
gpl-3.0
6,655,193,397,231,080,000
34.148148
94
0.674921
false
3.278066
true
false
false
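For reference, a sketch of the on-disk structure that update_cluster_ipmap.py above maintains in ndeploy_cluster.yaml. The hostname and IP addresses are invented; with these values the script would be invoked as `python update_cluster_ipmap.py slave1.example.com 192.0.2.10 198.51.100.10`.

import yaml

cluster = {'slave1.example.com': {'ipmap': {'192.0.2.10': '198.51.100.10'}}}
print(yaml.dump(cluster, default_flow_style=False))
# slave1.example.com:
#   ipmap:
#     192.0.2.10: 198.51.100.10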
letouriste001/SmartForest_2.0
python3.4Smartforest/lib/python3.4/site-packages/django/db/migrations/recorder.py
1
2868
from __future__ import unicode_literals from django.apps.registry import Apps from django.db import models from django.db.utils import DatabaseError from django.utils.encoding import python_2_unicode_compatible from django.utils.timezone import now from .exceptions import MigrationSchemaMissing class MigrationRecorder(object): """ Deals with storing migration records in the database. Because this table is actually itself used for dealing with model creation, it's the one thing we can't do normally via migrations. We manually handle table creation/schema updating (using schema backend) and then have a floating model to do queries with. If a migration is unapplied its row is removed from the table. Having a row in the table always means a migration is applied. """ @python_2_unicode_compatible class Migration(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField(default=now) class Meta: apps = Apps() app_label = "migrations" db_table = "django_migrations" def __str__(self): return "Migration %s for %s" % (self.name, self.app) def __init__(self, connection): self.connection = connection @property def migration_qs(self): return self.Migration.objects.using(self.connection.alias) def ensure_schema(self): """ Ensures the table exists and has the correct schema. """ # If the table's there, that's fine - we've never changed its schema # in the codebase. if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()): return # Make the table try: with self.connection.schema_editor() as editor: editor.create_model(self.Migration) except DatabaseError as exc: raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc) def applied_migrations(self): """ Returns a set of (app, name) of applied migrations. """ self.ensure_schema() return set(tuple(x) for x in self.migration_qs.values_list("app", "name")) def record_applied(self, app, name): """ Records that a migration was applied. """ self.ensure_schema() self.migration_qs.create(app=app, name=name) def record_unapplied(self, app, name): """ Records that a migration was unapplied. """ self.ensure_schema() self.migration_qs.filter(app=app, name=name).delete() def flush(self): """ Deletes all migration records. Useful if you're testing migrations. """ self.migration_qs.all().delete()
mit
213,370,591,806,448,500
32.348837
112
0.642957
false
3.907357
false
false
false
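A brief usage sketch for the MigrationRecorder above. It assumes a configured Django project, since the recorder needs a live database connection; it is normally driven by the migration executor rather than called directly.

from django.db import connection
from django.db.migrations.recorder import MigrationRecorder

recorder = MigrationRecorder(connection)
recorder.ensure_schema()                 # creates django_migrations if it is missing
applied = recorder.applied_migrations()  # set of (app_label, migration_name) tuples
for app, name in sorted(applied)[:5]:
    print('%s.%s' % (app, name))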
staffanm/layeredconfig
layeredconfig/dictsource.py
1
1625
# this should possibly be an abstract class as well from . import ConfigSource class DictSource(ConfigSource): def __init__(self, **kwargs): """If your backend data is exposable as a python dict, you can subclass from this class to avoid implementing :py:meth:`has`, :py:meth:`get`, :py:meth:`keys`, :py:meth:`subsection` and :py:meth:`subsections`. You only need to write :py:meth:`__init__` (which should set ``self.source`` to that exposed dict), and possibly :py:meth:`typed` and :py:meth:`save`. """ super(DictSource, self).__init__(**kwargs) self.source = {} def subsections(self): for (k, v) in self.source.items(): if isinstance(v, dict): yield k def keys(self): for (k, v) in self.source.items(): if not isinstance(v, dict) and not isinstance(v, type): yield k def subsection(self, key): # Make an object of the correct type return self.__class__(defaults=self.source[key], parent=self, identifier=self.identifier) def typed(self, key): # if we have it, we can type it return key in self.source and self.source[key] is not None def has(self, key): # should return true for real values only, not type placeholders or sub-dicts return key in self.source and not isinstance(self.source[key], (type, dict)) def get(self, key): return self.source[key] def set(self, key, value): self.source[key] = value
bsd-3-clause
8,381,840,833,951,817,000
33.574468
85
0.580923
false
4.0625
false
false
false
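A minimal subclass along the lines the DictSource docstring above describes. The class name and sample data are invented, and it assumes the installed layeredconfig package exposes DictSource at this module path and that the ConfigSource base can be constructed without extra arguments.

from layeredconfig.dictsource import DictSource

class InMemorySource(DictSource):
    def __init__(self, data=None, **kwargs):
        super(InMemorySource, self).__init__(**kwargs)
        # Per the docstring, __init__ only has to point self.source at the backing dict.
        self.source = data or {}

src = InMemorySource({'processes': 4, 'log': {'level': 'INFO'}})
print(list(src.keys()))          # ['processes']  -- nested dicts are subsections, not keys
print(list(src.subsections()))   # ['log']
print(src.get('processes'))      # 4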
samervin/arctic-scavengers-randomizer
arctic_cards/leaders.py
1
3619
# Fields NAME = 'name' SET = 'set' USES_REFUGEES = 'uses-refugees' TEXT = 'text' # Set values HQ_EXP = 'hq' RECON_EXP = 'recon' # Information not strictly contained on the card COMMENT = 'comment' class Leaders: ALL_LEADERS = [ { NAME: 'The Peacemaker', SET: HQ_EXP, USES_REFUGEES: True, TEXT: 'Each round you may play 1 Refugee to increase the power of another tribe member\s hunt or dig actions by +2.' }, { NAME: 'The Gangster', SET: HQ_EXP, USES_REFUGEES: True, TEXT: 'Your Refugees have a fight of 0 and they count as 2 people for the purpose of breaking tied skirmishes.' }, { NAME: 'The Butcher', SET: HQ_EXP, TEXT: 'Each round you may kill 1 of your tribe members (remove the card permanently from play) and sell his/her internal organs for 1 food and 1 med.' }, { NAME: 'The Fanatic', SET: HQ_EXP, USES_REFUGEES: True, TEXT: 'Each round you may use 1 Refugee from your hand as a suicide bomber against an opponent. ' 'Discard 1 of your opponent\'s revealed cards (your choice), the Refugee dies in the process (remove card from play).' }, { NAME: 'The Organizer', SET: HQ_EXP, USES_REFUGEES: True, TEXT: 'Each round you may play 1 Refugee to perform a draw of 2, but only keep 1. ' 'No other cards may be played to modify this draw and you may not perform another draw this round.' }, { NAME: 'The Cannibal', SET: HQ_EXP, TEXT: 'Each round you may cannibalize 1 tribe member for 3 food (and subsequently remove that card from play). ' 'You may not combine food from hunting or a garden when hiring with cannibalized food.' }, { NAME: 'The Sergent at Arms', SET: HQ_EXP, TEXT: 'You are immune to the disarm action, preventing saboteurs from discarding your tools. ' 'When hiring saboteurs, you pay no food (cost for you is 1 med).', COMMENT: 'This card is misspelled as printed: the correct spelling is Sergeant.' }, { NAME: 'The Mentor', SET: HQ_EXP, USES_REFUGEES: True, TEXT: 'Each round you may play 1 Refugee card to grant another tribe member a +1 to any action.' }, { NAME: 'The Excavator', SET: HQ_EXP, USES_REFUGEES: True, TEXT: 'All of your Refugees have a dig of 1. ' 'If a Refugee uses a digging tool (i.e. shovel or a pick axe), ignore the tool\'s normal bonus and add +1 to the score.' }, { NAME: 'The Ranger', SET: HQ_EXP, USES_REFUGEES: True, TEXT: 'All of your Refugees and Tribe Families have a hunt of 1.' }, { NAME: 'The Swindler', SET: RECON_EXP, USES_REFUGEES: True, TEXT: 'Once per turn, you may discard 1 Refugee to persuade a mercenary into joining your tribe for 1 less food ' 'or discard two Refugees to reduce the price by 1 med.' }, { NAME: 'The Yardmaster', SET: RECON_EXP, TEXT: 'Once per turn, you may peek at the top 2 cards of the Junkyard. ' 'Return both of them to the top or bottom of the Junkyard.' } ]
mit
6,301,782,325,497,952,000
37.913978
162
0.546284
false
3.604582
false
false
false
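A quick example of querying the card data above; the import path mirrors the file location (arctic_cards/leaders.py) and is the only assumption.

from arctic_cards.leaders import Leaders, NAME, USES_REFUGEES

refugee_leaders = [card[NAME] for card in Leaders.ALL_LEADERS
                   if card.get(USES_REFUGEES)]
print('%d of %d leaders use Refugees:' % (len(refugee_leaders), len(Leaders.ALL_LEADERS)))
for name in refugee_leaders:
    print('  ' + name)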
Samuel789/MediPi
MedManagementWeb/env/lib/python3.5/site-packages/Crypto/Cipher/DES.py
1
7100
# -*- coding: utf-8 -*- # # Cipher/DES.py : DES # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """DES symmetric cipher DES `(Data Encryption Standard)`__ is a symmetric block cipher standardized by NIST_ . It has a fixed data block size of 8 bytes. Its keys are 64 bits long, even though 8 bits were used for integrity (now they are ignored) and do not contribute to securty. The effective key length is therefore 56 bits only. DES is cryptographically secure, but its key length is too short by nowadays standards and it could be brute forced with some effort. **Use AES, not DES. This module is provided only for legacy purposes.** As an example, encryption can be done as follows: >>> from Crypto.Cipher import DES >>> >>> key = b'-8B key-' >>> cipher = DES.new(key, DES.MODE_OFB) >>> plaintext = b'sona si latine loqueris ' >>> msg = cipher.iv + cipher.encrypt(plaintext) .. __: http://en.wikipedia.org/wiki/Data_Encryption_Standard .. _NIST: http://csrc.nist.gov/publications/fips/fips46-3/fips46-3.pdf :undocumented: __package__ """ import sys from Crypto.Cipher import _create_cipher from Crypto.Util.py3compat import byte_string from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer, SmartPointer, c_size_t, expect_byte_string) _raw_des_lib = load_pycryptodome_raw_lib( "Crypto.Cipher._raw_des", """ int DES_start_operation(const uint8_t key[], size_t key_len, void **pResult); int DES_encrypt(const void *state, const uint8_t *in, uint8_t *out, size_t data_len); int DES_decrypt(const void *state, const uint8_t *in, uint8_t *out, size_t data_len); int DES_stop_operation(void *state); """) def _create_base_cipher(dict_parameters): """This method instantiates and returns a handle to a low-level base cipher. It will absorb named parameters in the process.""" try: key = dict_parameters.pop("key") except KeyError: raise TypeError("Missing 'key' parameter") expect_byte_string(key) if len(key) != key_size: raise ValueError("Incorrect DES key length (%d bytes)" % len(key)) start_operation = _raw_des_lib.DES_start_operation stop_operation = _raw_des_lib.DES_stop_operation cipher = VoidPointer() result = start_operation(key, c_size_t(len(key)), cipher.address_of()) if result: raise ValueError("Error %X while instantiating the DES cipher" % result) return SmartPointer(cipher.get(), stop_operation) def new(key, mode, *args, **kwargs): """Create a new DES cipher :Parameters: key : byte string The secret key to use in the symmetric cipher. It must be 8 byte long. The parity bits will be ignored. :Keywords: mode : a *MODE_** constant The chaining mode to use for encryption or decryption. 
iv : byte string (*Only* `MODE_CBC`, `MODE_CFB`, `MODE_OFB`, `MODE_OPENPGP`). The initialization vector to use for encryption or decryption. For `MODE_OPENPGP`, IV must be 8 bytes long for encryption and 10 bytes for decryption (in the latter case, it is actually the *encrypted* IV which was prefixed to the ciphertext). For all other modes, it must be 8 bytes long. If not provided, a random byte string is generated (you can read it back via the ``iv`` attribute). nonce : byte string (*Only* `MODE_EAX` and `MODE_CTR`). A mandatory value that must never be reused for any other encryption. For `MODE_CTR`, its length must be in the range ``[0..7]``. For `MODE_EAX`, there are no restrictions, but it is recommended to use at least 16 bytes. If not provided for `MODE_EAX`, a random byte string is generated (you can read it back via the ``nonce`` attribute). mac_len : integer (*Only* `MODE_EAX`). Length of the authentication tag, in bytes. It must be no larger than 8 (which is the default). segment_size : integer (*Only* `MODE_CFB`).The number of **bits** the plaintext and ciphertext are segmented in. It must be a multiple of 8. If not specified, it will be assumed to be 8. initial_value : integer (*Only* `MODE_CTR`). The initial value for the counter within the counter block. By default it is 0. :Return: a DES cipher, of the applicable mode: - CBC_ mode - CFB_ mode - CTR_ mode - EAX_ mode - ECB_ mode - OFB_ mode - OpenPgp_ mode .. _CBC: Crypto.Cipher._mode_cbc.CbcMode-class.html .. _CFB: Crypto.Cipher._mode_cfb.CfbMode-class.html .. _CTR: Crypto.Cipher._mode_ctr.CtrMode-class.html .. _EAX: Crypto.Cipher._mode_eax.EaxMode-class.html .. _ECB: Crypto.Cipher._mode_ecb.EcbMode-class.html .. _OFB: Crypto.Cipher._mode_ofb.OfbMode-class.html .. _OpenPgp: Crypto.Cipher._mode_openpgp.OpenPgpMode-class.html """ return _create_cipher(sys.modules[__name__], key, mode, *args, **kwargs) #: Electronic Code Book (ECB). See `Crypto.Cipher._mode_ecb.EcbMode`. MODE_ECB = 1 #: Cipher-Block Chaining (CBC). See `Crypto.Cipher._mode_cbc.CbcMode`. MODE_CBC = 2 #: Cipher FeedBack (CFB). See `Crypto.Cipher._mode_cfb.CfbMode`. MODE_CFB = 3 #: Output FeedBack (OFB). See `Crypto.Cipher._mode_ofb.OfbMode`. MODE_OFB = 5 #: CounTer Mode (CTR). See `Crypto.Cipher._mode_ctr.CtrMode`. MODE_CTR = 6 #: OpenPGP Mode. See `Crypto.Cipher._mode_openpgp.OpenPgpMode`. MODE_OPENPGP = 7 #: EAX Mode. See `Crypto.Cipher._mode_eax.EaxMode`. MODE_EAX = 9 #: Size of a data block (in bytes) block_size = 8 #: Size of a key (in bytes) key_size = 8
apache-2.0
-5,694,647,605,484,890,000
35.787565
79
0.613521
false
3.815153
false
false
false
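A round-trip sketch complementing the OFB example in the DES module docstring above. The key and plaintext are the docstring's own sample values; decryption simply reuses the 8-byte IV that was prepended to the message.

from Crypto.Cipher import DES

key = b'-8B key-'
cipher = DES.new(key, DES.MODE_OFB)
msg = cipher.iv + cipher.encrypt(b'sona si latine loqueris ')

# The receiver splits off the IV and decrypts the remainder.
decipher = DES.new(key, DES.MODE_OFB, iv=msg[:8])
print(decipher.decrypt(msg[8:]))   # b'sona si latine loqueris '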
harikishen/addons-server
src/olympia/amo/tasks.py
1
2584
import datetime from django.core.mail import EmailMessage, EmailMultiAlternatives import olympia.core.logger from olympia import amo from olympia.activity.models import ActivityLog from olympia.amo.celery import task from olympia.amo.utils import get_email_backend from olympia.bandwagon.models import Collection from olympia.stats.models import Contribution log = olympia.core.logger.getLogger('z.task') @task def send_email(recipient, subject, message, from_email=None, html_message=None, attachments=None, real_email=False, cc=None, headers=None, fail_silently=False, async=False, max_retries=None, reply_to=None, **kwargs): backend = EmailMultiAlternatives if html_message else EmailMessage connection = get_email_backend(real_email) result = backend(subject, message, from_email, to=recipient, cc=cc, connection=connection, headers=headers, attachments=attachments, reply_to=reply_to) if html_message: result.attach_alternative(html_message, 'text/html') try: result.send(fail_silently=False) return True except Exception as e: log.error('send_mail failed with error: %s' % e) if async: return send_email.retry(exc=e, max_retries=max_retries) elif not fail_silently: raise else: return False @task def set_modified_on_object(obj, **kw): """Sets modified on one object at a time.""" try: log.info('Setting modified on object: %s, %s' % (obj.__class__.__name__, obj.pk)) obj.update(modified=datetime.datetime.now()) except Exception, e: log.error('Failed to set modified on: %s, %s - %s' % (obj.__class__.__name__, obj.pk, e)) @task def delete_logs(items, **kw): log.info('[%s@%s] Deleting logs' % (len(items), delete_logs.rate_limit)) ActivityLog.objects.filter(pk__in=items).exclude( action__in=amo.LOG_KEEP).delete() @task def delete_stale_contributions(items, **kw): log.info('[%s@%s] Deleting stale contributions' % (len(items), delete_stale_contributions.rate_limit)) Contribution.objects.filter( transaction_id__isnull=True, pk__in=items).delete() @task def delete_anonymous_collections(items, **kw): log.info('[%s@%s] Deleting anonymous collections' % (len(items), delete_anonymous_collections.rate_limit)) Collection.objects.filter(type=amo.COLLECTION_ANONYMOUS, pk__in=items).delete()
bsd-3-clause
7,679,414,389,111,565,000
33
76
0.64822
false
3.64457
false
false
false
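A hedged sketch of queueing the send_email task above, assuming the @task decorator from olympia.amo.celery is Celery's and therefore provides .delay(). The addresses and text are placeholders; real callers may pass further keyword arguments (html_message, attachments, and so on).

from olympia.amo.tasks import send_email

send_email.delay(
    ['someone@example.com'],            # `recipient` is handed to the backend as the `to=` list
    'Add-on review update',             # subject
    'Plain-text body of the message.',  # message
    from_email='no-reply@example.com')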
sctjkc01/ofCourse
ofcourse/participants.py
1
3800
import os from datetime import datetime, date, timedelta from urlparse import urlparse import yaml from flask import Blueprint, redirect from flask.ext.mako import render_template import ofcourse from ofcourse.util import app_path, get_hw_keys participants_bp = Blueprint('participants_bp', __name__, template_folder=app_path('templates')) currentYear = str(date.today().year) currentTerm = "fall" if date.today().month > 7 else "spring" @participants_bp.route('/') def participants_blank(): """ This is the default landing for the participants listing page. It will list all of the participants in the current term for HFOSS """ return participants_year_term(currentYear, currentTerm) @participants_bp.route('/<year_or_nick>') def participants_year(year_or_nick): """ This will get all the participants within a given year """ p_url = find_participant(year_or_nick) if p_url is not None: # render individual page return redirect(p_url) # otherwise render as a year return participants(year_or_nick + '/') @participants_bp.route('/<year>/<term>') def participants_year_term(year, term): """ This will get all the participants within a given year and term """ return participants(year + '/' + term + '/') @participants_bp.route('/all') def participants_all(): return participants('') """ This will get all the participants who have taken HFOSS """ def participants(root_dir): """ Render the participants page, which shows a directory of all the students with their forge links, blog posts, assignment links, and etc. """ yaml_dir = app_path('people', root_dir) student_data = [] for dirpath, dirnames, files in os.walk(yaml_dir): dirpath = dirpath.rstrip("/") for fname in sorted(files): if fname.endswith('.yaml'): with open(dirpath + '/' + fname) as students: contents = yaml.safe_load(students) contents['yaml'] = dirpath + '/' + fname year_term_data = dirpath.split('/') contents['participant_page'] = "{y}/{t}/{u}".format( y=year_term_data[-2], t=year_term_data[-1], u=os.path.splitext(fname)[0] ) for forge in contents['forges']: url = urlparse(forge) if "github.com" in url.netloc: contents['github'] = url.path[1:] contents['isActive'] = (currentYear in year_term_data and currentTerm in year_term_data) student_data.append(contents) assignments = get_hw_keys() elapsed = (datetime.today() - ofcourse.site.COURSE_START).total_seconds() target_number = int(elapsed / timedelta(weeks=1).total_seconds() + 1 + len(assignments)) return render_template( 'blogs.mak', name='mako', student_data=student_data, gravatar=ofcourse.site.gravatar, target_number=target_number, hw_keys=assignments ) def find_participant(nick): yaml_dir = app_path('people') for dirpath, dirnames, files in os.walk(yaml_dir): for fname in files: if (fname.lower().startswith(nick.lower()) and fname.endswith('.yaml')): participant = os.path.join( dirpath, fname ).replace(yaml_dir, '') participant = participant.replace('.yaml', '') return 'participants' + participant
apache-2.0
3,540,628,806,068,801,000
28.6875
77
0.569737
false
4.231626
false
false
false
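A hedged sketch of the per-student YAML file that participants() above loads from people/<year>/<term>/<nick>.yaml. Only the `forges` key is actually read in this module; the username is invented, and any other fields a real file carries are not shown here.

import yaml
from urlparse import urlparse   # Python 2, matching the module above

contents = yaml.safe_load("""
forges:
  - https://github.com/fakestudent
""")
for forge in contents['forges']:
    url = urlparse(forge)
    if 'github.com' in url.netloc:
        print('github user: ' + url.path[1:])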
smurfix/DaBroker
dabroker/base/transport/__init__.py
1
4226
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, division, unicode_literals ## ## This file is part of DaBroker, a distributed data access manager. ## ## DaBroker is Copyright © 2014 by Matthias Urlichs <matthias@urlichs.de>, ## it is licensed under the GPLv3. See the file `README.rst` for details, ## including optimistic statements by the author. ## ## This paragraph is auto-generated and may self-destruct at any time, ## courtesy of "make update". The original is in ‘utils/_boilerplate.py’. ## Thus, please do not remove the next line, or insert any blank lines. ##BP from gevent import GreenletExit from dabroker.util.thread import prep_spawned import logging logger = logging.getLogger("dabroker.base.transport") class ConnectionError(RuntimeError): pass class BaseCallbacks(object): def recv(self,msg): """Incoming message from the other side. NOT used for receiving replies!""" raise NotImplementedError("You need to override {}.recv()".format(self.__class__.__name__)) def send(self,msg): """Outgoing message to the other side. NOT used for sending replies!""" raise NotImplementedError("You need to override {}.send()".format(self.__class__.__name__)) def ended(self,err=None): """Called on receiver error. Do not reconnect here!""" pass def reconnect(self,err=None): """Called after a closed connection has been cleaned up""" pass def register_codec(self,codec): raise NotImplementedError("You need to override {}.register_codec()".format(self.__class__.__name__)) class RelayedError(Exception): """An encapsulation for a server error (with traceback)""" def __init__(self,err,tb): self.err = str(err) self.tb = tb def __repr__(self): return "{}({})".format(self.__class__.__name__,self.err) def __str__(self): r = repr(self) if self.tb is None: return r return r+"\n"+self.tb class BaseTransport(object): _job = None defaults = {} connection = None last_msgid = 0 def __init__(self,callbacks, cfg={}): self.cfg = self.defaults.copy() self.cfg.update(cfg) self.callbacks = callbacks self.trace = cfg.get('trace',0) def connect(self, purge=False): """Connect. (Synchronously.) Do not override! Override .connect1() (setup) and .connect2() (initial tasks)""" assert self.callbacks is not None assert self.connection is None self.connect1() if purge: self.purge_all() self.connect2() def connect1(self): """Set up a connection. Call super() before your code.""" if self._job is not None: raise RuntimeError("Already connected") logger.debug("connecting: %r",self) def connect2(self): """Add initial tasks after a connection has been established. Call super() after your code.""" assert self._job is None self._job = self._run_job() self._job.start() def disconnect(self): """Sever the connection; do not auto-reconnect.""" logger.debug("disconnecting: %r",self) j,self._job = self._job,None if j: j.stop() def disconnected(self, err=None): """Clear connection objects. This will be called by the reader task as it exits. Do not reconnect from here; do that in your .reconnect""" logger.debug("disconnected: %r",self) def purge_all(self): """ Clear this transport's message queue. This should only be called when client and server are known to be idle AND when you suspect an unprocessable message might clog the queue. 
""" pass def send(self,msg): raise NotImplementedError("You need to override {}.send()".format(self.__class__.__name__)) def run(self): raise NotImplementedError("You need to override {}.run()".format(self.__class__.__name__)) @prep_spawned def _run_job(self): try: logger.debug("Running receiver loop: %r",self) self.run() except GreenletExit: err=None logger.debug("Receiver loop ends: %r",self) self.callbacks.ended(None) except BaseException as e: err = e logger.exception("Receiver loop error: %r",self) self.callbacks.ended(e) else: err=None logger.debug("Receiver loop ends: %r",self) self.callbacks.ended(None) finally: self.disconnected() if self._job is not None: self._job = None self.callbacks.reconnect(err)
gpl-3.0
-528,446,127,231,001,700
26.769737
103
0.689647
false
3.352661
false
false
false
MattFaus/CrowdTube-Connector
youtube.py
1
6824
import os import urlparse from lib import gdata import lib.gdata.youtube.client import secrets GDATA_API_CLIENT_ID = 'CrowdTube-Connector' class YouTubeCaptionEditor(object): def __init__(self, google_email, google_password, youtube_username): self.youtube_username = youtube_username self.youtube_client = lib.gdata.youtube.client.YouTubeClient() # We shouldn't need this auth_token, but we'll keep it around self.auth_token = self.youtube_client.client_login( google_email, google_password, GDATA_API_CLIENT_ID) # A dictionary of youtube_id and YouTubeVideo objects self.videos = {} def get_videos(self): # Format copied from lib.gdata.youtube.client.py feed_uri = '%s%s/%s' % (lib.gdata.youtube.client.YOUTUBE_USER_FEED_URI, self.youtube_username, 'uploads') all_videos = self.youtube_client.get_videos(uri=feed_uri) for video in all_videos.entry: new_video = YouTubeVideo(video, self.youtube_client) self.videos[new_video.video_id] = new_video def get_video(self, video_id): video_entry = self.youtube_client.get_video_entry(video_id=video_id) return YouTubeVideo(video_entry, self.youtube_client) def delete_track(self, video_id, track_id): """Deletes an existing track.""" # TODO(mattfaus): Take google_developer_key as a constructor arg? response = self.youtube_client.delete_track(video_id, track_id, client_id=GDATA_API_CLIENT_ID, developer_key=secrets.google_developer_key) # http://docs.python.org/release/2.2.3/lib/httpresponse-objects.html if response.status != 200: print response.status, response.msg return False return True def add_track(self, video_id, title, language, track_content): """Adds a caption track. If a track with the same title already exists, this will silently fail. """ # TODO(mattfaus): Take google_developer_key as a constructor arg? track_content = track_content.encode('utf-8') response = self.youtube_client.create_track(video_id, title, language, track_content, client_id=GDATA_API_CLIENT_ID, developer_key=secrets.google_developer_key, fmt='sub') # Returns a TrackEntry object return response def update_track(self, video_id, track_id, track_content): """Adds a caption track.""" # TODO(mattfaus): Take google_developer_key as a constructor arg? track_content = track_content.encode('utf-8') response = self.youtube_client.update_track(video_id, track_id, track_content, client_id=GDATA_API_CLIENT_ID, developer_key=secrets.google_developer_key, fmt='sub') # Returns a TrackEntry object return response # TODO(mattfaus): Suck these two classes into the YouTubeCaptionEditor, above # make the YouTubeCaptionEditor behave more like a full-fledged youtube client # Shouldn't have to pass the youtube_client object around to the sub-classes # No need to have dictionaries where an array would do just fine (YouTubeVideo.caption_tracks) class YouTubeVideo(object): def __init__(self, video_entry, youtube_client=None): self.youtube_client = youtube_client # tag:youtube.com,2008:video:SNrEiiJwD4Y id_parts = video_entry.GetId().split(':') self.video_id = id_parts[id_parts.index('video') + 1] self.title = video_entry.title.text caption_link = video_entry.get_link( 'http://gdata.youtube.com/schemas/2007#video.captionTracks') self.caption_feed = caption_link.href # TODO(mattfaus): Make this less ugly has_entries = [ a.value for a in caption_link.GetAttributes() if '{http://gdata.youtube.com/schemas/2007}hasEntries' == a._qname] has_entries = has_entries[0] == 'true' self.has_entries = has_entries self.caption_tracks = {} def get_caption_tracks(self, download=False): # Don't check self.has_entries. 
It may be False when only a # machine-generated caption track exists. if not self.youtube_client: raise ValueError('No youtube client available!') # STOPSHIP(mattfaus): get_caption_feed() only returns the first 24 caption tracks # so we must iterate to read more # TODO(mattfaus): Filter this by language with the 'lr' attribute all_captions = self.youtube_client.get_caption_feed(self.caption_feed) for caption_entry in all_captions.entry: new_track = YouTubeCaptionTrack(caption_entry, self.youtube_client) self.caption_tracks[new_track.track_source] = new_track if download: new_track.download_track() def get_machine_generated_track(self): self.get_caption_tracks() for src, caption_track in self.caption_tracks.iteritems(): print src, caption_track if caption_track.machine_generated: caption_track.download_track() return caption_track class YouTubeCaptionTrack(object): def __init__(self, caption_entry, youtube_client): self.youtube_client = youtube_client self.language = caption_entry.content.lang self.track_source = caption_entry.content.src self.machine_generated = YouTubeCaptionTrack._is_machine_generated( caption_entry) # Parse the video_id and caption_id out of a url like this: # https://gdata.youtube.com/feeds/api/videos/Jom6EtXzRMg/captiondata/Ch4LEO3ZhwUaFQjIic2vrcLuxCYSAmVuGgAiA2Fzcgw o = urlparse.urlparse(self.track_source) path_parts = o.path.split('/') self.video_id = path_parts[path_parts.index('videos') + 1] self.track_id = path_parts[path_parts.index('captiondata') + 1] self.track_content = None @staticmethod def _is_machine_generated(caption_entry): """Looks for the derived element, and returns True if it is equal to speechRecognition. """ # TODO(mattfaus): Move this to TrackEntry within youtube/data.py? derived = caption_entry.GetElements( tag='derived', namespace='http://gdata.youtube.com/schemas/2007') if not derived: return False else: derived = derived[0] return derived.text == 'speechRecognition' def download_track(self): response = self.youtube_client.get_caption_track( track_url=self.track_source, client_id=GDATA_API_CLIENT_ID, developer_key=secrets.google_developer_key) self.track_content = response.read(2 ** 31) return self.track_content
mit
-7,013,494,189,144,412,000
38.445087
120
0.651231
false
3.822969
false
false
false
rockfruit/bika.lims
bika/lims/browser/analysisrequest/results_not_requested.py
1
2747
# This file is part of Bika LIMS # # Copyright 2011-2016 by it's authors. # Some rights reserved. See LICENSE.txt, AUTHORS.txt. from AccessControl import getSecurityManager from bika.lims import bikaMessageFactory as _ from bika.lims.utils import t from bika.lims.permissions import * from bika.lims.browser.analysisrequest import AnalysisRequestManageResultsView from bika.lims.content.analysisrequest import schema as AnalysisRequestSchema from bika.lims.utils import to_utf8 from bika.lims.workflow import doActionFor from plone.app.layout.globals.interfaces import IViewView from DateTime import DateTime from Products.Archetypes import PloneMessageFactory as PMF from Products.CMFCore.utils import getToolByName from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile from zope.interface import implements import plone class AnalysisRequestResultsNotRequestedView(AnalysisRequestManageResultsView): implements(IViewView) template = ViewPageTemplateFile("templates/analysisrequest_analyses_not_requested.pt") def __call__(self): ar = self.context workflow = getToolByName(ar, 'portal_workflow') # If is a retracted AR, show the link to child AR and show a warn msg if workflow.getInfoFor(ar, 'review_state') == 'invalid': childar = hasattr(ar, 'getChildAnalysisRequest') \ and ar.getChildAnalysisRequest() or None childid = childar and childar.getRequestID() or None message = _('This Analysis Request has been withdrawn and is shown ' 'for trace-ability purposes only. Retest: ${retest_child_id}.', mapping={"retest_child_id":childid if childid else ''}) self.context.plone_utils.addPortalMessage(message, 'warning') # If is an AR automatically generated due to a Retraction, show it's # parent AR information if hasattr(ar, 'getParentAnalysisRequest') \ and ar.getParentAnalysisRequest(): par = ar.getParentAnalysisRequest() message = _( 'This Analysis Request has been generated automatically due to ' 'the retraction of the Analysis Request ${retracted_request_id}.', mapping={"retracted_request_id": par.getRequestID()}) self.context.plone_utils.addPortalMessage(message, 'info') can_do = getSecurityManager().checkPermission(ResultsNotRequested, ar) if workflow.getInfoFor(ar, 'cancellation_state') == "cancelled": self.request.response.redirect(ar.absolute_url()) elif not(can_do): self.request.response.redirect(ar.absolute_url()) else: return self.template()
agpl-3.0
3,269,595,701,656,959,500
46.362069
90
0.699672
false
4.174772
false
false
false
LongSeanSilvr/DC_Metro_Tracker
development_version/src/general_intents.py
1
1923
import build_response as br # ====================================================================================================================== # Skill Behavior: Welcome Response # ====================================================================================================================== class Welcome(object): def __init__(self): self.card_title = "Welcome" self.reprompt_text = "What station would you like train times for?" self.flag = "welcome" def build_response(self): output = br.build_response(self.card_title, self.flag, reprompt_text=self.reprompt_text) return output # ====================================================================================================================== # Skill Intent: Help # ====================================================================================================================== class Help(object): def __init__(self, intent, session): # Parameters are here so handler can treat this like the other intent classes self.card_title = "Help" self.reprompt_text = "What station would you like train times for?" self.flag = "help" def build_response(self): output = br.build_response(self.card_title, self.flag, reprompt_text=self.reprompt_text) return output # ====================================================================================================================== # Skill Intent: Quit # ====================================================================================================================== class Exit(object): def __init__(self, intent, session): # Parameters are here so handler can treat this like the other intent classes self.card_title = "Exiting" self.flag = "exit" def build_response(self): output = br.build_response(self.card_title, self.flag) return output
gpl-3.0
3,137,623,135,016,649,700
44.785714
120
0.411856
false
5.494286
false
false
false
dakrauth/picker
picker/forms.py
1
6144
from django import forms from django.utils import timezone from django.utils.module_loading import import_string from . import models as picker from . import utils _picker_widget = None encoded_game_key = 'game_{}'.format TIE_KEY = '__TIE__' def decoded_game_key(value): return int(value.replace('game_', '')) def encoded_game_item(game): return ( encoded_game_key(game.id), str(game.winner.id) if game.winner else (TIE_KEY if game.is_tie else '') ) def get_picker_widget(league): global _picker_widget if not _picker_widget: widget_path = league.config('TEAM_PICKER_WIDGET') if widget_path: _picker_widget = import_string(widget_path) _picker_widget = _picker_widget or forms.RadioSelect return _picker_widget class GameField(forms.ChoiceField): def __init__(self, game, manage=False, widget=None): choices = [(str(game.away.id), game.away), (str(game.home.id), game.home)] if manage: choices.insert(1, (TIE_KEY, '')) self.game = game self.manage = manage self.game_id = game.id self.is_game = True super(GameField, self).__init__( choices=choices, label=game.start_time.strftime('%a, %b %d %I:%M %p'), required=False, help_text=game.tv, disabled=not self.manage and (self.game.start_time <= timezone.now()), widget=widget or get_picker_widget(game.gameset.league) ) class FieldIter: def __init__(self, form): self.fields = [] self.form = form def append(self, name): self.fields.append(name) def __iter__(self): for name in self.fields: yield self.form[name] class BasePickForm(forms.Form): management = False def __init__(self, gameset, *args, **kws): super(BasePickForm, self).__init__(*args, **kws) self.gameset = gameset self.game_fields = FieldIter(self) games = list(gameset.games.select_related('home__league', 'away__league')) if games: for gm in games: key = encoded_game_key(gm.id) self.fields[key] = GameField(gm, self.management) self.game_fields.append(key) self.fields['points'] = forms.IntegerField( label='{}:'.format(games[-1].vs_description), required=False ) class ManagementPickForm(BasePickForm): management = True def __init__(self, gameset, *args, **kws): kws.setdefault('initial', {}).update(**self.get_initial_picks(gameset)) super(ManagementPickForm, self).__init__(gameset, *args, **kws) def save(self): gameset = self.gameset data = self.cleaned_data.copy() gameset.points = data.pop('points', 0) or 0 gameset.save() for key, winner in data.items(): if winner: pk = decoded_game_key(key) game = gameset.games.get(pk=pk) game.winner = None if winner == TIE_KEY else int(winner) gameset.update_pick_status() @staticmethod def get_initial_picks(gameset): return dict({ encoded_game_key(game.id): str(game.winner.id) for game in gameset.games.played() if game.winner }, points=gameset.points) class UserPickForm(BasePickForm): def __init__(self, user, gameset, *args, **kws): initial = self.get_initial_user_picks(gameset, user) kws.setdefault('initial', {}).update(initial) self.user = user super(UserPickForm, self).__init__(gameset, *args, **kws) def save(self): data = self.cleaned_data.copy() picks = picker.PickSet.objects.for_gameset_user(self.gameset, self.user) points = data.pop('points', None) games = {decoded_game_key(k): v for k, v in data.items() if v} picks.update_picks(games=games, points=points) return picks @staticmethod def get_initial_user_picks(gameset, user): ps = gameset.pick_for_user(user) initial = dict({ encoded_game_key(g_id): str(w_id) for g_id, w_id in ps.gamepicks.picked_winner_ids() }, points=ps.points) if ps else {} return initial class GameForm(forms.ModelForm): class Meta: model = 
picker.Game fields = ('start_time', 'location') class PreferenceForm(forms.ModelForm): class Meta: model = picker.Preference fields = ('autopick',) def __init__(self, instance, *args, **kws): kws['instance'] = instance self.current_email = instance.user.email.lower() kws.setdefault('initial', {})['email'] = self.current_email super(PreferenceForm, self).__init__(*args, **kws) for league in picker.League.objects.all(): field_name = '{}_favorite'.format(league.slug) current = None if instance: try: current = picker.PickerFavorite.objects.get(user=instance.user, league=league) except picker.PickerFavorite.DoesNotExist: pass self.fields[field_name] = forms.ModelChoiceField( picker.Team.objects.filter(league=league), label='{} Favorite'.format(league.abbr.upper()), empty_label='-- Select --', required=False, initial=current.team if current else None ) def save(self, commit=True): super(PreferenceForm, self).save(commit) if commit: picker.PickerFavorite.objects.filter(user=self.instance.user).delete() for key in self.cleaned_data: if not key.endswith('_favorite'): continue slug = key.rsplit('_')[0] league = picker.League.objects.get(slug=slug) picker.PickerFavorite.objects.create( league=league, user=self.instance.user, team=self.cleaned_data[key] )
mit
-7,155,869,303,144,028,000
30.187817
98
0.57487
false
3.719128
false
false
false
amerlyq/piony
piony/config/argparser.py
1
2747
from argparse import ArgumentParser, RawDescriptionHelpFormatter import piony from piony.common.exceptions import InputError class ArgParser(object): def __init__(self): self.ps = ArgumentParser(prog=piony.__appname__, formatter_class=RawDescriptionHelpFormatter, description=piony.__doc__, epilog="Enjoy!!!") self._setup_options() def parse(self, argv): if not argv: argv = [] elif isinstance(argv, str): argv = argv.split() elif not isinstance(argv, list): raise InputError("Wrong argv type: {}".format(type(argv))) return self.ps.parse_args(argv) def apply(self, args): from operator import xor res = (False, False) dbg = {'a': (True, True), 'v': (True, False), 'k': (False, True)} if args.verbose: for entry in args.verbose: res = map(xor, res, dbg[entry]) piony.G_DEBUG_VISUALS, piony.G_DEBUG_ACTIONS = res def _setup_options(self): ## Configuration farg = self.ps.add_argument farg('buds', metavar='bud', nargs='*', type=str, default=None, help="Setup profile layout in json directly on cmdline. " "Can be specified several times -- one for each slice. " "Or use pathes to files with slices inside.") farg('-v', '--version', action='version', default=None, version="%(prog)s {0}".format(piony.__version__), help="Version of program.") gr_window = self.ps.add_argument_group('Window') warg = gr_window.add_argument warg('-c', '--config', default=None, help="Config file with default settings.") warg('-p', '--print', default=None, help="Toggle action print/execute to use as frontend only.") ## Appearance warg('-s', '--size', type=int, default=None, help="Sets window size WxH=NxN to derive all rings sizes from it.") warg('-F', '--fullscreen', action='store_true', default=None, help="Overlay fullscreen/local") warg('-T', '--no-tooltip', action='store_true', default=None, help="Disable pop-up items, for those who is irritated.") ## Process gr_general = self.ps.add_argument_group('General') garg = gr_general.add_argument garg('-k', '--kill', action='store_true', default=None, help="Kill running daemonized program.") garg('-V', '--verbose', nargs='?', type=str, const='a', choices=['a', 'v', 'k'], default=None, help="Verbose (debug): [a]ll (default), [v]isuals, [k]eys.")
gpl-3.0
114,584,023,838,943,360
41.261538
80
0.560612
false
4.004373
false
false
false
strahlc/exaile
xlgui/main.py
1
43837
# Copyright (C) 2008-2010 Adam Olsen # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # # The developers of the Exaile media player hereby grant permission # for non-GPL compatible GStreamer and Exaile plugins to be used and # distributed together with GStreamer and Exaile. This permission is # above and beyond the permissions granted by the GPL license by which # Exaile is covered. If you modify this code, you may extend this # exception to your version of the code, but you are not obligated to # do so. If you do not wish to do so, delete this exception statement # from your version. import datetime import logging import os import re import threading import cairo from gi.repository import Gdk from gi.repository import GLib from gi.repository import GObject from gi.repository import Gtk from gi.repository import Pango from xl.nls import gettext as _ from xl import ( common, covers, event, formatter, player, playlist, providers, settings, trax, xdg ) from xlgui.accelerators import AcceleratorManager from xlgui.playlist_container import PlaylistContainer from xlgui.widgets import ( dialogs, info, menu, playback ) from xlgui.widgets.playlist import ( PlaylistPage, PlaylistView ) from xlgui import ( guiutil, tray, menu as mainmenu ) logger = logging.getLogger(__name__) # Length of playback step when user presses seek key (sec) SEEK_STEP_DEFAULT = 10 # Length of volume steps when user presses up/down key VOLUME_STEP_DEFAULT = 0.1 class MainWindow(GObject.GObject): """ Main Exaile Window """ __gproperties__ = { 'is-fullscreen': (bool, 'Fullscreen', 'Whether the window is fullscreen.', False, # Default GObject.PARAM_READWRITE), } __gsignals__ = {'main-visible-toggle': (GObject.SignalFlags.RUN_LAST, bool, ())} _mainwindow = None def __init__(self, controller, builder, collection): """ Initializes the main window @param controller: the main gui controller """ GObject.GObject.__init__(self) self.controller = controller self.collection = collection self.playlist_manager = controller.exaile.playlists self.current_page = -1 self._fullscreen = False self.resuming = False self.window_state = 0 self.minimized = False self.builder = builder self.window = self.builder.get_object('ExaileWindow') self.window.set_title('Exaile') self.title_formatter = formatter.TrackFormatter(settings.get_option( 'gui/main_window_title_format', _('$title (by $artist)') + ' - Exaile')) self.accelgroup = Gtk.AccelGroup() self.window.add_accel_group(self.accelgroup) self.accel_manager = AcceleratorManager('mainwindow-accelerators', self.accelgroup) self.menubar = self.builder.get_object("mainmenu") fileitem = self.builder.get_object("file_menu_item") filemenu = menu.ProviderMenu('menubar-file-menu', self) fileitem.set_submenu(filemenu) edititem = self.builder.get_object("edit_menu_item") editmenu = menu.ProviderMenu('menubar-edit-menu', self) edititem.set_submenu(editmenu) viewitem = 
self.builder.get_object("view_menu_item") viewmenu = menu.ProviderMenu('menubar-view-menu', self) viewitem.set_submenu(viewmenu) toolsitem = self.builder.get_object("tools_menu_item") toolsmenu = menu.ProviderMenu('menubar-tools-menu', self) toolsitem.set_submenu(toolsmenu) helpitem = self.builder.get_object("help_menu_item") helpmenu = menu.ProviderMenu('menubar-help-menu', self) helpitem.set_submenu(helpmenu) self._setup_widgets() self._setup_position() self._setup_hotkeys() logger.info("Connecting main window events...") self._connect_events() MainWindow._mainwindow = self mainmenu._create_menus() def _setup_hotkeys(self): """ Sets up accelerators that haven't been set up in UI designer """ hotkeys = ( ('<Control>S', lambda *e: self.on_save_playlist()), ('<Shift><Control>S', lambda *e: self.on_save_playlist_as()), ('<Control>F', lambda *e: self.on_panel_filter_focus()), ('<Control>G', lambda *e: self.on_search_playlist_focus()), # FIXME ('<Control><Alt>l', lambda *e: player.QUEUE.clear()), # FIXME ('<Control>P', self._on_playpause_button), ('<Control>Right', lambda *e: self._on_seek_key(True)), ('<Control>Left', lambda *e: self._on_seek_key(False)), ('<Control>plus', lambda *e: self._on_volume_key(True)), ('<Control>minus', lambda *e: self._on_volume_key(False)), ('<Control>Page_Up', self._on_prev_tab_key), ('<Control>Page_Down', self._on_next_tab_key), ('<Alt>N', self._on_focus_playlist_container), # These 4 are subject to change.. probably should do this # via a different mechanism too... ('<Alt>I', lambda *e: self.controller.focus_panel('files')), #('<Alt>C', lambda *e: self.controller.focus_panel('collection')), ('<Alt>R', lambda *e: self.controller.focus_panel('radio')), ('<Alt>L', lambda *e: self.controller.focus_panel('playlists')), ('<Alt>1', lambda *e: self._on_focus_playlist_tab(0)), ('<Alt>2', lambda *e: self._on_focus_playlist_tab(1)), ('<Alt>3', lambda *e: self._on_focus_playlist_tab(2)), ('<Alt>4', lambda *e: self._on_focus_playlist_tab(3)), ('<Alt>5', lambda *e: self._on_focus_playlist_tab(4)), ('<Alt>6', lambda *e: self._on_focus_playlist_tab(5)), ('<Alt>7', lambda *e: self._on_focus_playlist_tab(6)), ('<Alt>8', lambda *e: self._on_focus_playlist_tab(7)), ('<Alt>9', lambda *e: self._on_focus_playlist_tab(8)), ('<Alt>0', lambda *e: self._on_focus_playlist_tab(9)), ) self.accel_group = Gtk.AccelGroup() for key, function in hotkeys: key, mod = Gtk.accelerator_parse(key) self.accel_group.connect(key, mod, Gtk.AccelFlags.VISIBLE, function) self.window.add_accel_group(self.accel_group) def _setup_widgets(self): """ Sets up the various widgets """ # TODO: Maybe make this stackable self.message = dialogs.MessageBar( parent=self.builder.get_object('player_box'), buttons=Gtk.ButtonsType.CLOSE ) self.message.connect('response', self.on_messagebar_response) self.info_area = MainWindowTrackInfoPane(player.PLAYER) self.info_area.set_auto_update(True) self.info_area.set_padding(3, 3, 3, 3) self.info_area.hide() self.info_area.set_no_show_all(True) guiutil.gtk_widget_replace(self.builder.get_object('info_area'), self.info_area) self.volume_control = playback.VolumeControl(player.PLAYER) self.info_area.get_action_area().pack_end(self.volume_control, False, False, 0) self.alpha_style = None if settings.get_option('gui/use_alpha', False): screen = self.window.get_screen() visual = screen.get_rgba_visual() self.window.set_visual(visual) self.window.connect('screen-changed', self.on_screen_changed) self.alpha_style = Gtk.CssProvider.new() 
self.window.get_style_context().add_provider(self.alpha_style, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION) self._update_alpha() playlist_area = self.builder.get_object('playlist_area') self.playlist_container = PlaylistContainer('saved_tabs', player.PLAYER) for notebook in self.playlist_container.notebooks: notebook.connect_after('switch-page', self.on_playlist_container_switch_page) page = notebook.get_current_tab() if page is not None: selection = page.view.get_selection() selection.connect('changed', self.on_playlist_view_selection_changed) playlist_area.pack_start(self.playlist_container, True, True, 3) self.splitter = self.builder.get_object('splitter') # In most (all?) RTL locales, the playback controls should still be LTR. # Just in case that's not always the case, we provide a hidden option to # force RTL layout instead. This can be removed once we're more certain # that the default behavior (always LTR) is correct. controls_direction = Gtk.TextDirection.RTL \ if settings.get_option('gui/rtl_playback_controls') \ else Gtk.TextDirection.LTR self.play_image = Gtk.Image.new_from_icon_name('media-playback-start', Gtk.IconSize.SMALL_TOOLBAR) self.play_image.set_direction(controls_direction) self.pause_image = Gtk.Image.new_from_icon_name('media-playback-pause', Gtk.IconSize.SMALL_TOOLBAR) self.pause_image.set_direction(controls_direction) play_toolbar = self.builder.get_object('play_toolbar') play_toolbar.set_direction(controls_direction) for button in ('playpause', 'next', 'prev', 'stop'): widget = self.builder.get_object('%s_button' % button) setattr(self, '%s_button' % button, widget) widget.get_child().set_direction(controls_direction) self.progress_bar = playback.SeekProgressBar(player.PLAYER) self.progress_bar.get_child().set_direction(controls_direction) # Don't expand vertically; looks awful on Adwaita. 
self.progress_bar.set_valign(Gtk.Align.CENTER) guiutil.gtk_widget_replace( self.builder.get_object('playback_progressbar_dummy'), self.progress_bar ) self.stop_button.toggle_spat = False self.stop_button.add_events(Gdk.EventMask.POINTER_MOTION_MASK) self.stop_button.connect('motion-notify-event', self.on_stop_button_motion_notify_event) self.stop_button.connect('leave-notify-event', self.on_stop_button_leave_notify_event) self.stop_button.connect('key-press-event', self.on_stop_button_key_press_event) self.stop_button.connect('key-release-event', self.on_stop_button_key_release_event) self.stop_button.connect('focus-out-event', self.on_stop_button_focus_out_event) self.stop_button.connect('button-press-event', self.on_stop_button_press_event) self.stop_button.connect('button-release-event', self.on_stop_button_release_event) self.stop_button.drag_dest_set(Gtk.DestDefaults.ALL, [Gtk.TargetEntry.new("exaile-index-list", Gtk.TargetFlags.SAME_APP, 0)], Gdk.DragAction.COPY) self.stop_button.connect('drag-motion', self.on_stop_button_drag_motion) self.stop_button.connect('drag-leave', self.on_stop_button_drag_leave) self.stop_button.connect('drag-data-received', self.on_stop_button_drag_data_received) self.statusbar = info.Statusbar(self.builder.get_object('status_bar')) event.add_ui_callback(self.on_exaile_loaded, 'exaile_loaded') def _connect_events(self): """ Connects the various events to their handlers """ self.builder.connect_signals({ 'on_configure_event': self.configure_event, 'on_window_state_event': self.window_state_change_event, 'on_delete_event': self.on_delete_event, 'on_playpause_button_clicked': self._on_playpause_button, 'on_next_button_clicked': lambda *e: player.QUEUE.next(), 'on_prev_button_clicked': lambda *e: player.QUEUE.prev(), 'on_about_item_activate': self.on_about_item_activate, # Controller # 'on_scan_collection_item_activate': self.controller.on_rescan_collection, # 'on_device_manager_item_activate': lambda *e: self.controller.show_devices(), # 'on_track_properties_activate':self.controller.on_track_properties, }) event.add_ui_callback(self.on_playback_resume, 'playback_player_resume', player.PLAYER) event.add_ui_callback(self.on_playback_end, 'playback_player_end', player.PLAYER) event.add_ui_callback(self.on_playback_end, 'playback_error', player.PLAYER) event.add_ui_callback(self.on_playback_start, 'playback_track_start', player.PLAYER) event.add_ui_callback(self.on_toggle_pause, 'playback_toggle_pause', player.PLAYER) event.add_ui_callback(self.on_track_tags_changed, 'track_tags_changed') event.add_ui_callback(self.on_buffering, 'playback_buffering', player.PLAYER) event.add_ui_callback(self.on_playback_error, 'playback_error', player.PLAYER) event.add_ui_callback(self.on_playlist_tracks_added, 'playlist_tracks_added') event.add_ui_callback(self.on_playlist_tracks_removed, 'playlist_tracks_removed') # Settings self._on_option_set('gui_option_set', settings, 'gui/show_info_area') self._on_option_set('gui_option_set', settings, 'gui/show_info_area_covers') event.add_ui_callback(self._on_option_set, 'option_set') def _connect_panel_events(self): """ Sets up panel events """ # When there's nothing in the notebook, hide it self.controller.panel_notebook.connect('page-added', self.on_panel_notebook_add_page) self.controller.panel_notebook.connect('page-removed', self.on_panel_notebook_remove_page) # panels panels = self.controller.panel_notebook.panels for panel_name in ('playlists', 'radio', 'files', 'collection'): panel = panels[panel_name].panel sort = False if 
panel_name in ('files', 'collection'): sort = True panel.connect('append-items', lambda panel, items, force_play, sort=sort: self.on_append_items(items, force_play, sort=sort)) panel.connect('queue-items', lambda panel, items, sort=sort: self.on_append_items(items, queue=True, sort=sort)) panel.connect('replace-items', lambda panel, items, sort=sort: self.on_append_items(items, replace=True, sort=sort)) ## Collection Panel panel = panels['collection'].panel panel.connect('collection-tree-loaded', self.on_collection_tree_loaded) ## Playlist Panel panel = panels['playlists'].panel panel.connect('playlist-selected', lambda panel, playlist: self.playlist_container.create_tab_from_playlist(playlist)) ## Radio Panel panel = panels['radio'].panel panel.connect('playlist-selected', lambda panel, playlist: self.playlist_container.create_tab_from_playlist(playlist)) ## Files Panel #panel = panels['files'] def _update_alpha(self): if self.alpha_style is None: return opac = 1.0 - float(settings.get_option('gui/transparency')) self.alpha_style.load_from_data( '.background { ' + ('background-color: alpha(@theme_bg_color, %s);' % opac) + '}' ) def do_get_property(self, prop): if prop.name == 'is-fullscreen': return self._fullscreen else: return GObject.GObject.do_get_property(self, prop) def do_set_property(self, prop, value): if prop.name == 'is-fullscreen': if value: self.window.fullscreen() else: self.window.unfullscreen() else: GObject.GObject.do_set_property(self, prop, value) def on_screen_changed(self, widget, event): """ Updates the colormap on screen change """ screen = widget.get_screen() visual = screen.get_rgba_visual() or screen.get_rgb_visual() self.window.set_visual(visual) def on_messagebar_response(self, widget, response): """ Hides the messagebar if requested """ if response == Gtk.ResponseType.CLOSE: widget.hide() def on_panel_notebook_add_page(self, notebook, page, page_num): if self.splitter.get_child1() is None: self.splitter.pack1(self.controller.panel_notebook) self.controller.panel_notebook.get_parent() \ .child_set_property(self.controller.panel_notebook, 'shrink', False) def on_panel_notebook_remove_page(self, notebook, page, page_num): if notebook.get_n_pages() == 0: self.splitter.remove(self.controller.panel_notebook) def on_stop_button_motion_notify_event(self, widget, event): """ Sets the hover state and shows SPAT icon """ widget.__hovered = True if event.get_state() & Gdk.ModifierType.SHIFT_MASK: widget.set_image(Gtk.Image.new_from_icon_name( 'process-stop', Gtk.IconSize.BUTTON)) else: widget.set_image(Gtk.Image.new_from_icon_name( 'media-playback-stop', Gtk.IconSize.BUTTON)) def on_stop_button_leave_notify_event(self, widget, event): """ Unsets the hover state and resets the button icon """ widget.__hovered = False if not widget.is_focus() and \ ~(event.get_state() & Gdk.ModifierType.SHIFT_MASK): widget.set_image(Gtk.Image.new_from_icon_name( 'media-playback-stop', Gtk.IconSize.BUTTON)) def on_stop_button_key_press_event(self, widget, event): """ Shows SPAT icon on Shift key press """ if event.keyval in (Gdk.KEY_Shift_L, Gdk.KEY_Shift_R): widget.set_image(Gtk.Image.new_from_icon_name( 'process-stop', Gtk.IconSize.BUTTON)) widget.toggle_spat = True if event.keyval in (Gdk.KEY_space, Gdk.KEY_Return): if widget.toggle_spat: self.on_spat_clicked() else: player.PLAYER.stop() def on_stop_button_key_release_event(self, widget, event): """ Resets the button icon """ if event.keyval in (Gdk.KEY_Shift_L, Gdk.KEY_Shift_R): widget.set_image(Gtk.Image.new_from_icon_name( 
'media-playback-stop', Gtk.IconSize.BUTTON)) widget.toggle_spat = False def on_stop_button_focus_out_event(self, widget, event): """ Resets the button icon unless the button is still hovered """ if not getattr(widget, '__hovered', False): widget.set_image(Gtk.Image.new_from_icon_name( 'media-playback-stop', Gtk.IconSize.BUTTON)) def on_stop_button_press_event(self, widget, event): """ Called when the user clicks on the stop button """ if event.button == 1: if event.get_state() & Gdk.ModifierType.SHIFT_MASK: self.on_spat_clicked() elif event.button == 3: menu = guiutil.Menu() menu.append(_("Toggle: Stop after Selected Track"), self.on_spat_clicked, 'process-stop') menu.popup(None, None, None, None, event.button, event.time) def on_stop_button_release_event(self, widget, event): """ Called when the user releases the mouse from the stop button """ rect = widget.get_allocation() if 0 <= event.x < rect.width and 0 <= event.y < rect.height: player.PLAYER.stop() def on_stop_button_drag_motion(self, widget, context, x, y, time): """ Indicates possible SPAT during drag motion of tracks """ target = widget.drag_dest_find_target(context, widget.drag_dest_get_target_list()).name() if target == 'exaile-index-list': widget.set_image(Gtk.Image.new_from_icon_name( 'process-stop', Gtk.IconSize.BUTTON)) def on_stop_button_drag_leave(self, widget, context, time): """ Resets the stop button """ widget.set_image(Gtk.Image.new_from_icon_name( 'media-playback-stop', Gtk.IconSize.BUTTON)) def on_stop_button_drag_data_received(self, widget, context, x, y, selection, info, time): """ Allows for triggering the SPAT feature by dropping tracks on the stop button """ source_widget = Gtk.drag_get_source_widget(context) if selection.target.name() == 'exaile-index-list' and isinstance(source_widget, PlaylistView): position = int(selection.data.split(',')[0]) if position == source_widget.playlist.spat_position: position = -1 source_widget.playlist.spat_position = position source_widget.queue_draw() def on_spat_clicked(self, *e): """ Called when the user clicks on the SPAT item """ trs = self.get_selected_page().view.get_selected_items() if not trs: return # TODO: this works, but implement this some other way in the future if player.QUEUE.current_playlist.spat_position == -1: player.QUEUE.current_playlist.spat_position = trs[0][0] else: player.QUEUE.current_playlist.spat_position = -1 self.get_selected_page().view.queue_draw() def on_append_items(self, tracks, force_play=False, queue=False, sort=False, replace=False): """ Called when a panel (or other component) has tracks to append and possibly queue :param tracks: The tracks to append :param force_play: Force playing the first track if there is no track currently playing. 
Otherwise check a setting to determine whether the track should be played :param queue: Additionally queue tracks :param sort: Sort before adding :param replace: Clear playlist before adding """ if len(tracks) == 0: return page = self.get_selected_page() if sort: tracks = trax.sort_tracks( ('artist', 'date', 'album', 'discnumber', 'tracknumber'), tracks) if replace: page.playlist.clear() offset = len(page.playlist) page.playlist.extend(tracks) # extending the queue automatically starts playback if queue: if player.QUEUE is not page.playlist: player.QUEUE.extend(tracks) elif (force_play or settings.get_option( 'playlist/append_menu_starts_playback', False )) and \ not player.PLAYER.current: page.view.play_track_at(offset, tracks[0]) def on_playback_error(self, type, player, message): """ Called when there has been a playback error """ self.message.show_error(_('Playback error encountered!'), message) def on_buffering(self, type, player, percent): """ Called when a stream is buffering """ percent = min(percent, 100) self.statusbar.set_status(_("Buffering: %d%%...") % percent, 1) def on_track_tags_changed(self, type, track, tag): """ Called when tags are changed """ if track is player.PLAYER.current: self._update_track_information() def on_collection_tree_loaded(self, tree): """ Updates information on collection tree load """ self.statusbar.update_info() def on_exaile_loaded(self, event_type, exaile, nothing): """ Updates information on exaile load """ self.statusbar.update_info() event.remove_callback(self.on_exaile_loaded, 'exaile_loaded') def on_playlist_tracks_added(self, type, playlist, tracks): """ Updates information on track add """ self.statusbar.update_info() def on_playlist_tracks_removed(self, type, playlist, tracks): """ Updates information on track removal """ self.statusbar.update_info() def on_toggle_pause(self, type, player, object): """ Called when the user clicks the play button after playback has already begun """ if player.is_paused(): image = self.play_image tooltip = _('Continue Playback') else: image = self.pause_image tooltip = _('Pause Playback') self.playpause_button.set_image(image) self.playpause_button.set_tooltip_text(tooltip) self._update_track_information() def on_playlist_container_switch_page(self, notebook, page, page_num): """ Updates info after notebook page switch """ page = notebook.get_nth_page(page_num) selection = page.view.get_selection() selection.connect('changed', self.on_playlist_view_selection_changed) self.statusbar.update_info() def on_playlist_view_selection_changed(self, selection): """ Updates info after playlist page selection change """ self.statusbar.update_info() def on_panel_filter_focus(self, *e): """ Gives focus to the filter field of the current panel """ try: self.controller.get_active_panel().filter.grab_focus() except (AttributeError, KeyError): pass def on_search_playlist_focus(self, *e): """ Gives focus to the playlist search bar """ plpage = get_selected_playlist() if plpage: plpage.get_search_entry().grab_focus() def on_save_playlist(self, *e): """ Called when the user presses Ctrl+S Spawns the save dialog of the currently selected playlist tab if not custom, saves changes directly if custom """ tab = self.get_selected_tab() if not tab: return if tab.page.playlist.get_is_custom(): tab.do_save_changes_to_custom() else: tab.do_save_custom() def on_save_playlist_as(self, *e): """ Called when the user presses Ctrl+S Spawns the save as dialog of the current playlist tab """ tab = self.get_selected_tab() if not tab: return 
tab.do_save_custom() def on_clear_playlist(self, *e): """ Clears the current playlist tab """ page = self.get_selected_page() if page: page.playlist.clear() def on_open_item_activate(self, menuitem): """ Shows a dialog to open media """ def on_uris_selected(dialog, uris): uris.reverse() if len(uris) > 0: self.controller.open_uri(uris.pop(), play=True) for uri in uris: self.controller.open_uri(uri, play=False) dialog = dialogs.MediaOpenDialog(self.window) dialog.connect('uris-selected', on_uris_selected) dialog.show() def on_open_url_item_activate(self, menuitem): """ Shows a dialog to open an URI """ def on_uri_selected(dialog, uri): self.controller.open_uri(uri, play=False) dialog = dialogs.URIOpenDialog(self.window) dialog.connect('uri-selected', on_uri_selected) dialog.show() def on_open_directories_item_activate(self, menuitem): """ Shows a dialog to open directories """ def on_uris_selected(dialog, uris): uris.reverse() if len(uris) > 0: self.controller.open_uri(uris.pop(), play=True) for uri in uris: self.controller.open_uri(uri, play=False) dialog = dialogs.DirectoryOpenDialog(self.window) # Selecting empty folders is useless dialog.props.create_folders = False dialog.connect('uris-selected', on_uris_selected) dialog.show() def on_export_current_playlist_activate(self, menuitem): """ Shows a dialog to export the current playlist """ page = self.get_selected_page() if not page or not isinstance(page, PlaylistPage): return def on_message(dialog, message_type, message): """ Show messages in the main window message area """ if message_type == Gtk.MessageType.INFO: self.message.show_info(markup=message) elif message_type == Gtk.MessageType.ERROR: self.message.show_error(_('Playlist export failed!'), message) return True dialog = dialogs.PlaylistExportDialog(page.playlist, self.window) dialog.connect('message', on_message) dialog.show() def on_playlist_utilities_bar_visible_toggled(self, checkmenuitem): """ Shows or hides the playlist utilities bar """ settings.set_option('gui/playlist_utilities_bar_visible', checkmenuitem.get_active()) def on_show_playing_track_item_activate(self, menuitem): """ Tries to show the currently playing track """ self.playlist_container.show_current_track() def on_about_item_activate(self, menuitem): """ Shows the about dialog """ dialog = dialogs.AboutDialog(self.window) dialog.show() def on_playback_resume(self, type, player, data): self.resuming = True def on_playback_start(self, type, player, object): """ Called when playback starts Sets the currently playing track visible in the currently selected playlist if the user has chosen this setting """ if self.resuming: self.resuming = False return self._update_track_information() self.playpause_button.set_image(self.pause_image) self.playpause_button.set_tooltip_text(_('Pause Playback')) def on_playback_end(self, type, player, object): """ Called when playback ends """ self.window.set_title('Exaile') self.playpause_button.set_image(self.play_image) self.playpause_button.set_tooltip_text(_('Start Playback')) def _on_option_set(self, name, object, option): """ Handles changes of settings """ if option == 'gui/main_window_title_format': self.title_formatter.props.format = settings.get_option( option, self.title_formatter.props.format) elif option == 'gui/use_tray': usetray = settings.get_option(option, False) if self.controller.tray_icon and not usetray: self.controller.tray_icon.destroy() self.controller.tray_icon = None elif not self.controller.tray_icon and usetray: self.controller.tray_icon = 
tray.TrayIcon(self) elif option == 'gui/show_info_area': self.info_area.set_no_show_all(False) if settings.get_option(option, True): self.info_area.show_all() else: self.info_area.hide() self.info_area.set_no_show_all(True) elif option == 'gui/show_info_area_covers': cover = self.info_area.cover cover.set_no_show_all(False) if settings.get_option(option, True): cover.show_all() else: cover.hide() cover.set_no_show_all(True) elif option == 'gui/transparency': self._update_alpha() def _on_volume_key(self, is_up): diff = int(100 * settings.get_option('gui/volue_key_step_size', VOLUME_STEP_DEFAULT)) if not is_up: diff = -diff player.PLAYER.modify_volume(diff) return True def _on_seek_key(self, is_forward): diff = settings.get_option('gui/seek_key_step_size', SEEK_STEP_DEFAULT) if not is_forward: diff = -diff if player.PLAYER.current: player.PLAYER.modify_time(diff) self.progress_bar.update_progress() return True def _on_prev_tab_key(self, *e): self.playlist_container.get_current_notebook().select_prev_tab() return True def _on_next_tab_key(self, *e): self.playlist_container.get_current_notebook().select_next_tab() return True def _on_playpause_button(self, *e): self.playpause() return True def _on_focus_playlist_tab(self, tab_nr): self.playlist_container.get_current_notebook().focus_tab(tab_nr) return True def _on_focus_playlist_container(self, *_e): self.playlist_container.focus() return True def _update_track_information(self): """ Sets track information """ track = player.PLAYER.current if not track: return self.window.set_title(self.title_formatter.format(track)) def playpause(self): """ Pauses the playlist if it is playing, starts playing if it is paused. If stopped, try to start playing the next suitable track. """ if player.PLAYER.is_paused() or player.PLAYER.is_playing(): player.PLAYER.toggle_pause() else: pl = self.get_selected_page() player.QUEUE.set_current_playlist(pl.playlist) try: trackpath = pl.view.get_selected_paths()[0] pl.playlist.current_position = trackpath[0] except IndexError: pass player.QUEUE.play(track=pl.playlist.current) def _setup_position(self): """ Sets up the position and sized based on the size the window was when it was last moved or resized """ if settings.get_option('gui/mainw_maximized', False): self.window.maximize() width = settings.get_option('gui/mainw_width', 500) height = settings.get_option('gui/mainw_height', 475) x = settings.get_option('gui/mainw_x', 10) y = settings.get_option('gui/mainw_y', 10) self.window.move(x, y) self.window.resize(width, height) pos = settings.get_option('gui/mainw_sash_pos', 200) self.splitter.set_position(pos) def on_delete_event(self, *e): """ Called when the user attempts to close the window """ sash_pos = self.splitter.get_position() if sash_pos > 10: settings.set_option('gui/mainw_sash_pos', sash_pos) if settings.get_option('gui/use_tray', False) and \ settings.get_option('gui/close_to_tray', False): self.window.hide() else: self.quit() return True def quit(self, *e): """ Quits Exaile """ self.window.hide() GLib.idle_add(self.controller.exaile.quit) return True def on_restart_item_activate(self, menuitem): """ Restarts Exaile """ self.window.hide() GLib.idle_add(self.controller.exaile.quit, True) def toggle_visible(self, bringtofront=False): """ Toggles visibility of the main window """ toggle_handled = self.emit('main-visible-toggle') if not toggle_handled: if bringtofront and self.window.is_active() or \ not bringtofront and self.window.get_property('visible'): self.window.hide() else: # the ordering for 
deiconify/show matters -- if this gets # switched, then the minimization detection breaks self.window.deiconify() self.window.show() def configure_event(self, *e): """ Called when the window is resized or moved """ # Don't save window size if it is maximized or fullscreen. if settings.get_option('gui/mainw_maximized', False) or \ self._fullscreen: return False (width, height) = self.window.get_size() if [width, height] != [ settings.get_option("gui/mainw_"+key, -1) for \ key in ["width", "height"] ]: settings.set_option('gui/mainw_height', height, save=False) settings.set_option('gui/mainw_width', width, save=False) (x, y) = self.window.get_position() if [x, y] != [ settings.get_option("gui/mainw_"+key, -1) for \ key in ["x", "y"] ]: settings.set_option('gui/mainw_x', x, save=False) settings.set_option('gui/mainw_y', y, save=False) return False def window_state_change_event(self, window, event): """ Saves the current maximized and fullscreen states and minimizes to tray if requested """ if event.changed_mask & Gdk.WindowState.MAXIMIZED: settings.set_option('gui/mainw_maximized', bool(event.new_window_state & Gdk.WindowState.MAXIMIZED)) if event.changed_mask & Gdk.WindowState.FULLSCREEN: self._fullscreen = bool(event.new_window_state & Gdk.WindowState.FULLSCREEN) self.notify('is-fullscreen') # detect minimization state changes prev_minimized = self.minimized if not self.minimized: if event.changed_mask & Gdk.WindowState.ICONIFIED and \ not event.changed_mask & Gdk.WindowState.WITHDRAWN and \ event.new_window_state & Gdk.WindowState.ICONIFIED and \ not event.new_window_state & Gdk.WindowState.WITHDRAWN and \ not self.window_state & Gdk.WindowState.ICONIFIED: self.minimized = True else: if event.changed_mask & Gdk.WindowState.WITHDRAWN and \ not event.new_window_state & (Gdk.WindowState.WITHDRAWN): #and \ self.minimized = False # track this self.window_state = event.new_window_state if settings.get_option('gui/minimize_to_tray', False): # old code to detect minimization # -> it must have worked at some point, perhaps this is a GTK version # specific set of behaviors? Current code works now on 2.24.17 #if wm_state is not None: # if '_NET_WM_STATE_HIDDEN' in wm_state[2]: # show tray # window.hide #else # destroy tray if self.minimized != prev_minimized and self.minimized == True: if not settings.get_option('gui/use_tray', False) and \ self.controller.tray_icon is None: self.controller.tray_icon = tray.TrayIcon(self) window.hide() elif not settings.get_option('gui/use_tray', False) and \ self.controller.tray_icon is not None: self.controller.tray_icon.destroy() self.controller.tray_icon = None return False def get_selected_page(self): """ Returns the currentry displayed playlist notebook page """ return self.playlist_container.get_current_tab() def get_selected_playlist(self): try: page = self.get_selected_page() except AttributeError: return None if not isinstance(page, PlaylistPage): return None return page class MainWindowTrackInfoPane(info.TrackInfoPane, providers.ProviderHandler): """ Extends the regular track info pane by an area for custom widgets The mainwindow-info-area-widget provider is used to show widgets on the right of the info area. They should be small. The registered provider should provide a method 'create_widget' that takes the info area instance as a parameter, and that returns a Gtk.Widget to be inserted into the widget_area of the info area, and an attribute 'name' that will be used when removing the provider. 
""" def __init__(self, player): info.TrackInfoPane.__init__(self, player) self.__player = player self.widget_area = Gtk.Box() self.get_child().pack_start(self.widget_area, False, False, 0) self.__widget_area_widgets = {} # call this last if we're using simple_init=True providers.ProviderHandler.__init__(self, 'mainwindow-info-area-widget', target=player, simple_init=True) def get_player(self): ''' Retrieves the player object that this info area is associated with ''' return self._TrackInfoPane__player def on_provider_added(self, provider): name = provider.name widget = provider.create_widget(self) old_widget = self.__widget_area_widgets.get(name) if old_widget is not None: self.widget_area.remove(old_widget) old_widget.destroy() self.__widget_area_widgets[name] = widget self.widget_area.pack_start(widget, False, False, 0) widget.show_all() def on_provider_removed(self, provider): widget = self.__widget_area_widgets.pop(provider.name, None) if widget is not None: self.widget_area.remove(widget) widget.destroy() def get_playlist_container(): return MainWindow._mainwindow.playlist_container def get_playlist_notebook(): '''Retrieves the primary playlist notebook''' return MainWindow._mainwindow.playlist_container.notebooks[0] def get_selected_page(): return MainWindow._mainwindow.get_selected_page() def get_selected_playlist(): return MainWindow._mainwindow.get_selected_playlist() def mainwindow(): return MainWindow._mainwindow # vim: et sts=4 sw=4
gpl-2.0
-6,720,579,076,938,104,000
35.930918
105
0.589593
false
3.948212
false
false
false
amw2104/fireplace
fireplace/cards/classic/paladin.py
1
2853
from ..utils import * ## # Hero Powers # Reinforce (Uther Lightbringer) class CS2_101: activate = Summon(CONTROLLER, "CS2_101t") # Reinforce (Uther Skin 1) class CS2_101_H1: activate = CS2_101.activate ## # Minions # Guardian of Kings class CS2_088: play = Heal(FRIENDLY_HERO, 6) # Argent Protector class EX1_362: play = GiveDivineShield(TARGET) # Aldor Peacekeeper class EX1_382: play = Buff(TARGET, "EX1_382e") class EX1_382e: atk = SET(1) # Tirion Fordring class EX1_383: deathrattle = Summon(CONTROLLER, "EX1_383t") ## # Spells # Blessing of Might class CS2_087: play = Buff(TARGET, "CS2_087e") CS2_087e = buff(atk=3) # Holy Light class CS2_089: play = Heal(TARGET, 6) # Blessing of Kings class CS2_092: play = Buff(TARGET, "CS2_092e") CS2_092e = buff(+4, +4) # Consecration class CS2_093: play = Hit(ENEMY_CHARACTERS, 2) # Hammer of Wrath class CS2_094: play = Hit(TARGET, 3), Draw(CONTROLLER) # Divine Favor class EX1_349: play = DrawUntil(CONTROLLER, Count(ENEMY_HAND)) # Lay on Hands class EX1_354: play = Heal(TARGET, 8), Draw(CONTROLLER) * 3 # Blessed Champion class EX1_355: play = Buff(TARGET, "EX1_355e") class EX1_355e: atk = lambda self, i: i * 2 # Humility class EX1_360: play = Buff(TARGET, "EX1_360e") class EX1_360e: atk = SET(1) # Blessing of Wisdom class EX1_363: play = Buff(TARGET, "EX1_363e") class EX1_363e: events = Attack(OWNER).on(Draw(CONTROLLER)) # Blessing of Wisdom (Unused) class EX1_363e2: events = Attack(OWNER).on(Draw(OWNER_OPPONENT)) # Holy Wrath class EX1_365: play = Draw(CONTROLLER).then(Hit(TARGET, COST(Draw.CARD))) # Hand of Protection class EX1_371: play = GiveDivineShield(TARGET) # Avenging Wrath class EX1_384: def play(self): count = self.controller.get_spell_damage(8) yield Hit(RANDOM_ENEMY_CHARACTER, 1) * count # Equality class EX1_619: play = Buff(ALL_MINIONS, "EX1_619e") class EX1_619e: max_health = SET(1) ## # Secrets # Noble Sacrifice class EX1_130: secret = Attack(ENEMY_MINIONS).on(FULL_BOARD | ( Reveal(SELF), Retarget(Attack.ATTACKER, Summon(CONTROLLER, "EX1_130a")) )) # Eye for an Eye class EX1_132: secret = Damage(FRIENDLY_HERO).on( Reveal(SELF), Hit(ENEMY_HERO, Damage.AMOUNT) ) # Redemption class EX1_136: secret = Death(FRIENDLY + MINION).on(FULL_BOARD | ( Reveal(SELF), Summon(CONTROLLER, Copy(Death.ENTITY)).then(SetCurrentHealth(Summon.CARD, 1)) )) # Repentance class EX1_379: secret = Play(OPPONENT, MINION | HERO).after( Reveal(SELF), Buff(Play.CARD, "EX1_379e") ) class EX1_379e: max_health = SET(1) ## # Weapons # Truesilver Champion class CS2_097: events = Attack(FRIENDLY_HERO).on(Heal(FRIENDLY_HERO, 2)) # Sword of Justice class EX1_366: events = Summon(CONTROLLER, MINION).after( Buff(Summon.CARD, "EX1_366e"), Hit(SELF, 1) ) EX1_366e = buff(+1, +1)
agpl-3.0
-3,566,954,898,071,706,600
14.256684
79
0.685594
false
2.196305
false
false
false
renyi533/tensorflow
tensorflow/python/keras/mixed_precision/experimental/policy.py
1
25763
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the Policy class for mixed precision training.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import six from tensorflow.python.framework import dtypes from tensorflow.python.keras import backend from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.mixed_precision.experimental import device_compatibility_check from tensorflow.python.keras.mixed_precision.experimental import loss_scale as keras_loss_scale_module from tensorflow.python.keras.utils import generic_utils from tensorflow.python.platform import tf_logging from tensorflow.python.training.experimental import mixed_precision_global_state from tensorflow.python.util.tf_export import keras_export # Default value of certain arguments, indicating the default behavior for # that argument should be used. USE_DEFAULT = 'USE_DEFAULT' @keras_export('keras.mixed_precision.experimental.Policy') class Policy(object): """A dtype policy for a Keras layer. A dtype policy determines dtype-related aspects of a layer, such as its computation and variable dtypes. Each layer has a policy. Policies can be passed to the `dtype` argument of layer constructors, or a global policy can be set with `tf.keras.mixed_precision.experimental.set_policy`. A layer will default to the global policy if no policy is passed to it's constructor. For many models, each layer's policy will have the same compute dtype and variable dtype, which will typically be float32. In this case, we refer to the singular dtype as the layer's dtype, which can be queried by the property `tf.keras.layers.Layer.dtype`. When mixed precision training is used, most layers will instead have a float16 or bfloat16 compute dtype and a float32 variable dtype, and so the layer does not have a single dtype. When the variable dtype does not match the compute dtype, variables will be automatically casted to the compute dtype to avoid type errors. In this case, `tf.keras.layers.Layer.dtype` refers to the variable dtype, not the compute dtype. See [the mixed precision guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more information on how to use mixed precision. Certain policies also have a `tf.mixed_precision.experimental.LossScale` instance, which is used by `tf.keras.Model`s to performance loss scaling. Loss scaling is a technique used with mixed precision to avoid numerical underflow in float16 gradients. Loss scaling is only done by Models in `Model.fit`, `Model.train_on_batch`, and similar methods. Layers which are not Models ignore the loss scale. Policies are constructed by passing a string to the constructor, e.g. `tf.keras.mixed_precision.experimental.Policy('float32')`. The string determines the compute and variable dtypes. 
It can be one of the following: * Any dtype name, such as 'float32' or 'float64'. Both the variable and compute dtypes will be that dtype. No loss scaling is done by default. * 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or bfloat16, while the variable dtype is float32. These policies are used for mixed precision training. With 'mixed_float16', a dynamic loss scale is used by default. 'mixed_bfloat16' does no loss scaling by default, as loss scaling is unnecessary with bfloat16. ### How to use mixed precision in a Keras model To use mixed precision in a Keras model, the `'mixed_float16'` or `'mixed_bfloat16'` policy can be used. `tf.keras.mixed_precision.experimental.set_policy` can be used to set the default policy for layers if no policy is passed to them. For example: >>> tf.keras.mixed_precision.experimental.set_policy('mixed_float16') >>> model = tf.keras.models.Sequential([ ... tf.keras.layers.Input((100,)), ... # Dense layers use global policy of 'mixed_float16', which does ... # computations in float16 while keeping variables in float32. ... tf.keras.layers.Dense(10), ... tf.keras.layers.Dense(10), ... # Softmax should be done in float32 for numeric stability. We pass ... # dtype='float32' to use float32 instead of the global policy. ... tf.keras.layers.Activation('softmax', dtype='float32') ... ]) Alternatively, the policy can be passed to individual layers instead of setting the global policy with `set_policy`: >>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16') >>> model = tf.keras.models.Sequential([ ... tf.keras.layers.Input((100,)), ... tf.keras.layers.Dense(10, dtype=policy), ... tf.keras.layers.Dense(10, dtype=policy), ... # Softmax should be done in float32 for numeric stability. ... tf.keras.layers.Activation('softmax', dtype='float32') ... ]) Note the `'mixed_float16'` policy will apply loss scaling by default in `Model.fit`, `Model.train_on_batch`, and other training methods. If no such method is used (e.g., a custom training loop is used) and `'mixed_float16'` is used, the loss scale must be manually applied. See `tf.keras.mixed_precision.experimental.LossScaleOptimizer` for details. For `'mixed_bfloat16'`, no loss scaling is done and loss scaling never needs to be manually applied. See [the mixed precision guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more information on using mixed precision ### How to use float64 in a Keras model Using float64 is similar to mixed precision. Either the global policy can be set to float64, or `dtype='float64'` can be passed to individual layers. For example, to set the global policy: >>> tf.keras.mixed_precision.experimental.set_policy('float64') >>> model = tf.keras.models.Sequential([ ... tf.keras.layers.Input((100,)), ... # All layers use global policy of 'float64', which does computations ... # and creates variables in float64. ... tf.keras.layers.Dense(10), ... tf.keras.layers.Dense(10), ... tf.keras.layers.Activation('softmax') ... ]) >>> # Optionaly set policy back to float32 if any other models use float32 >>> tf.keras.mixed_precision.experimental.set_policy('float32') ### How a layer uses its policy's compute dtype A layer will cast its inputs to its compute dtype in TensorFlow 2. For example: >>> x = tf.ones((4, 4, 4, 4), dtype='float64') >>> # `layer`'s policy defaults to float32. >>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2) >>> # `layer` casts it's inputs to its compute dtype, which is float32, and >>> # does computations in float32. 
>>> y = layer(x) >>> y.dtype tf.float32 Note that the base `tf.keras.layers.Layer` class inserts the casts. If subclassing your own layer, you do not have to insert any casts. Currently, only tensors in the first argument to the layer's `call` method are casted. For example: >>> class MyLayer(tf.keras.layers.Layer): ... # Bug! `b` will not be casted. ... def call(self, a, b): ... return a + 1., b + 1. >>> a = tf.constant(1., dtype="float32") >>> b = tf.constant(1., dtype="float32") >>> layer = MyLayer(dtype="float64") >>> x, y = layer(a, b) >>> x.dtype tf.float64 >>> y.dtype tf.float32 If writing your own layer, it is recommended to accept tensors only in the first argument. This way, all tensors are casted to the layer's compute dtype. `MyLayer` should therefore be written as: >>> class MyLayer(tf.keras.layers.Layer): ... # Now, all tensor inputs will be casted. ... def call(self, inputs): ... a, b = inputs ... return a + 1., b + 1. >>> a = tf.constant(1., dtype="float32") >>> b = tf.constant(1., dtype="float32") >>> layer = MyLayer(dtype="float64") >>> x, y = layer((a, b)) >>> x.dtype tf.float64 >>> y.dtype tf.float64 Other arguments are not automatically casted for technical reasons, but this may change in a future minor release. A layer subclass can prevent its inputs from being autocasted by passing `autocast=False` to the layer constructor. For example: >>> class NonAutoCastingLayer(tf.keras.layers.Layer): ... def __init__(self, **kwargs): ... kwargs['autocast'] = False ... super(NonAutoCastingLayer, self).__init__(**kwargs) ... def call(self, inp): ... return inp >>> x = tf.ones((4, 4, 4, 4), dtype='float32') >>> layer = NonAutoCastingLayer(dtype='float64') >>> y = layer(x) # Will not cast inputs to it's compute dtype of float64 >>> y.dtype tf.float32 ### How a layer uses its policy's variable dtype The default dtype of variables created by `tf.keras.layers.Layer.add_weight` is the layer's policy's variable dtype. If a layer's compute and variable dtypes differ, `add_weight` will wrap floating-point variables with a special wrapper called an `AutoCastVariable`. This wrapper is identical to the original variable except it casts itself to the layer's compute dtype when used within `Layer.call`. Outside `Layer.call`, the variable is not casted. A layer author can prevent a variable from being wrapped with an `AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`: >>> class MyLayer(tf.keras.layers.Layer): ... def build(self, input_shape): ... self.x = self.add_weight('x') ... self.y = self.add_weight('y', experimental_autocast=False) >>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16') >>> layer = MyLayer(dtype=policy) >>> layer.build((2, 2)) >>> layer.x <AutoCastVariable 'x:0' shape=() dtype=float32 true_dtype=float32, numpy=...> >>> layer.y <tf.Variable 'y:0' shape=() dtype=float32, numpy=...> Passing `experimental_autocast=False` is useful for layers which may internally do some math in the variable dtype instead of the compute dtype. For example, you may wish to compute variable statistics, such as mean and variance, in the variable dtype. ### How to write a layer that supports mixed precision and float64. For the most part, layers will automatically support mixed precision and float64 without any additional work, due to the fact the base layer automatically casts inputs, creates variables of the correct type, and in the case of mixed precision, wraps variables with `AutoCastVariables`. 
For example, this simple dense layer does not require any additional work to support mixed precision or float64. Keras automatically casts the inputs and variable to the appropriate dtype. >>> class MyDense(tf.keras.layers.Layer): ... def build(self, input_shape): ... self.kernel = self.add_weight('kernel', (input_shape[-1], 10)) ... def call(self, inputs): ... return tf.matmul(inputs, self.kernel) >>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16') >>> layer = MyDense(dtype=policy) >>> x = np.random.rand(10, 10) >>> y = layer(x) >>> y.dtype tf.float16 The primary case where you need extra work to support mixed precision or float64 is when you create a new tensor, such as with `tf.ones` or `tf.constant`. In such cases, you must create the tensor of the correct dtype. For example, suppose you modify the `MyDense` layer to add a random number to the output using `tf.random.normal`. You must pass the input dtype to `tf.random.normal` to ensure the dtypes match. >>> class MyDense(tf.keras.layers.Layer): ... def build(self, input_shape): ... self.kernel = self.add_weight('kernel', (input_shape[-1], 10)) ... def call(self, inputs): ... rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype) ... return tf.matmul(inputs, self.kernel) + rand >>> >>> layer = MyDense(dtype=policy) >>> y = layer(x) >>> y.dtype tf.float16 If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a `TypeError` would have occurred. This is because the dtype defaults to `"float32"`, so the layer would only work if the inputs were float32. ### The deprecated "infer" policy In addition to the above mentioned policies, a policy can also be "infer". This Policy is deprecated, and it is not recommended. When a layer has an infer policy, it will infer the computation and variable dtype from the first input the first time the layer is called. Once the layer is called for the first time, the layer's policy will change to the dtype of the first input. In TensorFlow 1, only the "infer" policy is available. """ def __init__(self, name, loss_scale=USE_DEFAULT): """Constructs the policy. The `name` argument determines the compute and variable dtype, the default loss scale, and has no additional effect on the Policy. The compute and variable dtypes can only be specified through `name`, and cannot be specified directly. Args: name: A string. Can be one of the following values: * Any dtype name, such as 'float32' or 'float64'. Both the variable and compute dtypes will be that dtype. * 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or bfloat16, while the variable dtype is float32. With 'mixed_float16', a dynamic loss scale is used. These policies are used for mixed precision training. * 'infer' (deprecated): Infer the compute and variable dtype from the input dtype. loss_scale: A `tf.mixed_precision.experimental.LossScale`, an int (which uses a `FixedLossScale`), or the string "dynamic" (which uses a `DynamicLossScale`). Defaults to using no loss scaling unless `name` is "mixed_float16", in which case this defaults to "dynamic". Only `tf.keras.Model`s, not layers, use the loss scale, and it is only used during `Model.fit`, `Model.train_on_batch`, and other similar methods. """ if isinstance(name, dtypes.DType): raise TypeError("'name' must be a string, not a DType. " "Instead, pass DType.name. 
Got: %s" % (name.name,)) elif not isinstance(name, six.string_types): raise TypeError("'name' must be a string, but got: %s" % (name,)) self._name = name self._compute_dtype, self._variable_dtype = self._parse_name(name) if loss_scale == USE_DEFAULT: loss_scale = 'dynamic' if name == 'mixed_float16' else None self._using_default_loss_scale = True else: self._using_default_loss_scale = False if loss_scale and self._compute_dtype not in (None, 'float16'): tf_logging.warn('Creating a Policy with a loss scale is only useful for ' 'float16 policies. You passed loss_scale=%r for policy ' '%s. Consider not passing any loss_scale instead.' % (loss_scale, name)) self._loss_scale = keras_loss_scale_module.get(loss_scale) if name in ('mixed_float16', 'mixed_bloat16'): device_compatibility_check.log_device_compatibility_check(name) def _parse_name(self, name): """Parses a Policy name into a compute and variable dtype. Args: name: The name of the policy: Returns: The (compute_dtype, variable_dtype) pair. """ if name.endswith('_float32_vars'): error_msg = ('Policies ending in \'_float32_vars\' have been removed ' 'from TensorFlow.') if name in ('infer_float32_vars', 'infer_with_float32_vars'): error_msg += (' Please use the \'mixed_float16\' or \'mixed_bfloat16\' ' 'policy instead.') elif name == 'float16_with_float32_vars': error_msg += (' Please use the \'mixed_float16\' policy instead.') elif name == 'bfloat16_with_float32_vars': error_msg += (' Please use the \'mixed_bfloat16\' policy instead.') error_msg += ' Got policy name: \'%s\'' % name raise ValueError(error_msg) if name == 'mixed_float16': return 'float16', 'float32' elif name == 'mixed_bfloat16': return 'bfloat16', 'float32' elif name == 'infer': return None, None try: dtype = dtypes.as_dtype(name).name except TypeError: error = ("Cannot convert value %s to a mixed precision Policy. " "Valid policies include include 'mixed_float16', " "'mixed_bfloat16', and the name of any dtype such as " "'float32'." % (name,)) # six.raise_from suppresses the original TypeError from being raised six.raise_from(ValueError(error), None) return dtype, dtype @property def variable_dtype(self): """The variable dtype of this policy. This is the dtype layers will create their variables in, unless a layer explicitly chooses a different dtype. If this is different than `Policy.compute_dtype`, Layers will cast variables to the compute dtype to avoid type errors. Returns: The variable dtype of this policy, or None if the variable dtype should be inferred from the inputs. """ return self._variable_dtype @property def compute_dtype(self): """The compute dtype of this policy. This is the dtype layers will do their computations in. Note that even if the compute dtype is float16 or bfloat16, hardware devices may not do individual adds, multiplies, and other fundamental operations in [b]float16, but instead may do some of them in float32 for numeric stability. The compute dtype is the dtype of the inputs and outputs of the TensorFlow ops that the layer executes. Internally, many TensorFlow ops will do certain internal calculations in float32, or some other device-internal intermediate format with higher precision than [b]float16, to increase numeric stability. For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a float16 compute dtype, will pass float16 inputs to tf.matmul. But, tf.matmul will do use float32 intermediate math. 
The performance benefit of float16 is still apparent, due to increased memory bandwidth and the fact modern GPUs have specialized hardware for computing matmuls on float16 while still keeping intermediate computations in float32. Returns: The compute dtype of this policy, or None if the compute dtype should be inferred from the inputs. """ return self._compute_dtype @property def should_cast_variables(self): """Returns True if variables should be casted. This is true if the variable dtype is not the same as the compute dtype. Returns: True, if variables should be casted. """ return self.variable_dtype != self.compute_dtype @property def loss_scale(self): """Returns the loss scale of this Policy. Returns: A `tf.mixed_precision.experimental.LossScale`, or None. """ return self._loss_scale @property def name(self): """Returns the name of this policy.""" return self._name def __repr__(self): return '<Policy "%s", loss_scale=%s>' % (self._name, self.loss_scale) def get_config(self): config = { 'name': self.name } if not self._using_default_loss_scale: # We only include the loss scale if the default loss scale is not used. # This allows us to change the loss scale config format without breaking # users who use the default loss scale. config['loss_scale'] = keras_loss_scale_module.serialize(self.loss_scale) return config @classmethod def from_config(cls, config, custom_objects=None): if 'loss_scale' in config and isinstance(config['loss_scale'], dict): config = config.copy() config['loss_scale'] = keras_loss_scale_module.deserialize( config['loss_scale'], custom_objects=custom_objects) return cls(**config) # The current global policy in effect. If None, it means the current value of # floatx should be used as the policy if the V2 dtype behavior is enabled, # or "infer" otherwise. # TODO(reedwm): Make this thread local? _global_policy = None @keras_export('keras.mixed_precision.experimental.global_policy') def global_policy(): """Returns the global Policy. The global policy is the default policy used for layers, if no policy is passed to the layer constructor. If no policy has been set with `keras.mixed_precision.experimental.set_policy`, this will return a policy constructed from `tf.keras.backend.floatx()` in TensorFlow 2 (floatx defaults to float32), or an "infer" policy in TensorFlow 1. See `keras.mixed_precision.experimental.Policy` for more information. Returns: The global Policy. """ if _global_policy is None: if base_layer_utils.v2_dtype_behavior_enabled(): return Policy(backend.floatx()) else: return Policy('infer') return _global_policy def policy_defaults_to_floatx(): """Returns True if `global_policy()` will use the current value of floatx.""" return _global_policy is None and base_layer_utils.v2_dtype_behavior_enabled() def _check_if_mixed_precision_graph_rewrite_is_enabled(): # TODO(reedwm): Update this comment once the Keras API is complete. if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled: raise ValueError( 'The mixed precision policy cannot be set, because the mixed ' 'precision graph rewrite has already been enabled.\n' 'At most, one of the following functions can be called:\n\n' ' 1. tf.train.experimental.enable_mixed_precision_graph_rewrite() ' '(You called this first)\n' ' 2. tf.keras.mixed_precision.experimental.set_policy() (You called ' 'this second)\n\n' 'You called both functions, which is an error, because both functions ' 'enable you to use mixed precision. 
If in doubt which function to use, ' 'use the second, as it supports Eager execution and is more ' 'customizable.') @keras_export('keras.mixed_precision.experimental.set_policy') def set_policy(policy): """Sets the global Policy. The global policy is the default policy used for layers, if no policy is passed to the layer constructor. If no global policy is set, layers will instead default to a Policy constructed from `tf.keras.backend.floatx()` in TensorFlow 2. In TensorFlow 1, layers default to an "infer" policy. See `keras.mixed_precision.experimental.Policy` for more information. Args: policy: A Policy, or a string that will be converted to a Policy.. """ global _global_policy _check_if_mixed_precision_graph_rewrite_is_enabled() if policy is not None and not isinstance(policy, Policy): policy = Policy(policy) if (policy and not base_layer_utils.v2_dtype_behavior_enabled() and policy.compute_dtype): raise ValueError( 'The global policy can only be set to a non-infer policy in TensorFlow ' '2') _global_policy = policy mixed_precision_global_state.using_default_mixed_precision_policy = ( _global_policy is None) # TODO(reedwm): Make this thread local @contextlib.contextmanager def policy_scope(policy): """A context manager that sets the global Policy under it. Args: policy: A Policy, or a string that will be converted to a Policy.. Yields: Nothing. """ old_policy = _global_policy try: set_policy(policy) yield finally: set_policy(old_policy) def _is_convertible_to_dtype(dtype): try: dtypes.as_dtype(dtype) return True except TypeError: return False def _policy_equivalent_to_dtype(policy): """Returns True if the Policy is equivalent to a single dtype. A policy is equivalent to a single dtype if the policy's compute and variable dtypes are the same and the policy does not cause the layer/model to have additional behavior, such as loss scaling. The "infer" policy is considered equivalent to a single dtype. Args: policy: A Policy. Returns: True, if the policy is equivalent to a single dtype. """ # We use type() instead of isinstance because a sublcass of Policy is never # equivalent to a dtype. return (type(policy) == Policy and # pylint: disable=unidiomatic-typecheck list(policy.get_config().keys()) == ['name'] and (policy.name == 'infer' or _is_convertible_to_dtype(policy.name))) def serialize(policy): if _policy_equivalent_to_dtype(policy): # We return either None or the policy name for compatibility with older # versions of Keras. If the policy name is returned, it is a dtype string # such as 'float32'. return None if policy.name == 'infer' else policy.name return generic_utils.serialize_keras_object(policy) def deserialize(config, custom_objects=None): if isinstance(config, str) and _is_convertible_to_dtype(config): return Policy(config) if config is None: return Policy('infer') module_objects = {'Policy': Policy} return generic_utils.deserialize_keras_object( config, module_objects=module_objects, custom_objects=custom_objects, printable_module_name='dtype policy')
apache-2.0
4,548,425,901,872,756,700
39.958665
102
0.695843
false
3.929083
true
false
false
googleapis/googleapis-gen
google/cloud/networkmanagement/v1/networkmanagement-v1-py/google/cloud/network_management_v1/services/reachability_service/transports/grpc.py
1
21150
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import grpc_helpers # type: ignore from google.api_core import operations_v1 # type: ignore from google.api_core import gapic_v1 # type: ignore import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.cloud.network_management_v1.types import connectivity_test from google.cloud.network_management_v1.types import reachability from google.longrunning import operations_pb2 # type: ignore from .base import ReachabilityServiceTransport, DEFAULT_CLIENT_INFO class ReachabilityServiceGrpcTransport(ReachabilityServiceTransport): """gRPC backend transport for ReachabilityService. The Reachability service in the Google Cloud Network Management API provides services that analyze the reachability within a single Google Virtual Private Cloud (VPC) network, between peered VPC networks, between VPC and on-premises networks, or between VPC networks and internet hosts. A reachability analysis is based on Google Cloud network configurations. You can use the analysis results to verify these configurations and to troubleshoot connectivity issues. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ _stubs: Dict[str, Callable] def __init__(self, *, host: str = 'networkmanagement.googleapis.com', credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or applicatin default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, credentials=self._credentials, credentials_file=credentials_file, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. 
This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @classmethod def create_channel(cls, host: str = 'networkmanagement.googleapis.com', credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs) -> grpc.Channel: """Create and return a gRPC channel object. Args: host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. Raises: google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs ) @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. """ return self._grpc_channel @property def operations_client(self) -> operations_v1.OperationsClient: """Create the client designed to process long-running operations. This property caches on the instance; repeated calls return the same client. """ # Sanity check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsClient( self.grpc_channel ) # Return the client from cache. return self._operations_client @property def list_connectivity_tests(self) -> Callable[ [reachability.ListConnectivityTestsRequest], reachability.ListConnectivityTestsResponse]: r"""Return a callable for the list connectivity tests method over gRPC. Lists all Connectivity Tests owned by a project. Returns: Callable[[~.ListConnectivityTestsRequest], ~.ListConnectivityTestsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'list_connectivity_tests' not in self._stubs: self._stubs['list_connectivity_tests'] = self.grpc_channel.unary_unary( '/google.cloud.networkmanagement.v1.ReachabilityService/ListConnectivityTests', request_serializer=reachability.ListConnectivityTestsRequest.serialize, response_deserializer=reachability.ListConnectivityTestsResponse.deserialize, ) return self._stubs['list_connectivity_tests'] @property def get_connectivity_test(self) -> Callable[ [reachability.GetConnectivityTestRequest], connectivity_test.ConnectivityTest]: r"""Return a callable for the get connectivity test method over gRPC. Gets the details of a specific Connectivity Test. 
Returns: Callable[[~.GetConnectivityTestRequest], ~.ConnectivityTest]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'get_connectivity_test' not in self._stubs: self._stubs['get_connectivity_test'] = self.grpc_channel.unary_unary( '/google.cloud.networkmanagement.v1.ReachabilityService/GetConnectivityTest', request_serializer=reachability.GetConnectivityTestRequest.serialize, response_deserializer=connectivity_test.ConnectivityTest.deserialize, ) return self._stubs['get_connectivity_test'] @property def create_connectivity_test(self) -> Callable[ [reachability.CreateConnectivityTestRequest], operations_pb2.Operation]: r"""Return a callable for the create connectivity test method over gRPC. Creates a new Connectivity Test. After you create a test, the reachability analysis is performed as part of the long running operation, which completes when the analysis completes. If the endpoint specifications in ``ConnectivityTest`` are invalid (for example, containing non-existent resources in the network, or you don't have read permissions to the network configurations of listed projects), then the reachability result returns a value of ``UNKNOWN``. If the endpoint specifications in ``ConnectivityTest`` are incomplete, the reachability result returns a value of AMBIGUOUS. For more information, see the Connectivity Test documentation. Returns: Callable[[~.CreateConnectivityTestRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'create_connectivity_test' not in self._stubs: self._stubs['create_connectivity_test'] = self.grpc_channel.unary_unary( '/google.cloud.networkmanagement.v1.ReachabilityService/CreateConnectivityTest', request_serializer=reachability.CreateConnectivityTestRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['create_connectivity_test'] @property def update_connectivity_test(self) -> Callable[ [reachability.UpdateConnectivityTestRequest], operations_pb2.Operation]: r"""Return a callable for the update connectivity test method over gRPC. Updates the configuration of an existing ``ConnectivityTest``. After you update a test, the reachability analysis is performed as part of the long running operation, which completes when the analysis completes. The Reachability state in the test resource is updated with the new result. If the endpoint specifications in ``ConnectivityTest`` are invalid (for example, they contain non-existent resources in the network, or the user does not have read permissions to the network configurations of listed projects), then the reachability result returns a value of UNKNOWN. If the endpoint specifications in ``ConnectivityTest`` are incomplete, the reachability result returns a value of ``AMBIGUOUS``. See the documentation in ``ConnectivityTest`` for for more details. Returns: Callable[[~.UpdateConnectivityTestRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. 
# gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'update_connectivity_test' not in self._stubs: self._stubs['update_connectivity_test'] = self.grpc_channel.unary_unary( '/google.cloud.networkmanagement.v1.ReachabilityService/UpdateConnectivityTest', request_serializer=reachability.UpdateConnectivityTestRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['update_connectivity_test'] @property def rerun_connectivity_test(self) -> Callable[ [reachability.RerunConnectivityTestRequest], operations_pb2.Operation]: r"""Return a callable for the rerun connectivity test method over gRPC. Rerun an existing ``ConnectivityTest``. After the user triggers the rerun, the reachability analysis is performed as part of the long running operation, which completes when the analysis completes. Even though the test configuration remains the same, the reachability result may change due to underlying network configuration changes. If the endpoint specifications in ``ConnectivityTest`` become invalid (for example, specified resources are deleted in the network, or you lost read permissions to the network configurations of listed projects), then the reachability result returns a value of ``UNKNOWN``. Returns: Callable[[~.RerunConnectivityTestRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'rerun_connectivity_test' not in self._stubs: self._stubs['rerun_connectivity_test'] = self.grpc_channel.unary_unary( '/google.cloud.networkmanagement.v1.ReachabilityService/RerunConnectivityTest', request_serializer=reachability.RerunConnectivityTestRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['rerun_connectivity_test'] @property def delete_connectivity_test(self) -> Callable[ [reachability.DeleteConnectivityTestRequest], operations_pb2.Operation]: r"""Return a callable for the delete connectivity test method over gRPC. Deletes a specific ``ConnectivityTest``. Returns: Callable[[~.DeleteConnectivityTestRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'delete_connectivity_test' not in self._stubs: self._stubs['delete_connectivity_test'] = self.grpc_channel.unary_unary( '/google.cloud.networkmanagement.v1.ReachabilityService/DeleteConnectivityTest', request_serializer=reachability.DeleteConnectivityTestRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['delete_connectivity_test'] __all__ = ( 'ReachabilityServiceGrpcTransport', )
apache-2.0
3,437,077,281,151,357,400
45.792035
96
0.636359
false
4.8947
true
false
false
VlachosGroup/VlachosGroupAdditivity
pgradd/DrawMol.py
1
2230
""" ========================================= Defenition to draw RDKIT mol object (:mod:`pgradd.DrawMol`) ========================================= Coverts a rdkit mol object to a svg image and display. """ from rdkit import Chem from rdkit.Chem import rdDepictor from rdkit.Chem.Draw import rdMolDraw2D from IPython.display import SVG, display # http://rdkit.blogspot.com/2015/02/new-drawing-code.html def moltosvg(mol, highlight=[], molSize=(400, 400), kekulize=True): mc = Chem.Mol(mol.ToBinary()) if kekulize: try: Chem.Kekulize(mc) except Exception: mc = Chem.Mol(mol.ToBinary()) if not mc.GetNumConformers(): rdDepictor.Compute2DCoords(mc) drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0], molSize[1]) # Atom Label opts = drawer.drawOptions() # Atom name and index for i in range(mol.GetNumAtoms()): opts.atomLabels[i] = mol.GetAtomWithIdx(i).GetSymbol()+str(i) # radicals and charges for atom in mol.GetAtoms(): nr = atom.GetNumRadicalElectrons() nc = atom.GetFormalCharge() if nr > 0: string = atom.GetSymbol() + ':'*divmod(nr, 2)[0] +\ '.'*divmod(nr, 2)[1] opts.atomLabels[atom.GetIdx()] += string elif nc == 1: string = atom.GetSymbol() + '+' opts.atomLabels[atom.GetIdx()] += string elif nc > 1: string = atom.GetSymbol() + '+' + str(nc) opts.atomLabels[atom.GetIdx()] += string elif nc == -1: string = atom.GetSymbol() + '-' opts.atomLabels[atom.GetIdx()] += string elif nc < -1: string = atom.GetSymbol() + '-' + str(nc) opts.atomLabels[atom.GetIdx()] += string # highlight if highlight: drawer.DrawMolecule(mc, highlightAtoms=highlight) else: drawer.DrawMolecule(mc) drawer.FinishDrawing() svg = drawer.GetDrawingText() # It seems that the svg renderer used doesn't quite hit the spec. # Here are some fixes to make it work in the notebook, although I think # the underlying issue needs to be resolved at the generation step svg.replace('svg:', '') display(SVG(svg))
mit
5,404,241,152,769,177,000
32.283582
75
0.58296
false
3.506289
false
false
false
aldebaran/qibuild
python/qitest/parsers.py
1
7334
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved. # Use of this source code is governed by a BSD-style license (see the COPYING file). """ Collection of parser fonctions for qitests actions """ from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function import os import qisys.parsers import qitest.project import qibuild.parsers class EmptyTestListException(Exception): """ No test to run exception """ pass def test_parser(parser, with_num_jobs=True): """ Test Parser """ qisys.parsers.worktree_parser(parser) group = parser.add_argument_group("test options") group.add_argument("--perf", dest="perf", action="store_true", help="run perfs tests instead of pure tests.") group.add_argument("-k", "--pattern", dest="patterns", action="append", help="Filter tests matching these patterns") group.add_argument("-x", "--exclude", dest="excludes", action="append", help="Exclude test matching these patterns") group.add_argument("-V", dest="verbose_tests", action="store_true", help="display tests output") group.add_argument("--valgrind", dest="valgrind", action="store_true", help="run tests under valgrind") group.add_argument("--nightmare", dest="nightmare", action="store_true", help="run tests in shuffle and 20 times (apply only to gtest)") group.add_argument("--coverage", dest="coverage", action="store_true", help="run coverage") group.add_argument("--ncpu", dest="num_cpus", default=-1, type=int, help="set number of CPU each test is allowed to use (linux)") group.add_argument("--nightly", action="store_true", dest="nightly") group.add_argument("--break-on-failure", action="store_true", dest="break_on_failure", help="Break on failure (for gtest only)") group.add_argument("--repeat-until-fail", default=0, type=int, metavar="N", help="Repeat tests until they fail (at most N times)") group.add_argument("--qitest-json", dest="qitest_jsons", action="append") group.add_argument("--test-output-dir", type=os.path.abspath, dest="test_output_dir", help="Generate XML test reports in the given directory " "(instead of build-<platform>/sdk/test-results)") group.add_argument("--coverage-output-dir", dest="coverage_output_dir", help="Generate XML and HTML coverage reports in the given " "directory (instead of build-<platform>/sdk/coverage-results)") group.add_argument("--root-output-dir", dest="test_output_dir", metavar="ROOT_OUTPUT_DIR", help="same as --test-output-dir (deprecated)") group.add_argument("--no-capture", dest="capture", action="store_false") group.add_argument("--ignore-timeouts", dest="ignore_timeouts", action="store_true", help="Ignore timeouts when running tests") group.add_argument("--lf", "--last-failed", dest="last_failed", action="store_true", help="Run the failing test from previous run") group.add_argument("--allow-no-test", dest="allow_no_test", action="store_true", help="Don't fail if no tests to run") parser.set_defaults(nightly=False, capture=True, last_failed=False, ignore_timeouts=False) if with_num_jobs: qisys.parsers.parallel_parser(group, default=1) return group def get_test_runner(args, build_project=None, qitest_json=None): """ Get Test Runner """ test_project = None if not qitest_json: qitest_json = vars(args).get("qitest_json") if not qitest_json: candidate = os.path.join(os.getcwd(), "qitest.json") if os.path.exists(candidate): qitest_json = candidate if qitest_json: test_project = qitest.project.TestProject(qitest_json) if not test_project: if build_project: 
test_project = build_project.to_test_project() else: return None test_runner = qibuild.test_runner.ProjectTestRunner(test_project) if build_project: test_runner.cwd = build_project.sdk_directory test_runner.env = build_project.build_worktree.get_env() else: test_runner.cwd = qisys.sh.to_native_path(os.path.dirname(qitest_json)) test_runner.patterns = args.patterns test_runner.excludes = args.excludes test_runner.perf = args.perf test_runner.coverage = args.coverage test_runner.break_on_failure = args.break_on_failure test_runner.valgrind = args.valgrind test_runner.verbose = args.verbose_tests test_runner.num_cpus = args.num_cpus test_runner.num_jobs = args.num_jobs test_runner.repeat_until_fail = args.repeat_until_fail test_runner.nightly = args.nightly test_runner.nightmare = args.nightmare test_runner.test_output_dir = args.test_output_dir test_runner.capture = args.capture test_runner.last_failed = args.last_failed test_runner.ignore_timeouts = args.ignore_timeouts return test_runner def parse_build_projects(args): """ Parse Build Projects """ res = list() try: build_worktree = qibuild.parsers.get_build_worktree(args) solve_deps = False if args.use_deps: solve_deps = True build_projects = qibuild.parsers.get_build_projects( build_worktree, args, solve_deps=solve_deps) for build_project in build_projects: test_runner = None try: test_runner = get_test_runner(args, build_project=build_project) except qibuild.project.NoQiTestJson: pass if test_runner: res.append(test_runner) except (qisys.worktree.NotInWorkTree, qibuild.parsers.CouldNotGuessProjectName): pass return res def get_test_runners(args): """ Get Test Runners """ res = list() qitest_jsons = args.qitest_jsons or list() # first case: qitest.json in current working directory test_runner = get_test_runner(args) if test_runner: res.append(test_runner) # second case: qitest.json specified with --qitest-json for qitest_json in qitest_jsons: test_runner = get_test_runner(args, qitest_json=qitest_json) res.append(test_runner) # third case: parsing build projects build_projects_runners = parse_build_projects(args) # avoid appending a test_runner guessed from a build project # when res already contains a test runner computed from a # --qitest-json argument known_cwds = [x.cwd for x in res] for test_runner in build_projects_runners: if test_runner.cwd not in known_cwds: res.append(test_runner) if args.coverage and not build_projects_runners: raise Exception("""--coverage can only be used from a qibuild CMake project\n""") elif args.coverage: return build_projects_runners if not res: raise EmptyTestListException("Nothing found to test") return res
bsd-3-clause
6,434,639,803,409,143,000
43.993865
94
0.637715
false
3.890716
true
false
false
fake-name/ReadableWebProxy
WebMirror/management/rss_parser_funcs/feed_parse_extractCurrentlyTLingBuniMi.py
1
1148
def extractCurrentlyTLingBuniMi(item):
    """
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol or frag) or 'preview' in item['title'].lower():
        return None

    if item['title'].startswith('[BNM]'):
        return buildReleaseMessageWithType(item, 'Bu ni Mi wo Sasagete Hyaku to Yonen. Elf de Yarinaosu Musha Shugyou', vol, chp, frag=frag, postfix=postfix)
    if item['title'].startswith('[DD]'):
        return buildReleaseMessageWithType(item, 'Doll Dungeon', vol, chp, frag=frag, postfix=postfix)
    if item['title'].startswith('[HCLS]'):
        return buildReleaseMessageWithType(item, 'High Comprehension Low Strength', vol, chp, frag=frag, postfix=postfix)

    tagmap = [
        ('Abyss Domination', 'Abyss Domination', 'translated'),
        ('Nine Yang Sword Saint', 'Nine Yang Sword Saint', 'translated'),
        ('Mysterious World Beast God', 'Mysterious World Beast God', 'translated'),
    ]

    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    return False
bsd-3-clause
-392,690,096,227,605,250
44.96
151
0.690767
false
3.045093
false
false
false
Froff/TFY4115-Simulering
python/Simulation.py
1
1185
from math import sqrt
import Slope


class Simulation:
    SIM_STEP_SIZE = 0.0001
    const_g = -981

    def __init__(self, slope, **kwargs):
        self.slope = slope
        self.t = [0]
        self.x = [Simulation.SIM_STEP_SIZE]
        self.mom_inertia_coefficient = 0
        for name, value in kwargs.items():
            if name == "startingposition":
                self.x = [value]
            if name == "momentofintertiacoefficient":
                self.mom_inertia_coefficient = value

    def runSimulation(self):
        while not self.isFinished():
            self.step()

    def step(self):
        x = self.x[-1]
        dydx = self.slope.dydx(x)
        y = self.slope.f(x) - self.slope.f(0)
        I = self.mom_inertia_coefficient
        g = Simulation.const_g
        step_size = Simulation.SIM_STEP_SIZE
        try:
            self.x.append(x + step_size * sqrt((2*g*y) / ((1 + I) * (1 + dydx**2))))
            self.t.append(self.t[-1] + Simulation.SIM_STEP_SIZE)
        except ValueError:
            print("Math domain error. x={}, y={}".format(x, y))
            exit(2)

    def isFinished(self):
        return self.x[-1] >= self.slope.end
mit
6,737,321,104,293,273,000
30.184211
88
0.533333
false
3.395415
false
false
false
googleapis/googleapis-gen
google/cloud/gkehub/v1alpha2/gkehub-v1alpha2-py/google/cloud/gkehub_v1alpha2/services/gke_hub/pagers.py
1
5811
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional from google.cloud.gkehub_v1alpha2.types import membership class ListMembershipsPager: """A pager for iterating through ``list_memberships`` requests. This class thinly wraps an initial :class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse` object, and provides an ``__iter__`` method to iterate through its ``resources`` field. If there are more pages, the ``__iter__`` method will make additional ``ListMemberships`` requests and continue to iterate through the ``resources`` field on the corresponding responses. All the usual :class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__(self, method: Callable[..., membership.ListMembershipsResponse], request: membership.ListMembershipsRequest, response: membership.ListMembershipsResponse, *, metadata: Sequence[Tuple[str, str]] = ()): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.gkehub_v1alpha2.types.ListMembershipsRequest): The initial request object. response (google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = membership.ListMembershipsRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[membership.ListMembershipsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[membership.Membership]: for page in self.pages: yield from page.resources def __repr__(self) -> str: return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) class ListMembershipsAsyncPager: """A pager for iterating through ``list_memberships`` requests. This class thinly wraps an initial :class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse` object, and provides an ``__aiter__`` method to iterate through its ``resources`` field. If there are more pages, the ``__aiter__`` method will make additional ``ListMemberships`` requests and continue to iterate through the ``resources`` field on the corresponding responses. All the usual :class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. 
""" def __init__(self, method: Callable[..., Awaitable[membership.ListMembershipsResponse]], request: membership.ListMembershipsRequest, response: membership.ListMembershipsResponse, *, metadata: Sequence[Tuple[str, str]] = ()): """Instantiates the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.gkehub_v1alpha2.types.ListMembershipsRequest): The initial request object. response (google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = membership.ListMembershipsRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property async def pages(self) -> AsyncIterable[membership.ListMembershipsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response def __aiter__(self) -> AsyncIterable[membership.Membership]: async def async_generator(): async for page in self.pages: for response in page.resources: yield response return async_generator() def __repr__(self) -> str: return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
apache-2.0
-1,709,948,911,245,092,900
40.507143
95
0.660816
false
4.473441
false
false
false
erccarls/vectorsearch
vectorsearch/word2vec.py
1
4242
from __future__ import division  # py3 "true division"

import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools

import gensim
from gensim.utils import keep_vocab_item

try:
    from queue import Queue, Empty
except ImportError:
    from Queue import Queue, Empty

from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
    uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
    ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray

from gensim import utils, matutils  # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType

logger = logging.getLogger(__name__)

try:
    from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow
    from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
    from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
    # failed... fall back to plain numpy (20-80x slower training than the above)
    FAST_VERSION = -1
    MAX_WORDS_IN_BATCH = 10000


class Word2Vec(gensim.models.Word2Vec):

    def __init__(self, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)
        self._stem_memory = defaultdict(set)

    def most_similar(self, words={}, topn=10, restrict_vocab=None):
        """
        Find the top-N most similar words.

        words : a dict where the words are the keys and the weights are the values.

        This method computes cosine similarity between a simple mean of the projection
        weight vectors of the given words and the vectors for each word in the model.
        The method corresponds to the `word-analogy` and `distance` scripts in the original
        word2vec implementation.

        If topn is False, most_similar returns the vector of similarity scores.

        `restrict_vocab` is an optional integer which limits the range of vectors which
        are searched for most-similar values. For example, restrict_vocab=10000 would only check
        the first 10000 word vectors in the vocabulary order. (This may be
        meaningful if you've sorted the vocabulary by descending frequency.)

        Example::

          >>> trained_model.most_similar({'woman': 1.0, 'king': 1.0, 'man': -1.0})
          [('queen', 0.50882536), ...]

        """
        self.init_sims()

        # if isinstance(positive, string_types) and not negative:
        #     # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
        #     positive = [positive]

        # add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
        # positive = [
        #     (word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
        #     for word in positive
        # ]
        # negative = [
        #     (word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
        #     for word in negative
        # ]

        # compute the weighted average of all words
        all_words, mean = set(), []
        for word, weight in words.items():
            if isinstance(word, ndarray):
                mean.append(weight * word)
            elif word in self.vocab:
                mean.append(weight * self.syn0norm[self.vocab[word].index])
                all_words.add(self.vocab[word].index)
            else:
                # the original built a Warning() without raising it; log instead
                logger.warning("word '%s' not in vocabulary", word)
        if not mean:
            raise ValueError("cannot compute similarity with no input")
        mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)

        limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
        dists = dot(limited, mean)
        if not topn:
            return dists
        best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
        # ignore (don't return) words from the input
        result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
        return result[:topn]
apache-2.0
-2,757,497,388,881,234,400
38.654206
116
0.656294
false
4.055449
false
false
false
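A minimal usage sketch for the weighted `most_similar` variant above. The module name `weighted_word2vec` is hypothetical, and the call assumes the older gensim API this file targets (a model that still exposes `vocab`/`syn0norm` directly):

# Hypothetical module name for the file above; adjust to wherever it lives.
from weighted_word2vec import Word2Vec

# Toy corpus; min_count=1 so every token enters the vocabulary.
sentences = [
    ["king", "man", "crown", "royal"],
    ["queen", "woman", "crown", "royal"],
    ["man", "woman", "child"],
]
model = Word2Vec(sentences, size=20, min_count=1, seed=1)

# One dict of word -> weight replaces gensim's positive/negative lists;
# a negative weight plays the role of negative=['man'].
print(model.most_similar(words={"king": 1.0, "woman": 1.0, "man": -1.0}, topn=3))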
CloudBreadPaPa/azure-ml-python-seminar
code/python/ml-Iris.py
1
1412
import urllib2 # If you are using Python 3+, import urllib instead of urllib2 import json data = { "Inputs": { "input1": { "ColumnNames": ["Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species"], "Values": [ [ "1", "1", "1", "1", "" ], ] }, }, "GlobalParameters": { } } body = str.encode(json.dumps(data)) url = 'https://asiasoutheast.services.azureml.net/workspaces/46d0e60b05b34558827abd41f11d204f/services/acac88a083ce443789028306375ddf56/execute?api-version=2.0&details=true' api_key = '<change here>' # Replace this with the API key for the web service headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)} req = urllib2.Request(url, body, headers) try: response = urllib2.urlopen(req) # If you are using Python 3+, replace urllib2 with urllib.request in the above code: # req = urllib.request.Request(url, body, headers) # response = urllib.request.urlopen(req) result = response.read() print(result) except urllib2.HTTPError, error: print("The request failed with status code: " + str(error.code)) # Print the headers - they include the request ID and the timestamp, which are useful for debugging the failure print(error.info()) print(json.loads(error.read()))
mit
-7,397,852,236,911,984,000
30.377778
173
0.626771
false
3.49505
false
false
false
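The script above is written for Python 2 (`urllib2` and the `except HTTPError, error` form); its own comments describe the Python 3 substitution. A sketch of the same call under Python 3, keeping the endpoint URL and the placeholder API key unchanged:

import json
import urllib.request
import urllib.error

data = {
    "Inputs": {
        "input1": {
            "ColumnNames": ["Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species"],
            "Values": [["1", "1", "1", "1", ""]],
        },
    },
    "GlobalParameters": {},
}
body = str.encode(json.dumps(data))
url = 'https://asiasoutheast.services.azureml.net/workspaces/46d0e60b05b34558827abd41f11d204f/services/acac88a083ce443789028306375ddf56/execute?api-version=2.0&details=true'
api_key = '<change here>'  # Replace this with the API key for the web service
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + api_key}

req = urllib.request.Request(url, body, headers)
try:
    with urllib.request.urlopen(req) as response:
        print(response.read())
except urllib.error.HTTPError as error:
    print("The request failed with status code: " + str(error.code))
    print(error.info())            # headers include the request ID and timestamp
    print(json.loads(error.read()))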
wisechengyi/pants
src/python/pants/util/collections.py
1
3201
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import collections import collections.abc from typing import Any, Callable, DefaultDict, Iterable, List, MutableMapping, Type, TypeVar, Union _K = TypeVar("_K") _V = TypeVar("_V") def factory_dict(value_factory: Callable[[_K], _V], *args, **kwargs) -> DefaultDict: """A dict whose values are computed by `value_factory` when a `__getitem__` key is missing. Note that values retrieved by any other method will not be lazily computed; eg: via `get`. :param value_factory: :param *args: Any positional args to pass through to `dict`. :param **kwrags: Any kwargs to pass through to `dict`. """ class FactoryDict(collections.defaultdict): @staticmethod def __never_called(): raise AssertionError( "The default factory should never be called since we override " "__missing__." ) def __init__(self): super().__init__(self.__never_called, *args, **kwargs) def __missing__(self, key): value = value_factory(key) self[key] = value return value return FactoryDict() def recursively_update(d: MutableMapping, d2: MutableMapping) -> None: """dict.update but which merges child dicts (dict2 takes precedence where there's conflict).""" for k, v in d2.items(): if k in d: if isinstance(v, dict): recursively_update(d[k], v) continue d[k] = v _T = TypeVar("_T") def assert_single_element(iterable: Iterable[_T]) -> _T: """Get the single element of `iterable`, or raise an error. :raise: :class:`StopIteration` if there is no element. :raise: :class:`ValueError` if there is more than one element. """ it = iter(iterable) first_item = next(it) try: next(it) except StopIteration: return first_item raise ValueError(f"iterable {iterable!r} has more than one element.") def ensure_list(val: Union[Any, Iterable[Any]], *, expected_type: Type[_T]) -> List[_T]: """Given either a single value or an iterable of values, always return a list. This performs runtime type checking to ensure that every element of the list is the expected type. """ if isinstance(val, expected_type): return [val] if not isinstance(val, collections.abc.Iterable): raise ValueError( f"The value {val} (type {type(val)}) did not have the expected type {expected_type} " "nor was it an iterable." ) result: List[_T] = [] for i, x in enumerate(val): if not isinstance(x, expected_type): raise ValueError( f"Not all elements of the iterable have type {expected_type}. Encountered the " f"element {x} of type {type(x)} at index {i}." ) result.append(x) return result def ensure_str_list(val: Union[str, Iterable[str]]) -> List[str]: """Given either a single string or an iterable of strings, always return a list.""" return ensure_list(val, expected_type=str)
apache-2.0
1,141,446,506,871,677,600
32.34375
99
0.621993
false
4.046776
false
false
false
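A short usage sketch for the helpers above, assuming the module is importable as `pants.util.collections` (matching the file path shown):

from pants.util.collections import (
    assert_single_element,
    ensure_list,
    ensure_str_list,
    factory_dict,
)

# Values are computed lazily on first __getitem__ of a missing key.
squares = factory_dict(lambda k: k * k)
assert squares[4] == 16

# Normalize "one value or many" arguments, with runtime type checking.
assert ensure_list(3, expected_type=int) == [3]
assert ensure_str_list(("a", "b")) == ["a", "b"]

# Exactly-one-element assertion.
assert assert_single_element(iter([42])) == 42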
devdelay/home-assistant
homeassistant/util/__init__.py
1
13534
"""Helper methods for various modules.""" from collections.abc import MutableSet from itertools import chain import threading import queue from datetime import datetime import re import enum import socket import random import string from functools import wraps from types import MappingProxyType from typing import Any, Sequence from .dt import as_local, utcnow RE_SANITIZE_FILENAME = re.compile(r'(~|\.\.|/|\\)') RE_SANITIZE_PATH = re.compile(r'(~|\.(\.)+)') RE_SLUGIFY = re.compile(r'[^a-z0-9_]+') def sanitize_filename(filename): r"""Sanitize a filename by removing .. / and \\.""" return RE_SANITIZE_FILENAME.sub("", filename) def sanitize_path(path): """Sanitize a path by removing ~ and ..""" return RE_SANITIZE_PATH.sub("", path) def slugify(text: str) -> str: """Slugify a given text.""" text = text.lower().replace(" ", "_") return RE_SLUGIFY.sub("", text) def repr_helper(inp: Any) -> str: """Help creating a more readable string representation of objects.""" if isinstance(inp, (dict, MappingProxyType)): return ", ".join( repr_helper(key)+"="+repr_helper(item) for key, item in inp.items()) elif isinstance(inp, datetime): return as_local(inp).isoformat() else: return str(inp) def convert(value, to_type, default=None): """Convert value to to_type, returns default if fails.""" try: return default if value is None else to_type(value) except (ValueError, TypeError): # If value could not be converted return default def ensure_unique_string(preferred_string: str, current_strings: Sequence[str]) -> str: """Return a string that is not present in current_strings. If preferred string exists will append _2, _3, .. """ test_string = preferred_string current_strings_set = set(current_strings) tries = 1 while test_string in current_strings_set: tries += 1 test_string = "{}_{}".format(preferred_string, tries) return test_string # Taken from: http://stackoverflow.com/a/11735897 def get_local_ip(): """Try to determine the local IP address of the machine.""" try: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Use Google Public DNS server to determine own IP sock.connect(('8.8.8.8', 80)) return sock.getsockname()[0] except socket.error: return socket.gethostbyname(socket.gethostname()) finally: sock.close() # Taken from http://stackoverflow.com/a/23728630 def get_random_string(length=10): """Return a random string with letters and digits.""" generator = random.SystemRandom() source_chars = string.ascii_letters + string.digits return ''.join(generator.choice(source_chars) for _ in range(length)) class OrderedEnum(enum.Enum): """Taken from Python 3.4.0 docs.""" # pylint: disable=no-init, too-few-public-methods def __ge__(self, other): """Return the greater than element.""" if self.__class__ is other.__class__: return self.value >= other.value return NotImplemented def __gt__(self, other): """Return the greater element.""" if self.__class__ is other.__class__: return self.value > other.value return NotImplemented def __le__(self, other): """Return the lower than element.""" if self.__class__ is other.__class__: return self.value <= other.value return NotImplemented def __lt__(self, other): """Return the lower element.""" if self.__class__ is other.__class__: return self.value < other.value return NotImplemented class OrderedSet(MutableSet): """Ordered set taken from http://code.activestate.com/recipes/576694/.""" def __init__(self, iterable=None): """Initialize the set.""" self.end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.map = {} # key --> [key, prev, next] if 
iterable is not None: self |= iterable def __len__(self): """Return the length of the set.""" return len(self.map) def __contains__(self, key): """Check if key is in set.""" return key in self.map def add(self, key): """Add an element to the end of the set.""" if key not in self.map: end = self.end curr = end[1] curr[2] = end[1] = self.map[key] = [key, curr, end] def promote(self, key): """Promote element to beginning of the set, add if not there.""" if key in self.map: self.discard(key) begin = self.end[2] curr = begin[1] curr[2] = begin[1] = self.map[key] = [key, curr, begin] def discard(self, key): """Discard an element from the set.""" if key in self.map: key, prev_item, next_item = self.map.pop(key) prev_item[2] = next_item next_item[1] = prev_item def __iter__(self): """Iteration of the set.""" end = self.end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): """Reverse the ordering.""" end = self.end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] def pop(self, last=True): # pylint: disable=arguments-differ """Pop element of the end of the set. Set last=False to pop from the beginning. """ if not self: raise KeyError('set is empty') key = self.end[1][0] if last else self.end[2][0] self.discard(key) return key def update(self, *args): """Add elements from args to the set.""" for item in chain(*args): self.add(item) def __repr__(self): """Return the representation.""" if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self)) def __eq__(self, other): """Return the comparision.""" if isinstance(other, OrderedSet): return len(self) == len(other) and list(self) == list(other) return set(self) == set(other) class Throttle(object): """A class for throttling the execution of tasks. This method decorator adds a cooldown to a method to prevent it from being called more then 1 time within the timedelta interval `min_time` after it returned its result. Calling a method a second time during the interval will return None. Pass keyword argument `no_throttle=True` to the wrapped method to make the call not throttled. Decorator takes in an optional second timedelta interval to throttle the 'no_throttle' calls. Adds a datetime attribute `last_call` to the method. """ # pylint: disable=too-few-public-methods def __init__(self, min_time, limit_no_throttle=None): """Initialize the throttle.""" self.min_time = min_time self.limit_no_throttle = limit_no_throttle def __call__(self, method): """Caller for the throttle.""" if self.limit_no_throttle is not None: method = Throttle(self.limit_no_throttle)(method) # Different methods that can be passed in: # - a function # - an unbound function on a class # - a method (bound function on a class) # We want to be able to differentiate between function and unbound # methods (which are considered functions). # All methods have the classname in their qualname seperated by a '.' # Functions have a '.' in their qualname if defined inline, but will # be prefixed by '.<locals>.' so we strip that out. is_func = (not hasattr(method, '__self__') and '.' not in method.__qualname__.split('.<locals>.')[-1]) @wraps(method) def wrapper(*args, **kwargs): """Wrapper that allows wrapped to be called only once per min_time. If we cannot acquire the lock, it is running so return None. 
""" # pylint: disable=protected-access if hasattr(method, '__self__'): host = method.__self__ elif is_func: host = wrapper else: host = args[0] if args else wrapper if not hasattr(host, '_throttle'): host._throttle = {} if id(self) not in host._throttle: host._throttle[id(self)] = [threading.Lock(), None] throttle = host._throttle[id(self)] if not throttle[0].acquire(False): return None # Check if method is never called or no_throttle is given force = not throttle[1] or kwargs.pop('no_throttle', False) try: if force or utcnow() - throttle[1] > self.min_time: result = method(*args, **kwargs) throttle[1] = utcnow() return result else: return None finally: throttle[0].release() return wrapper class ThreadPool(object): """A priority queue-based thread pool.""" # pylint: disable=too-many-instance-attributes def __init__(self, job_handler, worker_count=0, busy_callback=None): """Initialize the pool. job_handler: method to be called from worker thread to handle job worker_count: number of threads to run that handle jobs busy_callback: method to be called when queue gets too big. Parameters: worker_count, list of current_jobs, pending_jobs_count """ self._job_handler = job_handler self._busy_callback = busy_callback self.worker_count = 0 self.busy_warning_limit = 0 self._work_queue = queue.PriorityQueue() self.current_jobs = [] self._lock = threading.RLock() self._quit_task = object() self.running = True for _ in range(worker_count): self.add_worker() def add_worker(self): """Add worker to the thread pool and reset warning limit.""" with self._lock: if not self.running: raise RuntimeError("ThreadPool not running") worker = threading.Thread( target=self._worker, name='ThreadPool Worker {}'.format(self.worker_count)) worker.daemon = True worker.start() self.worker_count += 1 self.busy_warning_limit = self.worker_count * 3 def remove_worker(self): """Remove worker from the thread pool and reset warning limit.""" with self._lock: if not self.running: raise RuntimeError("ThreadPool not running") self._work_queue.put(PriorityQueueItem(0, self._quit_task)) self.worker_count -= 1 self.busy_warning_limit = self.worker_count * 3 def add_job(self, priority, job): """Add a job to the queue.""" with self._lock: if not self.running: raise RuntimeError("ThreadPool not running") self._work_queue.put(PriorityQueueItem(priority, job)) # Check if our queue is getting too big. if self._work_queue.qsize() > self.busy_warning_limit \ and self._busy_callback is not None: # Increase limit we will issue next warning. self.busy_warning_limit *= 2 self._busy_callback( self.worker_count, self.current_jobs, self._work_queue.qsize()) def block_till_done(self): """Block till current work is done.""" self._work_queue.join() def stop(self): """Finish all the jobs and stops all the threads.""" self.block_till_done() with self._lock: if not self.running: return # Tell the workers to quit for _ in range(self.worker_count): self.remove_worker() self.running = False # Wait till all workers have quit self.block_till_done() def _worker(self): """Handle jobs for the thread pool.""" while True: # Get new item from work_queue job = self._work_queue.get().item if job is self._quit_task: self._work_queue.task_done() return # Add to current running jobs job_log = (utcnow(), job) self.current_jobs.append(job_log) # Do the job self._job_handler(job) # Remove from current running job self.current_jobs.remove(job_log) # Tell work_queue the task is done self._work_queue.task_done() class PriorityQueueItem(object): """Holds a priority and a value. 
Used within PriorityQueue.""" # pylint: disable=too-few-public-methods def __init__(self, priority, item): """Initialize the queue.""" self.priority = priority self.item = item def __lt__(self, other): """Return the ordering.""" return self.priority < other.priority
mit
-2,104,050,902,340,730,000
30.328704
79
0.570637
false
4.208333
false
false
false
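A brief sketch of the helpers above in use, assuming the Home Assistant version this file belongs to is installed and importable as `homeassistant.util`:

from datetime import timedelta
from homeassistant.util import OrderedSet, Throttle, ensure_unique_string, slugify

assert slugify("Living Room Lamp") == "living_room_lamp"
assert ensure_unique_string("sensor", ["sensor", "sensor_2"]) == "sensor_3"

s = OrderedSet([3, 1, 2])
s.promote(2)                 # move (or add) an element to the front
assert list(s) == [2, 3, 1]

class Sensor:
    @Throttle(timedelta(seconds=30))
    def update(self):
        return "fetched"

sensor = Sensor()
assert sensor.update() == "fetched"   # first call runs
assert sensor.update() is None        # second call within 30 s is throttled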
bxlab/bx-python
lib/bx/align/epo.py
1
11523
"""Classes and utilities for mutliple alignments from the EPO pipeline""" import logging import os import pickle as cPickle import re from collections import namedtuple from ._epo import ( # noqa: F401 bed_union, cummulative_intervals, fastLoadChain, rem_dash ) log = logging.getLogger(__name__) class Chain(namedtuple('Chain', 'score tName tSize tStrand tStart tEnd qName qSize qStrand qStart qEnd id')): """A Chain header as in http://genome.ucsc.edu/goldenPath/help/chain.html chain coordinates are with respect to the strand, so for example tStart on the + strand is the distance from the leftmost position; tStart on the - strand is the distance from the rightmost position.""" __slots__ = () def __str__(self): return "chain {score} {tName} {tSize} {tStrand} {tStart} {tEnd} {qName} {qSize} {qStrand} {qStart} {qEnd} {id}".format(**self._asdict()) @classmethod def _strfactory(cls, line): """factory class method for Chain :param line: header of a chain (in .chain format) """ assert isinstance(line, str), "this is a factory from string" line = line.rstrip().split()[1:] # the first component is the keyword "chain" tup = [t[0](t[1]) for t in zip([int, str, int, str, int, int, str, int, str, int, int, str], line)] return tuple.__new__(cls, tup) @classmethod def _make_from_epo(cls, trg_comp, qr_comp, trg_chrom_sizes, qr_chrom_sizes): """crate a chain of collinear rings from the given components. The target of the chain will always be on the forward strand. This is done to avoid confusion when mapping psl files. So, if trg_comp.strand=-, qr_comp.strand=- (resp. +) the chain header will have tStrand=+, qStrand=+ (resp. -). No strand changes on the other cases. :param trg_comp: target (i.e, the first) component :type trg_comp: L{EPOitem} :param qr_comp: query (i.e, the second) component :type qr_comp: L{EPOitem} :param trg_chrom_sizes: chromosome sizes of the target :type trg_chrom_sizes: dictionary of the type (chrom) --> size :param qr_chrom_sizes: chromosome sizes of the query :type qr_chrom_sizes: dictionary of the type (chrom) --> size :return: A L{Chain} instance""" # size, target, query arrays S, T, Q = [], [], [] # the target strand of the chain must be on the forward strand trg_intervals = trg_comp.intervals(reverse=trg_comp.strand == '-') qr_intervals = qr_comp.intervals(reverse=trg_comp.strand == '-') if len(trg_intervals) == 0 or len(qr_intervals) == 0: log.warning("deletion/insertion only intervals") return None A, B = rem_dash(trg_intervals, qr_intervals) # correct for when cigar starts/ends with dashes (in number of bases) tr_start_correction = max(B[0][0] - A[0][0], 0) tr_end_correction = max(A[-1][1] - B[-1][1], 0) qr_start_correction = max(A[0][0] - B[0][0], 0) qr_end_correction = max(B[-1][1] - A[-1][1], 0) a, b = A.pop(0), B.pop(0) # intervals are 0-base, halfo-open => lengths = coordinate difference while A or B: if a[1] < b[1]: T.append(0) Q.append(A[0][0] - a[1]) S.append(min(a[1], b[1]) - max(a[0], b[0])) a = A.pop(0) elif b[1] < a[1]: Q.append(0) T.append(B[0][0] - b[1]) S.append(min(a[1], b[1]) - max(a[0], b[0])) b = B.pop(0) elif A and B: assert 1 > 2, "there are dash columns" else: break S.append(min(a[1], b[1]) - max(a[0], b[0])) assert len(T) == len(Q) == len(S) - 1, "(S, T, Q) = (%d, %d, %d)" % tuple(map(len, (S, T, Q))) tSize = trg_chrom_sizes[trg_comp.chrom] qSize = qr_chrom_sizes[qr_comp.chrom] # UCSC coordinates are 0-based, half-open and e! 
coordinates are 1-base, closed # chain_start = epo_start - 1 and chain_end = epo_end if qr_comp.strand == '+': chain = Chain( 0, trg_comp.chrom, tSize, "+", (trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction, qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'), (qr_comp.start - 1) + qr_start_correction, qr_comp.end - qr_end_correction, qr_comp.gabid) else: chain = Chain( 0, trg_comp.chrom, tSize, "+", (trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction, qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'), (qr_comp.start - 1) + qr_end_correction, qr_comp.end - qr_start_correction, qr_comp.gabid) # strand correction. in UCSC coordinates this is: size - coord if chain.qStrand == '-': chain = chain._replace( qEnd=chain.qSize - chain.qStart, qStart=chain.qSize - chain.qEnd) assert chain.tEnd - chain.tStart == sum(S) + sum(T), "[%s] %d != %d" % ( str(chain), chain.tEnd - chain.tStart, sum(S) + sum(T)) assert chain.qEnd - chain.qStart == sum(S) + sum(Q), "[%s] %d != %d" % ( str(chain), chain.qEnd - chain.qStart, sum(S) + sum(Q)) return chain, S, T, Q def slice(self, who): "return the slice entry (in a bed6 format), AS IS in the chain header" assert who in ('t', 'q'), "who should be 't' or 'q'" if who == 't': return (self.tName, self.tStart, self.tEnd, self.id, self.score, self.tStrand) else: return (self.qName, self.qStart, self.qEnd, self.id, self.score, self.qStrand) def bedInterval(self, who): "return a BED6 entry, thus DOES coordinate conversion for minus strands" if who == 't': st, en = self.tStart, self.tEnd if self.tStrand == '-': st, en = self.tSize-en, self.tSize-st return (self.tName, st, en, self.id, self.score, self.tStrand) else: st, en = self.qStart, self.qEnd if self.qStrand == '-': st, en = self.qSize-en, self.qSize-st assert en-st == self.qEnd - self.qStart return (self.qName, st, en, self.id, self.score, self.qStrand) @classmethod def _parse_file(cls, path, pickle=False): """parse a .chain file into a list of the type [(L{Chain}, arr, arr, arr) ...] :param fname: name of the file""" fname = path if fname.endswith(".gz"): fname = path[:-3] if fname.endswith('.pkl'): # you asked for the pickled file. I'll give it to you log.debug("loading pickled file %s ...", fname) with open(fname, "rb") as f: return cPickle.load(f) elif os.path.isfile("%s.pkl" % fname): # there is a cached version I can give to you log.info("loading pickled file %s.pkl ...", fname) if os.stat(path).st_mtime > os.stat("%s.pkl" % fname).st_mtime: log.critical("*** pickled file %s.pkl is not up to date ***", fname) try: with open("%s.pkl" % fname, "rb") as f: return cPickle.load(f) except Exception: log.warning("Loading pickled file %s.pkl failed", fname) data = fastLoadChain(path, cls._strfactory) if pickle and not os.path.isfile('%s.pkl' % fname): log.info("pickling to %s.pkl", fname) with open('%s.pkl' % fname, 'wb') as f: cPickle.dump(data, f) return data class EPOitem(namedtuple('Epo_item', 'species gabid chrom start end strand cigar')): "this format is how alignments are delivered from e!" __slots__ = () cigar_pattern = re.compile(r"(\d*)([MD])") def __repr__(self): return str(self) def __str__(self): c = self.cigar[:5] + "..." 
+ self.cigar[-5:] return "(%s %s %s %d %d %s %s)" % tuple(self[:6] + (c,)) @classmethod def _strfactory(cls, line): """factory method for an EPOitem :param line: a line of input""" cmp = line.rstrip().split() chrom = cmp[2] if not chrom.startswith("chr"): chrom = "chr%s" % chrom instance = tuple.__new__( cls, (cmp[0], cmp[1], chrom, int(cmp[3]), int(cmp[4]), {'1': '+', '-1': '-'}[cmp[5]], cmp[6])) span = instance.end - instance.start + 1 m_num = sum((t[1] == "M" and [t[0]] or [0])[0] for t in instance.cigar_iter(False)) if span != m_num: log.warning("[{gabid}] {species}.{chrom}:{start}-{end}.".format(**instance._asdict()) + "(span) %d != %d (matches)" % (span, m_num)) return None return instance @classmethod def _parse_epo(cls, fname): """Load an entire file in the EPO format into a dictionary of the type {gab_id => [Epoitem, ...]} :param fname: file name""" data = {} with open(fname) as fd: for el in (cls._strfactory(_) for _ in fd): if el: data.setdefault(el.gabid, []).append(el) log.info("parsed %d elements from %s", len(data), fname) return data def cigar_iter(self, reverse): """self.cigar => [(length, type) ... ] iterate the cigar :param reverse: whether to iterate in the reverse direction (right-to-left) :type reverse: boolean :return a list of pairs of the type [(length, M/D) ..] """ l = 0 P = self.cigar_pattern data = [] cigar = self.cigar parsed_cigar = re.findall(P, cigar) if reverse: parsed_cigar = parsed_cigar[::-1] for _l, t in parsed_cigar: # 1M is encoded as M l = (_l and int(_l) or 1) # int(_l) cannot be 0 data.append((l, t)) return data def intervals(self, reverse, thr=0): """return a list of (0-based half-open) intervals representing the match regions of the cigar for example 4MD4M2DM with reverse=False will produce [(0,4), (5,9), (11,12)] 4MD4M2DM with reverse=True will produce [(0,1), (3,7), (8,12)] (= 12 - previous interval) :param reverse: whether to iterate in the reverse direction (right-to-left) (this is passed as is to self.cigar_iter) :type reverse: boolean :param thr: shift all intervals by this much :type thr: integer :return: list of pairs""" d = [(thr, thr)] dl = 0 for tup in self.cigar_iter(reverse): if tup[1] == "D": dl = tup[0] else: s = d[-1][1] + dl d.append((s, s+tup[0])) assert d[0] == (thr, thr) # assert that nr. of Ms in the interval == sum of produced intervals assert sum(t[0] for t in self.cigar_iter(False) if t[1] == "M") == sum(t[1]-t[0] for t in d) d_sum = sum(t[1]-t[0] for t in d) assert self.end - self.start + 1 == d_sum, "[ (%d, %d) = %d ] != %d" % ( self.start, self.end, self.end-self.start+1, d_sum) return d[1:] # clip the (thr, thr) entry
mit
7,633,953,274,690,669,000
38.462329
144
0.540484
false
3.350683
false
false
false
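The `intervals()` docstring above works through the cigar "4MD4M2DM"; the following standalone sketch re-implements just that interval logic (no bx-python install required) and checks the documented results:

import re

CIGAR = re.compile(r"(\d*)([MD])")

def match_intervals(cigar, reverse=False):
    # Mirror of EPOitem.cigar_iter + EPOitem.intervals above: a bare "M" means 1M,
    # and "D" runs only shift the start of the next match interval.
    ops = [(int(n) if n else 1, op) for n, op in CIGAR.findall(cigar)]
    if reverse:
        ops = ops[::-1]
    out, gap = [(0, 0)], 0
    for length, op in ops:
        if op == "D":
            gap = length
        else:
            start = out[-1][1] + gap
            out.append((start, start + length))
    return out[1:]

# Reproduces the worked example in the intervals() docstring.
assert match_intervals("4MD4M2DM") == [(0, 4), (5, 9), (11, 12)]
assert match_intervals("4MD4M2DM", reverse=True) == [(0, 1), (3, 7), (8, 12)]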
Arcensoth/cogbot
cogbot/cogs/join_leave/join_leave_server_state.py
1
2346
from discord import Member, Role from discord.ext.commands import Context from cogbot.cogs.abc.base_cog import BaseCogServerState from cogbot.cogs.join_leave.join_leave_options import JoinLeaveOptions class JoinLeaveServerState(BaseCogServerState[JoinLeaveOptions]): async def create_options(self) -> JoinLeaveOptions: return await JoinLeaveOptions().init(self, self.raw_options) async def join_role(self, ctx: Context, author: Member, role_alias: str): try: role_entry = self.options.role_entry_from_alias[role_alias.lower()] role = self.bot.get_role(self.server, role_entry.role_id) await self.bot.add_roles(author, role) await self.bot.say(f"{author.mention} has joined {role}") except: self.log.info(f"{author} failed to join the role: {role_alias}") await self.bot.react_question(ctx) async def leave_role(self, ctx: Context, author: Member, role_alias: str): try: role_entry = self.options.role_entry_from_alias[role_alias] role = self.bot.get_role(self.server, role_entry.role_id) await self.bot.remove_roles(author, role) await self.bot.say(f"{author.mention} has left {role}") except: self.log.info(f"{author} failed to leave the role: {role_alias}") await self.bot.react_question(ctx) async def list_roles(self, ctx: Context, author: Member): role_lines = [] for role_entry in self.options.role_entries: role: Role = self.bot.get_role(self.server, role_entry.role_id) role_lines.append(f"{role}") role_aliases = role_entry.aliases first_role_alias = role_aliases[0] other_role_aliases = role_aliases[1:] role_aliases_line = f" >join {first_role_alias}" if other_role_aliases: other_role_aliases_str = " or ".join( f'"{role_alias}"' for role_alias in other_role_aliases ) role_aliases_line = f"{role_aliases_line} (or {other_role_aliases_str})" role_lines.append(role_aliases_line) roles_str = "\n".join(role_lines) await self.bot.say( f"{author.mention} Available self-assignable roles:\n```\n{roles_str}\n```" )
mit
4,599,399,970,453,194,000
45.92
88
0.6185
false
3.581679
false
false
false
mypinballs/whirlwind
effects.py
1
8263
# Top Rollover Lanes __author__="jim" __date__ ="$Jan 18, 2011 1:36:37 PM$" import procgame import locale from procgame import * base_path = config.value_for_key_path('base_path') game_path = base_path+"games/whirlwind/" class Effects(game.Mode): def __init__(self, game, priority): super(Effects, self).__init__(game, priority) def drive_lamp(self, lamp_name, style='on',time=2): if style == 'slow': self.game.lamps[lamp_name].schedule(schedule=0x00ff00ff, cycle_seconds=0, now=True) elif style == 'medium': self.game.lamps[lamp_name].schedule(schedule=0x0f0f0f0f, cycle_seconds=0, now=True) elif style == 'fast': self.game.lamps[lamp_name].schedule(schedule=0x99999999, cycle_seconds=0, now=True) elif style == 'superfast': self.game.lamps[lamp_name].schedule(schedule=0xaaaaaaaa, cycle_seconds=0, now=True) elif style == 'on': self.game.lamps[lamp_name].enable() elif style == 'off': self.off(lamp_name) elif style == 'smarton': self.game.lamps[lamp_name].schedule(schedule=0xaaaaaaaa, cycle_seconds=0, now=True) self.cancel_delayed(lamp_name+'_on') self.delay(name=lamp_name+'_on', event_type=None, delay=0.6, handler=self.game.lamps[lamp_name].enable) elif style == 'timedon': self.game.lamps[lamp_name].enable() self.cancel_delayed(lamp_name+'_off') self.delay(name=lamp_name+'_off', event_type=None, delay=time, handler=self.off,param=lamp_name) elif style == 'timeout': if time>10: self.cancel_delayed(lamp_name+'_medium') self.delay(name=lamp_name+'_medium', event_type=None, delay=time-10, handler=lambda:self.drive_lamp(lamp_name,'medium')) if time>5: self.cancel_delayed(lamp_name+'_fast') self.delay(name=lamp_name+'_fast', event_type=None, delay=time-5, handler=lambda:self.drive_lamp(lamp_name,'fast')) if time>1: self.cancel_delayed(lamp_name+'_superfast') self.delay(name=lamp_name+'_superfast', event_type=None, delay=time-1, handler=lambda:self.drive_lamp(lamp_name,'superfast')) self.delay(name=lamp_name+'_off', event_type=None, delay=time, handler=self.off,param=lamp_name) def clear_lamp_timers(self,lamp_name): self.cancel_delayed(lamp_name+'_medium') self.cancel_delayed(lamp_name+'_fast') self.cancel_delayed(lamp_name+'_superfast') self.cancel_delayed(lamp_name+'on') self.cancel_delayed(lamp_name+'_off') def off(self,lamp_name): self.clear_lamp_timers(lamp_name) self.game.lamps[lamp_name].disable() # def drive_super_fast(self, lamp_name): # self.game.lamps[lamp_name].schedule(schedule=0x99999999, cycle_seconds=0, now=True) # # def drive_fast(self, lamp_name): # self.game.lamps[lamp_name].schedule(schedule=0x55555555, cycle_seconds=0, now=True) # # def drive_medium(self, lamp_name): # self.game.lamps[lamp_name].schedule(schedule=0x0f0f0f0f, cycle_seconds=0, now=True) def drive_flasher(self, data, style='medium',cycle=0,time=2): if isinstance(data, basestring): flasher_name=data else: flasher_name=data[0] style = data[1] time = data[2] if style == 'slow': self.game.coils[flasher_name].schedule(schedule=0x00003000, cycle_seconds=cycle, now=True) elif style == 'medium': self.game.coils[flasher_name].schedule(schedule=0x30003000, cycle_seconds=cycle, now=True) elif style == 'fast': self.game.coils[flasher_name].schedule(schedule=0x11111111, cycle_seconds=cycle, now=True) elif style == 'super': self.game.coils[flasher_name].schedule(schedule=0x55555555, cycle_seconds=cycle, now=True) elif style == 'super2': self.game.coils[flasher_name].schedule(schedule=0x55055055, cycle_seconds=cycle, now=True) elif style == 'strobe': self.game.coils[flasher_name].schedule(schedule=0xeeeeeeee, cycle_seconds=cycle, 
now=True) elif style == 'chaos': self.game.coils[flasher_name].schedule(schedule=0x019930AB, cycle_seconds=cycle, now=True) elif style == 'fade': self.game.coils[flasher_name].schedule(schedule=0xAAA99933, cycle_seconds=cycle, now=True) if time>0: self.delay(name=flasher_name+'_off', event_type=None, delay=time, handler=self.game.coils[flasher_name].disable) # def strobe_flasher_set(self,flasher_list,time=0.5): # timer = 0 # for fname in flasher_list: # self.delay(name=fname+'strobe', event_type=None, delay=timer, handler=self.drive_flasher, param=[fname,'fast',time]) # timer+=time def strobe_flasher_set(self,flasher_list,time=1,overlap=0.2,repeats=1,enable=True): timer = 0 for i in range(repeats): for fname in flasher_list: if enable: self.delay(name=fname+'strobe', event_type=None, delay=timer, handler=self.drive_flasher, param=[fname,'fast',time+overlap]) timer+=time else: self.cancel_delayed(fname+'strobe') self.game.coils[fname].disable() def strobe_controlled_flasher_set(self,flasher_list,time=0.1,overlap=0.2,repeats=1,enable=True): timer = 0 #playfield flashers sequence=[] for j in range(repeats): sequence += flasher_list for i in range(len(sequence)): def flash(i,time,delay): self.delay(delay=delay,handler=lambda:self.game.switched_coils.drive(name=sequence[i],style='fast',time=time+0.1)) flash(i,time,timer) timer+=time def drive_led(self,lamp_name,colour): if colour=='red': self.led_colour_data(lamp_name,'on','off','off') elif colour=='pink': self.led_colour_data(lamp_name,'on','off','med') elif colour=='magenta': self.led_colour_data(lamp_name,'on','off','on') elif colour=='purple': self.led_colour_data(lamp_name,'med','off','on') elif colour=='skyblue': self.led_colour_data(lamp_name,'off','med','on') elif colour=='blue': self.led_colour_data(lamp_name,'off','off','on') elif colour=='cyan': self.led_colour_data(lamp_name,'off','on','on') elif colour=='turquoise': self.led_colour_data(lamp_name,'off','on','med') elif colour=='green': self.led_colour_data(lamp_name,'off','on','off') elif colour=='limegreen': self.led_colour_data(lamp_name,'med','on','off') elif colour=='yellow': self.led_colour_data(lamp_name,'on','on','off') elif colour=='orange': self.led_colour_data(lamp_name,'on','med','off') elif colour=='white': self.led_colour_data(lamp_name,'on','on','on') elif colour=='black': self.led_colour_data(lamp_name,'off','off','off') def led_colour_data(self,lamp_name,red,blue,green): data=[red,green,blue] name=['Red','Green','Blue'] for i in range(len(data)): if data[i]=='off': self.game.lamps[lamp_name+name[i]].disable() elif data[i]=='on': self.game.lamps[lamp_name+name[i]].enable() elif data[i]=='med': self.game.lamps[lamp_name+name[i]].schedule(schedule=0x80808080, cycle_seconds=0, now=True) # self.game.lamps[lamp_name+name[i]].patter()
gpl-3.0
1,920,861,269,690,406,000
44.15847
148
0.563839
false
3.418701
false
false
false
lilmuck/lilmuck
plugin.video.szenestreams/default.py
1
6874
#!/usr/bin/python # -*- coding: utf-8 -*- import urllib,urllib2,re,xbmcaddon,xbmcplugin,xbmcgui,xbmc,HTMLParser from stream import * htmlparser = HTMLParser.HTMLParser() pluginhandle = int(sys.argv[1]) itemcnt = 0 baseurl = 'http://www.szene-streams.com' settings = xbmcaddon.Addon(id='plugin.video.szene-streams') maxitems = (int(settings.getSetting("items_per_page"))+1)*10 filterUnknownHoster = settings.getSetting("filterUnknownHoster") == 'true' forceMovieViewMode = settings.getSetting("forceMovieViewMode") == 'true' movieViewMode = str(settings.getSetting("movieViewMode")) dbg = False def CATEGORIES(): data = getUrl(baseurl) cats = re.findall('<a[^>]*?class="CatInf"[^>]*?href="(.*?)"[^>]*?>.*?<div class="CatNumInf">(.*?)</div>[^<]*?<div[^>]*?class="CatNameInf">(.*?)</div>', data, re.S|re.I) addDir('Letzte Updates', baseurl, 1, '', True) addDir('Serien', baseurl + '/load', 0, '', True) for (url, num, name) in cats: if 'http:' not in url: url = baseurl + url addDir(name + ' [COLOR=blue](' + num + ')[/COLOR]', url, 1, '', True) xbmc.executebuiltin("Container.SetViewMode(400)") def SERIES(url): data = getUrl(url) cats = re.findall('<a[^>]*?class="CatInf"[^>]*?href="(.*?)"[^>]*?>.*?<div class="CatNumInf">(.*?)</div>[^<]*?<div[^>]*?class="CatNameInf">(.*?)</div>', data, re.S|re.I) addDir('Letzte Updates', baseurl + '/load/0-1', 1, '', True) for (url, num, name) in cats: if 'http:' not in url: url = baseurl + url addDir(name + ' [COLOR=blue](' + num + ')[/COLOR]', url, 1, '', True) xbmc.executebuiltin("Container.SetViewMode(400)") def INDEX(url): global itemcnt nextPageUrl = re.sub('-[\d]+$', '', url) print url data = getUrl(url) movies = re.findall('<div class="ImgWrapNews">[^<]*<a[^<]*<img[^>]*src="([^"]*.[jpg|png])"[^>]*alt="([^"]*)"[^>]*>.*?class="[^"]*entryLink[^"]*".*?href="([^"]*)"', data, re.S|re.I) if movies: for (image, title, url) in movies: if 'http:' not in url: url = baseurl + url addDir(clean(title), url, 2, image, True) itemcnt = itemcnt + 1 nextPage = re.findall('<a class="swchItem"[^>]*onclick="spages\(\'(\d+)\'[^>]*?"[^>]*><span>&raquo;</span>', data, re.S) if nextPage: if itemcnt >= maxitems: addDir('Weiter >>', nextPageUrl + '-' + nextPage[0], 1, '', True) else: INDEX(nextPageUrl + '-' + nextPage[0]) if forceMovieViewMode: xbmc.executebuiltin("Container.SetViewMode(" + movieViewMode + ")") def VIDEOLINKS(url, image): data = getUrl(url) streams = [] raw = re.findall('(<fieldset[^>]*>[^<]*<legend>.*?</fieldset>)', data, re.S) if raw: for each in raw: series = re.findall('<div class="spoiler"><font[^>]*><b[^>]*>(.+?)</b>(.*?)<input', each, re.S|re.I) if not series: series = re.findall('<legend>(.+?)</legend>[^<]*<div class="spoiler">(.*?)<input', each, re.S|re.I) if not series: series = re.findall('<legend>(.+?)</legend>.*?(<iframe.*?</iframe>|<a[^>]*href=".+"[^>]*>).*', each, re.S|re.I) if series: for ser in series: for (s, n) in re.findall('<a[^>]*href="([^"]+)"[^>]*>([^<]*)<', each, re.S|re.I): if dbg: print 'ser1' if ser: n = clean(ser[1]) + ' ' + extractFilename(s) n = clean(n) if n else extractFilename(s) if n: streams += [(n, s)] for s in re.findall('<iframe[^>]*src="([^"]*)"[^>]*>', each, re.S|re.I): if dbg: print 'ser2' if ser: n = clean(ser[1]) if not n: n = 'unknown' if n: streams += [(n, s)] elif re.match('.*?iframe.*?src.*', each, re.S|re.I): if dbg: print 'nonser1' streams += re.findall('<font[^>]*>.*?src=".*?/player/(.*?)\..{3}".*?<iframe.*?src=["|\'](.*?)["|\']', each, re.S|re.I) else: if dbg: print 'nonser2' streams += 
re.findall('<font[^>]*>.*?src=".*?/player/(.*?)\..{3}".*?</font>.*?target="_blank" href=["|\'](.*?)["|\']', each, re.S|re.I) if streams: for (filename, stream) in streams: hoster = get_stream_link().get_hostername(stream) if filterUnknownHoster and hoster == 'Not Supported': continue entry = '[COLOR=blue](' + hoster + ')[/COLOR] ' + filename addLink(entry, clean(stream), 3, image) def clean(s): try: s = htmlparser.unescape(s) except: print "could not unescape string '%s'"%(s) s = re.sub('<[^>]*>', '', s) s = s.replace('_', ' ') s = re.sub('[ ]+', ' ', s) for hit in set(re.findall("&#\d+;", s)): try: s = s.replace(hit, unichr(int(hit[2:-1]))) except ValueError: pass return s.strip('\n').strip() def extractFilename(path): path = re.sub('^.*/', '',clean(path)).replace('.html', '').replace('_', ' ') return re.sub('\.[a-zA-Z]{3}', '', path) def GETLINK(url): stream_url = get_stream_link().get_stream(url) if stream_url: if re.match('^Error: ', stream_url, re.S|re.I): xbmc.executebuiltin("XBMC.Notification(Fehler!, " + re.sub('^Error: ','',stream_url) + ", 4000)") else: listitem = xbmcgui.ListItem(path=stream_url) return xbmcplugin.setResolvedUrl(pluginhandle, True, listitem) def getUrl(url): req = urllib2.Request(url) req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3') response = urllib2.urlopen(req) data = response.read() response.close() return data def get_params(): param=[] paramstring=sys.argv[2] if len(paramstring)>=2: params=sys.argv[2] cleanedparams=params.replace('?','') if (params[len(params)-1]=='/'): params=params[0:len(params)-2] pairsofparams=cleanedparams.split('&') param={} for i in range(len(pairsofparams)): splitparams={} splitparams=pairsofparams[i].split('=') if (len(splitparams))==2: param[splitparams[0]]=splitparams[1] return param def addLink(name, url, mode, image): u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode) liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=image) liz.setInfo( type="Video", infoLabels={ "Title": name } ) liz.setProperty('IsPlayable', 'true') return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz) def addDir(name, url, mode, image, is_folder=False): u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&image="+urllib.quote_plus(image) liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image) liz.setInfo( type="Video", infoLabels={ "Title": name } ) return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=is_folder) params = get_params() url = mode = image = None try: url = urllib.unquote_plus(params["url"]) except: pass try: mode = int(params["mode"]) except: pass try: image = urllib.unquote_plus(params["image"]) except: pass if mode==None or url==None or len(url)<1: CATEGORIES() elif mode==0: SERIES(url) elif mode==1: INDEX(url) elif mode==2: VIDEOLINKS(url, image) elif mode==3: GETLINK(url) xbmcplugin.endOfDirectory(int(sys.argv[1]))
gpl-2.0
-6,652,518,995,713,558,000
40.167665
181
0.608816
false
2.818368
false
false
false
Skyeouyang/Text-Analytics-Project
lexicon analysis.py
1
2398
####################################### ##Author Skye Ouyang ##Date 19th Apr. ####################################### import glob import os def IsNotNull(value): return value is not None and len(value) > 0 #create weapon list dict_weapon = [] weapons = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/weapon_words.txt','r') for weapon in weapons: t = weapon.strip().lower() if (IsNotNull(t)): dict_weapon.append(t) weapons.close() #create bloody words list dict_bloody = [] bloodys = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/bloody_words.txt','r') for bloody in bloodys: b = bloody.strip().lower() if (IsNotNull(b)): dict_bloody.append(b) #create mysterious words list dict_mysterious = [] mysteriouss = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/mysterious_words.txt','r') for mysterious in mysteriouss: m = mysterious.strip().lower() if (IsNotNull(m)): dict_mysterious.append(m) #input data path ="D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/dataset/low_score_novel" allFiles = glob.glob(path + "/*.txt") #file = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/dataset/high_score_novel/01. The Girl with the Dragon Tattoo.txt','r') weapon_cnt = [] bloody_cnt = [] mysterious_cnt = [] for file in allFiles: with open(file) as fle: fiction = fle.read() # set for loop wea_cnt = 0 blo_cnt = 0 mys_cnt = 0 # count of weapon words for word in dict_weapon: if (word in fiction): wea_cnt = wea_cnt + 1 for word in dict_bloody: if (word in fiction): blo_cnt = blo_cnt + 1 for word in dict_mysterious: if (word in fiction): mys_cnt = mys_cnt + 1 print (wea_cnt, blo_cnt , mys_cnt) # write into list weapon_cnt.append(wea_cnt) bloody_cnt.append(blo_cnt) mysterious_cnt.append(mys_cnt) weapon_cnt ''' for file in allFiles: with open (file) as fle: blo_cnt = 0 fiction = fle.read() ''' #file_name = os.path.splitext(path + '/*.txt')[0] #print ('The size of %s is ' % (file_name) + str(len(fiction)))
apache-2.0
-6,619,393,933,516,462,000
27.604938
149
0.582569
false
2.960494
false
false
false
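The scoring above is a presence count: each lexicon word contributes at most 1 per novel. A condensed, self-contained sketch of the same idea on an in-memory string, with small hypothetical lexicons standing in for the .txt files:

def presence_score(text, lexicon):
    # Same scoring as the loops above: each term adds at most 1, so the result
    # is "how many distinct terms appear", not total occurrences.
    return sum(1 for term in lexicon if term in text)

# Hypothetical mini-lexicons in place of weapon_words.txt, bloody_words.txt, mysterious_words.txt.
weapon_words = ["knife", "gun", "rope"]
bloody_words = ["blood", "wound"]
mysterious_words = ["shadow", "whisper", "vanish"]

fiction = "a gun went off in the dark; blood on the floor, and a shadow vanished."
print(presence_score(fiction, weapon_words),       # 1
      presence_score(fiction, bloody_words),       # 1
      presence_score(fiction, mysterious_words))   # 2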
gandalfcode/gandalf
examples/example09.py
1
1749
#============================================================================== # example09.py # Create initial conditions for pure N-body simulation inside the python # script, and then run the simulation to completion while plotting results. #============================================================================== from gandalf.analysis.facade import * import numpy as np import time # Create empty numpy arrays for setting star initial conditions Nstar = 3 x = np.zeros(Nstar) y = np.zeros(Nstar) vx = np.zeros(Nstar) vy = np.zeros(Nstar) m = np.zeros(Nstar) h = 0.000001*np.ones(Nstar) # Set values for each star individually (Note all velocities initially zero) m[0] = 3.0; x[0] = 1.0; y[0] = 3.0 m[1] = 4.0; x[1] = -2.0; y[1] = -1.0 m[2] = 5.0; x[2] = 1.0; y[2] = -1.0 # Create new 2D simulation object and set parameters sim = newsim(ndim=2,sim='nbody') sim.SetParam('ic','python') sim.SetParam('nbody','hermite4ts') sim.SetParam('sub_systems',0) sim.SetParam('Npec',3) sim.SetParam('Nlevels',1) sim.SetParam('Nstar',Nstar) sim.SetParam('tend',80.0) sim.SetParam('dt_snap',1.0) sim.SetParam('noutputstep',128) sim.SetParam('ndiagstep',2048) sim.SetParam('dimensionless',1) sim.SetParam('run_id','BURRAU1') sim.SetParam('out_file_form','su') # Call setup routines and import particle data sim.PreSetupForPython() sim.ImportArray(x,'x','star') sim.ImportArray(y,'y','star') sim.ImportArray(vx,'vx','star') sim.ImportArray(vy,'vy','star') sim.ImportArray(m,'m','star') sim.ImportArray(h,'h','star') sim.SetupSimulation() # Plot the positions of the stars in the x-y plane plot("x","y",type="star") limit("x",-30.0,30.0,window="all") limit("y",-20.0,40.0,window="all") # Run simulation and save plot to file run() block()
gpl-2.0
-2,850,669,717,202,946,000
29.684211
79
0.63522
false
2.867213
false
false
false
Sjc1000/PyRC
UI/Disabled/FriendsList.py
1
2227
#!/usr/bin/env python3 from gi.repository import Gtk, Gdk import json class FriendsList(): servers = {} active_server = None def __init__(self, MainWindow): self.MainWindow = MainWindow self.position = [8, 5, 1, 4] def prebuild(self): self.MainWindow.ui_plugins['UserList'].position = (8, 0, 1, 5) return None def build(self): self.scroll_window = Gtk.ScrolledWindow() self.list = Gtk.ListStore(str, str) self.view = Gtk.TreeView(self.list) self.view.set_activate_on_single_click(True) self.view.set_hexpand(True) self.view.connect('row-activated', self.clicked) text_render = Gtk.CellRendererText() username = Gtk.TreeViewColumn('Friends', text_render, text=0, foreground=1) self.view.append_column(username) self.scroll_window.add(self.view) self.MainWindow.grid.attach(self.scroll_window, *self.position) return None def clicked(self, TreeView, TreePath, TreeViewColumn): print('User list clicked') return None def add_friend(self, connection, nickname): connection.send('MONITOR + ' + nickname) self.servers[connection.server]['friends'][nickname] = {'iter': None, 'online': False} if connection.server == self.active_server: iter = self.list.append([nickname, 'grey']) self.servers[connection.server]['friends'][nickname]['iter'] = iter return None def activate_path(self, server, channel, clicked=False): self.active_server = server #redraw return None def on376(self, connection, *junk): with open('UI/friends.json', 'r') as ffile: friends = json.loads(ffile.read()) if connection.server not in friends: return None self.servers[connection.server] = {'friends': {}} for nickname in sorted(friends[connection.server]): self.add_friend(connection, nickname) connection.send('MONITOR s') return None def on730(self, connection, host, nickname, uhost): if nickname == connection.nickname: return None print( uhost ) return None
gpl-2.0
-5,208,045,553,747,212,000
32.253731
94
0.619668
false
3.955595
false
false
false
wujuguang/sqlalchemy
lib/sqlalchemy/dialects/postgresql/pygresql.py
1
8129
# postgresql/pygresql.py # Copyright (C) 2005-2019 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: postgresql+pygresql :name: pygresql :dbapi: pgdb :connectstring: postgresql+pygresql://user:password@host:port/dbname[?key=value&key=value...] :url: http://www.pygresql.org/ .. note:: The pygresql dialect is **not tested as part of SQLAlchemy's continuous integration** and may have unresolved issues. The recommended PostgreSQL dialect is psycopg2. """ # noqa import decimal import re from .base import _DECIMAL_TYPES from .base import _FLOAT_TYPES from .base import _INT_TYPES from .base import PGCompiler from .base import PGDialect from .base import PGIdentifierPreparer from .base import UUID from .hstore import HSTORE from .json import JSON from .json import JSONB from ... import exc from ... import processors from ... import util from ...sql.elements import Null from ...types import JSON as Json from ...types import Numeric class _PGNumeric(Numeric): def bind_processor(self, dialect): return None def result_processor(self, dialect, coltype): if not isinstance(coltype, int): coltype = coltype.oid if self.asdecimal: if coltype in _FLOAT_TYPES: return processors.to_decimal_processor_factory( decimal.Decimal, self._effective_decimal_return_scale ) elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: # PyGreSQL returns Decimal natively for 1700 (numeric) return None else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype ) else: if coltype in _FLOAT_TYPES: # PyGreSQL returns float natively for 701 (float8) return None elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: return processors.to_float else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype ) class _PGHStore(HSTORE): def bind_processor(self, dialect): if not dialect.has_native_hstore: return super(_PGHStore, self).bind_processor(dialect) hstore = dialect.dbapi.Hstore def process(value): if isinstance(value, dict): return hstore(value) return value return process def result_processor(self, dialect, coltype): if not dialect.has_native_hstore: return super(_PGHStore, self).result_processor(dialect, coltype) class _PGJSON(JSON): def bind_processor(self, dialect): if not dialect.has_native_json: return super(_PGJSON, self).bind_processor(dialect) json = dialect.dbapi.Json def process(value): if value is self.NULL: value = None elif isinstance(value, Null) or ( value is None and self.none_as_null ): return None if value is None or isinstance(value, (dict, list)): return json(value) return value return process def result_processor(self, dialect, coltype): if not dialect.has_native_json: return super(_PGJSON, self).result_processor(dialect, coltype) class _PGJSONB(JSONB): def bind_processor(self, dialect): if not dialect.has_native_json: return super(_PGJSONB, self).bind_processor(dialect) json = dialect.dbapi.Json def process(value): if value is self.NULL: value = None elif isinstance(value, Null) or ( value is None and self.none_as_null ): return None if value is None or isinstance(value, (dict, list)): return json(value) return value return process def result_processor(self, dialect, coltype): if not dialect.has_native_json: return super(_PGJSONB, self).result_processor(dialect, coltype) class _PGUUID(UUID): def bind_processor(self, dialect): if not dialect.has_native_uuid: return super(_PGUUID, self).bind_processor(dialect) uuid = 
dialect.dbapi.Uuid def process(value): if value is None: return None if isinstance(value, (str, bytes)): if len(value) == 16: return uuid(bytes=value) return uuid(value) if isinstance(value, int): return uuid(int=value) return value return process def result_processor(self, dialect, coltype): if not dialect.has_native_uuid: return super(_PGUUID, self).result_processor(dialect, coltype) if not self.as_uuid: def process(value): if value is not None: return str(value) return process class _PGCompiler(PGCompiler): def visit_mod_binary(self, binary, operator, **kw): return ( self.process(binary.left, **kw) + " %% " + self.process(binary.right, **kw) ) def post_process_text(self, text): return text.replace("%", "%%") class _PGIdentifierPreparer(PGIdentifierPreparer): def _escape_identifier(self, value): value = value.replace(self.escape_quote, self.escape_to_quote) return value.replace("%", "%%") class PGDialect_pygresql(PGDialect): driver = "pygresql" statement_compiler = _PGCompiler preparer = _PGIdentifierPreparer @classmethod def dbapi(cls): import pgdb return pgdb colspecs = util.update_copy( PGDialect.colspecs, { Numeric: _PGNumeric, HSTORE: _PGHStore, Json: _PGJSON, JSON: _PGJSON, JSONB: _PGJSONB, UUID: _PGUUID, }, ) def __init__(self, **kwargs): super(PGDialect_pygresql, self).__init__(**kwargs) try: version = self.dbapi.version m = re.match(r"(\d+)\.(\d+)", version) version = (int(m.group(1)), int(m.group(2))) except (AttributeError, ValueError, TypeError): version = (0, 0) self.dbapi_version = version if version < (5, 0): has_native_hstore = has_native_json = has_native_uuid = False if version != (0, 0): util.warn( "PyGreSQL is only fully supported by SQLAlchemy" " since version 5.0." ) else: self.supports_unicode_statements = True self.supports_unicode_binds = True has_native_hstore = has_native_json = has_native_uuid = True self.has_native_hstore = has_native_hstore self.has_native_json = has_native_json self.has_native_uuid = has_native_uuid def create_connect_args(self, url): opts = url.translate_connect_args(username="user") if "port" in opts: opts["host"] = "%s:%s" % ( opts.get("host", "").rsplit(":", 1)[0], opts.pop("port"), ) opts.update(url.query) return [], opts def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.Error): if not connection: return False try: connection = connection.connection except AttributeError: pass else: if not connection: return False try: return connection.closed except AttributeError: # PyGreSQL < 5.0 return connection._cnx is None return False dialect = PGDialect_pygresql
mit
-2,064,211,738,100,849,400
29.56015
97
0.570058
false
4.282929
false
false
false
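The dialect above is selected through the `postgresql+pygresql` connect string given in its docstring. A minimal connection sketch with placeholder host, credentials, and database name:

from sqlalchemy import create_engine, text

# Placeholder URL; requires PyGreSQL (pgdb) >= 5.0 and a SQLAlchemy release
# that still ships this dialect (the 1.3 line this module comes from).
engine = create_engine("postgresql+pygresql://user:password@localhost:5432/mydb")

with engine.connect() as conn:
    print(conn.execute(text("SELECT version()")).scalar())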
LockScreen/Backend
venv/lib/python2.7/site-packages/botocore/docs/sharedexample.py
1
9129
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import re import numbers from botocore.utils import parse_timestamp from datetime import datetime class SharedExampleDocumenter(object): def document_shared_example(self, example, prefix, section, operation_model): """Documents a single shared example based on its definition. :param example: The model of the example :param prefix: The prefix to use in the method example. :param section: The section to write to. :param operation_model: The model of the operation used in the example """ section.style.new_paragraph() section.write(example.get('description')) section.style.new_line() self.document_input(section, example, prefix, operation_model.input_shape) self.document_output(section, example, operation_model.output_shape) def document_input(self, section, example, prefix, shape): input_section = section.add_new_section('input') input_section.style.start_codeblock() if prefix is not None: input_section.write(prefix) params = example['input'] comments = example.get('comments') if comments: comments = comments.get('input') param_section = input_section.add_new_section('parameters') self._document_params(param_section, params, comments, [], shape) closing_section = input_section.add_new_section('input-close') closing_section.style.new_line() closing_section.style.new_line() closing_section.write('print(response)') closing_section.style.end_codeblock() def document_output(self, section, example, shape): output_section = section.add_new_section('output') output_section.writeln('Expected Output:') output_section.style.start_codeblock() params = example.get('output', {}) # There might not be an output, but we will return metadata anyway params['ResponseMetadata'] = {"...": "..."} comments = example.get('comments') if comments: comments = comments.get('output') self._document_dict(output_section, params, comments, [], shape, True) closing_section = output_section.add_new_section('output-close') closing_section.style.end_codeblock() def _document(self, section, value, comments, path, shape): """ :param section: The section to add the docs to. :param value: The input / output values representing the parameters that are included in the example. :param comments: The dictionary containing all the comments to be applied to the example. :param path: A list describing where the documenter is in traversing the parameters. This is used to find the equivalent location in the comments dictionary. 
""" if isinstance(value, dict): self._document_dict(section, value, comments, path, shape) elif isinstance(value, list): self._document_list(section, value, comments, path, shape) elif isinstance(value, numbers.Number): self._document_number(section, value, path) elif shape and shape.type_name == 'timestamp': self._document_datetime(section, value, path) else: self._document_str(section, value, path) def _document_dict(self, section, value, comments, path, shape, top_level=False): dict_section = section.add_new_section('dict-value') self._start_nested_value(dict_section, '{') for key, val in value.items(): path.append('.%s' % key) item_section = dict_section.add_new_section(key) item_section.style.new_line() item_comment = self._get_comment(path, comments) if item_comment: item_section.write(item_comment) item_section.style.new_line() item_section.write("'%s': " % key) # Shape could be none if there is no output besides ResponseMetadata item_shape = None if shape: if shape.type_name == 'structure': item_shape = shape.members.get(key) elif shape.type_name == 'map': item_shape = shape.value self._document(item_section, val, comments, path, item_shape) path.pop() dict_section_end = dict_section.add_new_section('ending-brace') self._end_nested_value(dict_section_end, '}') if not top_level: dict_section_end.write(',') def _document_params(self, section, value, comments, path, shape): param_section = section.add_new_section('param-values') self._start_nested_value(param_section, '(') for key, val in value.items(): path.append('.%s' % key) item_section = param_section.add_new_section(key) item_section.style.new_line() item_comment = self._get_comment(path, comments) if item_comment: item_section.write(item_comment) item_section.style.new_line() item_section.write(key + '=') # Shape could be none if there are no input parameters item_shape = None if shape: item_shape = shape.members.get(key) self._document(item_section, val, comments, path, item_shape) path.pop() param_section_end = param_section.add_new_section('ending-parenthesis') self._end_nested_value(param_section_end, ')') def _document_list(self, section, value, comments, path, shape): list_section = section.add_new_section('list-section') self._start_nested_value(list_section, '[') item_shape = shape.member for index, val in enumerate(value): item_section = list_section.add_new_section(index) item_section.style.new_line() path.append('[%s]' % index) item_comment = self._get_comment(path, comments) if item_comment: item_section.write(item_comment) item_section.style.new_line() self._document(item_section, val, comments, path, item_shape) path.pop() list_section_end = list_section.add_new_section('ending-bracket') self._end_nested_value(list_section_end, '],') def _document_str(self, section, value, path): # We do the string conversion because this might accept a type that # we don't specifically address. 
section.write("'%s'," % str(value)) def _document_number(self, section, value, path): section.write("%s," % str(value)) def _document_datetime(self, section, value, path): datetime_tuple = parse_timestamp(value).timetuple() datetime_str = str(datetime_tuple[0]) for i in range(1, len(datetime_tuple)): datetime_str += ", " + str(datetime_tuple[i]) section.write("datetime(%s)," % datetime_str) def _get_comment(self, path, comments): key = re.sub('^\.', '', ''.join(path)) if comments and key in comments: return '# ' + comments[key] else: return '' def _start_nested_value(self, section, start): section.write(start) section.style.indent() section.style.indent() def _end_nested_value(self, section, end): section.style.dedent() section.style.dedent() section.style.new_line() section.write(end) def document_shared_examples(section, operation_model, example_prefix, shared_examples): """Documents the shared examples :param section: The section to write to. :param operation_model: The model of the operation. :param example_prefix: The prefix to use in the method example. :param shared_examples: The shared JSON examples from the model. """ container_section = section.add_new_section('shared-examples') container_section.style.new_paragraph() container_section.style.bold('Examples') documenter = SharedExampleDocumenter() for example in shared_examples: documenter.document_shared_example( example=example, section=container_section.add_new_section(example['id']), prefix=example_prefix, operation_model=operation_model )
mit
-1,774,796,653,096,055,800
40.684932
80
0.614197
false
4.212737
false
false
false
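A note on the botocore sample above: `document_shared_examples` keys each generated sub-section by the example's `id`, while `document_shared_example` reads the `description`, `input`, `output`, and per-field `comments` (nested under `input`/`output` and keyed by the parameter path). A minimal sketch of one such definition follows; the operation fields and values are invented for illustration and are not taken from any real service model.

# Hypothetical shared-example definition, shaped after the accessors in the code above.
shared_examples = [
    {
        'id': 'list-widgets-1',
        'description': 'Lists a single widget.',
        'input': {'MaxItems': 1},
        'output': {'Widgets': [{'Name': 'widget-1'}]},
        'comments': {
            'input': {'MaxItems': 'limit the page size'},
            'output': {'Widgets[0].Name': 'the widget name'},
        },
    },
]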
disqus/zumanji
src/zumanji/views.py
1
6969
from django.conf import settings from django.core.urlresolvers import reverse from django.db import transaction from django.http import HttpResponseRedirect, HttpResponseForbidden from django.shortcuts import render, get_object_or_404 from django.utils import simplejson from django.views.decorators.csrf import csrf_protect, csrf_exempt from functools import wraps from zumanji.forms import UploadJsonForm from zumanji.helpers import get_trace_data, get_changes, get_git_changes from zumanji.models import Project, Build, BuildTag, Test from zumanji.importer import import_build NOTSET = object() def api_auth(func): @wraps(func) def wrapped(request, *args, **kwargs): if request.REQUEST.get('api_key'): if request.REQUEST['api_key'] != settings.ZUMANJI_CONFIG.get('API_KEY', NOTSET): return HttpResponseForbidden('Invalid api_key') return func(request, *args, **kwargs) return csrf_protect(func)(request, *args, **kwargs) return csrf_exempt(wrapped) def index(request): build_qs = Build.objects.order_by('-revision__datetime', '-datetime').select_related('revision') project_list = [] # lol O(N) for project in Project.objects.all(): try: latest_build = build_qs.filter(project=project)[0] except IndexError: latest_build = None project_list.append((project, latest_build)) return render(request, 'zumanji/index.html', { 'project_list': project_list, }) def view_project(request, project_label): project = get_object_or_404(Project, label=project_label) build_list = list(Build.objects .filter(project=project) .order_by('-revision__datetime', '-datetime') .select_related('revision', 'project')) return render(request, 'zumanji/project.html', { 'project': project, 'build_list': build_list, }) def view_tag(request, project_label, tag_id): project = get_object_or_404(Project, label=project_label) tag = get_object_or_404(BuildTag, pk=tag_id) build_list = list(Build.objects .filter(project=project, tags=tag) .order_by('-datetime') .select_related('revision', 'project')) return render(request, 'zumanji/tag.html', { 'project': project, 'tag': tag, 'build_list': build_list, }) def view_build(request, project_label, build_id, tag_id=None): filter_args = dict(project__label=project_label, id=build_id) tag = None if tag_id: tag = get_object_or_404(BuildTag, id=tag_id) filter_args["tags"] = tag build = get_object_or_404(Build, **filter_args) project = build.project previous_build = build.get_previous_build(tag=tag) next_build = build.get_next_build(tag=tag) test_list = list(build.test_set .filter(parent__isnull=True) .order_by('-upper90_duration')) compare_with = request.GET.get('compare_with') if compare_with: try: compare_build = Build.objects.get(project__label=project_label, id=compare_with) except Build.DoesNotExist: compare_build = None else: compare_build = previous_build changes = get_changes(compare_build, test_list) if compare_build: git_changes = get_git_changes(build, compare_build) else: git_changes = None return render(request, 'zumanji/build.html', { 'project': project, 'tag': tag, 'build': build, 'previous_build': previous_build, 'compare_build': compare_build, 'next_build': next_build, 'test_list': test_list, 'changes': changes, 'git_changes': git_changes, }) def view_test(request, project_label, build_id, test_label): test = get_object_or_404(Test, project__label=project_label, build=build_id, label=test_label) project = test.project build = test.build test_list = list(Test.objects.filter(parent=test) .order_by('-upper90_duration') .select_related('parent')) # this is actually a <Test> 
previous_test_by_build = test.get_test_in_previous_build() next_test_by_build = test.get_test_in_next_build() breadcrumbs = [ (reverse('zumanji:view_build', kwargs={'project_label': project.label, 'build_id': build.id}), 'Build #%s' % build.id) ] last = '' for node in test.get_context(): node_label = node.label[len(last):] breadcrumbs.append( (reverse('zumanji:view_test', kwargs={ 'project_label': project.label, 'build_id': build.id, 'test_label': node.label, }), node_label) ) last = node.label + '.' # include the dot previous_builds = test.get_previous_builds(50) compare_with = request.GET.get('compare_with') if compare_with: try: compare_build = Build.objects.get(project__label=project_label, id=compare_with) except Build.DoesNotExist: compare_build = None else: compare_build = previous_test_by_build.build if previous_test_by_build else None if compare_build: try: compare_test = compare_build.test_set.get(label=test.label) except Test.DoesNotExist: compare_test = None git_changes = get_git_changes(build, compare_build) else: compare_test = None git_changes = None trace_results = get_trace_data(test, compare_test) if previous_test_by_build: tests_to_check = test_list changes = get_changes(compare_build, tests_to_check) else: changes = [] return render(request, 'zumanji/test.html', { 'breadcrumbs': breadcrumbs, 'project': project, 'build': build, 'previous_test_by_build': previous_test_by_build, 'next_test_by_build': next_test_by_build, 'previous_builds': previous_builds, 'test': test, 'test_list': test_list, 'changes': changes, 'compare_build': compare_build, 'trace_results': trace_results, 'git_changes': git_changes, }) @api_auth @transaction.commit_on_success def upload_project_build(request, project_label): project = get_object_or_404(Project, label=project_label) form = UploadJsonForm(request.POST or None, request.FILES or None) if form.is_valid(): data = simplejson.loads(request.FILES['json_file'].read()) try: build = import_build(data, project=project.label, revision=form.cleaned_data.get('revision')) except Exception, e: form.errors['json_file'] = unicode(e) else: return HttpResponseRedirect(reverse('zumanji:view_build', kwargs={ 'project_label': project.label, 'build_id': build.id})) return render(request, 'zumanji/upload_build.html', { 'project': project, 'form': form, })
apache-2.0
3,989,766,211,965,808,000
31.565421
126
0.627924
false
3.656348
true
false
false
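One detail worth calling out in the zumanji views above: the `api_auth` decorator lets a request through when its `api_key` parameter matches `settings.ZUMANJI_CONFIG['API_KEY']`, rejects it when the key is wrong, and falls back to Django's CSRF protection when no key is supplied. A minimal sketch of reusing it on another endpoint follows; the view name and response are invented, only the decorator comes from the sample.

# Hypothetical extra view protected by the api_auth decorator from src/zumanji/views.py.
from django.http import HttpResponse

@api_auth
def ping(request):
    # Reached only with a valid api_key parameter or a CSRF-verified request.
    return HttpResponse('pong')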
ZwEin27/phone-number-matcher
dig_phone_extractor.py
1
23737
# -*- coding: utf-8 -*- # @Author: ZwEin # @Date: 2016-06-21 12:36:47 # @Last Modified by: ZwEin # @Last Modified time: 2016-09-29 21:54:12 import os import re import sys import json import copy import types import string import collections import phonenumbers from datetime import datetime from crf_tokenizer import CrfTokenizer from urlparse import urlparse from string import maketrans from phonenumbers.phonenumberutil import NumberParseException from difflib import SequenceMatcher def is_valid_datetime(raw, date_format): try: datetime.strptime(raw, date_format) return True except ValueError: return False class Preprocessor(): re_prep = re.compile(r'[\(\)]') reg_simple_format = [ r'(?:(?<=[ \A\b-\.\?])\d{3}[ \?\.-]\d{3}[ \?\.-]\d{4}(?=[ \Z\b-\.\?]))' ] re_simple_format = re.compile(r'(?:'+r'|'.join(reg_simple_format)+r')') datetime_regexes = [ r"(?:\d{2}[ _-]\d{2}[ _-]\d{4})", r"(?:\d{4}[ _-]\d{2}[ _-]\d{2})" ] datetime_regex = r"(?:" + r"|".join(datetime_regexes) + ")" re_datetime_regex = re.compile(datetime_regex) re_digits_regex = re.compile(r"\d+") def prep_datetime(self, raw): m = Preprocessor.re_datetime_regex.findall(raw) for d in m: dd = ''.join(Preprocessor.re_digits_regex.findall(d)) if is_valid_datetime(dd, '%Y%m%d') or is_valid_datetime(dd, '%m%d%Y'): raw = raw.replace(d, "") return raw money_regex = r"(?:(?<=[\D])\$\d+(?=[\W_]))" units = ['lbs', 'kg', 'hour', 'hr', 'hh'] unit_regex = r"(?:\d+[\s\W]*(" + r"|".join(units) + "))" others_regexes = [ r"24/7", r"#\d+", r"\d+\'\d+", r"(?<=[\W_])\d{5}[\W_]{1,}\d{5}(?=[\W_])", r"- {1,}\d+$", r"\d+\%" ] other_regex = r"(?:" + "|".join(others_regexes) + ")" all_regexes = [money_regex, unit_regex, other_regex] all_regex = r"(" + r"|".join(all_regexes) + ")" re_all_regex = re.compile(all_regex) def preprocess(self, raw): raw = raw.lower() raw = raw.encode('ascii', 'ignore') raw = self.prep_datetime(raw) raw = Preprocessor.re_prep.sub(' ', raw) raw = Preprocessor.re_all_regex.sub('', raw) raw = Preprocessor.re_simple_format.sub('pnwrapper \g<0> pnwrapper', raw) return raw SOURCE_TYPE_TEXT = 'text' SOURCE_TYPE_URL = 'url' class Tokenizer(): re_2_digts_only_in_url_regex = re.compile(r'(?<=[-_])\d{2}(?=[_/])') re_all_alphabet_in_url_regex = re.compile(r'\w+') def __init__(self, source_type='text'): self.set_source_type(source_type) def set_source_type(self, source_type): """ 'text' or 'url' """ st = source_type.lower() if source_type.lower() not in [SOURCE_TYPE_TEXT, SOURCE_TYPE_URL] : raise Exception(source_type + ' is not a source type, which should be "text" or "url"') self.source_type = source_type def remove_punctuation(self, raw): return raw.translate(string.maketrans("",""), string.punctuation) def tokenize(self, raw): result = None if self.source_type == SOURCE_TYPE_TEXT: result = self.tokenize_text(raw) elif self.source_type == SOURCE_TYPE_URL: result = self.tokenize_url(raw) return ' '.join(result.split()) def tokenize_text(self, raw): t = CrfTokenizer() t.setRecognizeHtmlEntities(True) t.setRecognizeHtmlTags(True) t.setSkipHtmlTags(True) t.setRecognizePunctuation(True) tokens = t.tokenize(raw) tokens = ' '.join(tokens) tokens = self.remove_punctuation(tokens) return tokens def tokenize_url(self, raw): SEPARATOR = ' ' url_obj = urlparse(raw) # parse netloc netloc = url_obj.netloc.split('.')[:-2] # get rid of port numbers, ext and domain name # parse path path = url_obj.path path = Tokenizer.re_2_digts_only_in_url_regex.sub('', path) path = path.split('/') content = netloc + path content = 
[SEPARATOR.join(Tokenizer.re_all_alphabet_in_url_regex.findall(_)) for _ in content] # parse params # url_obj.params # parse query # url_obj.query return ' sep '.join(content) class Cleaner(): def prep_misspelled_numeral_words(self, raw): misspelling_dict = { "th0usand": "thousand", "th1rteen": "thirteen", "f0urteen": "fourteen", "e1ghteen": "eighteen", "n1neteen": "nineteen", "f1fteen": "fifteen", "s1xteen": "sixteen", "th1rty": "thirty", "e1ghty": "eighty", "n1nety": "ninety", "fourty": "forty", "f0urty": "forty", "e1ght": "eight", "f0rty": "forty", "f1fty": "fifty", "s1xty": "sixty", "zer0": "zero", "for": "four", "f0ur": "four", "f1ve": "five", "n1ne": "nine", "0ne": "one", "too": "two", "tw0": "two", "to": "two", "s1x": "six" } for key in misspelling_dict.keys(): raw = raw.replace(key, misspelling_dict[key]) return raw numbers = ['zero', 'one', 'two', 'three', 'four', 'five', 'siz', 'seven', 'eight', 'nine'] re_twenty_x = re.compile(r"(two|twenty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))") re_thirty_x = re.compile(r"(three|thirty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))") re_forty_x = re.compile(r"(four|forty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))") re_fifty_x = re.compile(r"(five|fifty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))") re_sixty_x = re.compile(r"(six|sixty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))") re_seventy_x = re.compile(r"(seven|seventy[\W_]+(?=(\d|" + r"|".join(numbers) + ")))") re_eighty_x = re.compile(r"(eight|eighty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))") re_ninety_x = re.compile(r"(nine|ninety[\W_]+(?=(\d|" + r"|".join(numbers) + ")))") re_ten = re.compile(r"(?<=[ilo0-9])ten(?=[ \b0-9])") re_one = re.compile(r'(?:(?<=([0-9yneorxt]| ))one|(?:(?<=[ils])[i]((?=[ils])|$)))') re_zero = re.compile(r'(?:zero|oh|(?:(?<=[0-9])(o+?))|(?:o(?=[0-9]))|(?:(?<=[o\s])o(?=[o\s])))') def prep_replace_numeral_words(self, raw): raw = raw.replace("hundred", "00") raw = raw.replace("thousand", "000") raw = raw.replace("eleven", "11") raw = raw.replace("twelve", "12") raw = raw.replace("thirteen", "13") raw = raw.replace("fourteen", "14") raw = raw.replace("fifteen", "15") raw = raw.replace("sixteen", "16") raw = raw.replace("seventeen", "17") raw = raw.replace("eighteen", "18") raw = raw.replace("nineteen", "19") raw = Cleaner.re_twenty_x.sub("2", raw) raw = Cleaner.re_thirty_x.sub("3", raw) raw = Cleaner.re_forty_x.sub("4", raw) raw = Cleaner.re_fifty_x.sub("5", raw) raw = Cleaner.re_sixty_x.sub("6", raw) raw = Cleaner.re_seventy_x.sub("7", raw) raw = Cleaner.re_eighty_x.sub("8", raw) raw = Cleaner.re_ninety_x.sub("9", raw) raw = Cleaner.re_ten.sub("10", raw) raw = Cleaner.re_one.sub("1", raw) raw = Cleaner.re_zero.sub("0", raw) raw = raw.replace("twenty", "20") raw = raw.replace("thirty", "30") raw = raw.replace("forty", "40") raw = raw.replace("fifty", "50") raw = raw.replace("sixty", "60") raw = raw.replace("seventy", "70") raw = raw.replace("eighty", "80") raw = raw.replace("ninety", "90") return raw def clean(self, raw): raw = self.prep_misspelled_numeral_words(raw) raw = self.prep_replace_numeral_words(raw) # print raw return raw class ZEExtractor(): def __init__(self): pass prefix = r'(?:(?<=[\A\b\sa-zA-Z])|^)' # prefix = r'\b' # prefix = r'[ ]?' postfix = r'(?:(?=[\Z\b\sa-zA-Z])|$)' # postfix = r'\b' # postfix = r'[ ]?' 
phone_number_format_regex = [ r'(?:'+prefix+r"\d{10,13}"+postfix+r')', r'(?:'+prefix+r"\d{9,10}"+postfix+r')', r'(?:'+prefix+r"\d{8}[ ]\d{3,4}"+postfix+r')', r'(?:'+prefix+r"\d{7}[ ]\d{3,4}"+postfix+r')', r'(?:'+prefix+r"\d{6}[ ]\d{4}"+postfix+r')', r'(?:'+prefix+r"\d{5}[ ]\d{6}"+postfix+r')', r'(?:'+prefix+r"\d{5}[ ]\d{4}[ ]\d{4}"+postfix+r')', r'(?:'+prefix+r"\d{5}[ ]\d{4}"+postfix+r')', r'(?:'+prefix+r"\d{5}[ ]\d{4}[ ]\d{2}[ ]\d{2}"+postfix+r')', r'(?:'+prefix+r"\d{4}[ ]\d{4}[ ]\d{2}"+postfix+r')', r'(?:'+prefix+r"\d{4}[ ]\d{2}[ ]\d{2}[ ]\d{2}[ ]\d{2}"+postfix+r')', r'(?:'+prefix+r"\d{4}[ ]\d{3}[ ]\d{3}"+postfix+r')', r'(?:'+prefix+r"\d{3}[ ]\d{7,8}"+postfix+r')', r'(?:'+prefix+r"\d{3}[ ]\d{4}[ ]\d{4}"+postfix+r')', r'(?:'+prefix+r"\d{3}[ ]\d{4}[ ]\d{3}"+postfix+r')', r'(?:'+prefix+r"\d{3}[ ]\d{3}[ ]\d{4}"+postfix+r')', r'(?:'+prefix+r"\d{3}[ ]\d{3}[ ]\d{3}[ ]\d{1}"+postfix+r')', r'(?:'+prefix+r"\d{3}[ ]\d{3}[ ]\d{2}[ ]\d{1}[ ]\d{1}"+postfix+r')', r'(?:'+prefix+r"\d{3}[ ]\d{3}[ ]\d{1}[ ]\d{3}"+postfix+r')', r'(?:'+prefix+r"\d{3}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{4}"+postfix+r')', r'(?:'+prefix+r"\d{2}[ ]\d{4}[ ]\d{4}"+postfix+r')', r'(?:'+prefix+r"\d{2}[ ]\d{8}"+postfix+r')', r'(?:'+prefix+r"\d{1}[ ]\d{8}[ ]\d{1}"+postfix+r')', # \d{2}[ ] ... r'(?:'+prefix+r"\d{1}[ ]\d{3}[ ]\d{3}[ ]\d{3}"+postfix+r')', r'(?:'+prefix+r"\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')', r'(?:'+prefix+r"\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')', r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')', r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')', r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')', r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')', r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}"+postfix+r')', r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}"+postfix+r')', r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}"+postfix+r')', r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')' ] # numbers_regex = r"(?:" + r"|".join(phone_number_format_regex) + r")" numbers_regex = r"(?:" + r"|".join(phone_number_format_regex) + r")" re_numbers_regex = re.compile(numbers_regex) # print numbers_regex def extract(self, raw): raw = ZEExtractor.re_numbers_regex.findall(raw) raw = [''.join(_.split()) for _ in raw if len(_.strip()) >= 10] return '\t'.join(raw) class Validator(): re_zero = re.compile(r'0{3,}') def validate_phone_number_with_coutry_code(self, raw, country_code='US'): try: z = phonenumbers.parse(raw, country_code) except NumberParseException, e: pass """ if e.error_type == NumberParseException.INVALID_COUNTRY_CODE: # Invalid country code specified return [] elif e.error_type == NumberParseException.NOT_A_NUMBER: # The string passed in had fewer than 3 digits in it. # The number failed to match the regular expression return [] elif e.error_type == NumberParseException.TOO_SHORT_AFTER_IDD: # The string started with an international dialing prefix # but after this was removed, it had fewer digits than any # valid phone number (including country code) could have. 
return [] elif e.error_type == NumberParseException.TOO_SHORT_NSN: # After any country code has been stripped, the string # had fewer digits than any valid phone number could have. return [] elif e.error_type == NumberParseException.TOO_LONG: # String had more digits than any valid phone number could have return [] """ # print e.error_type, e._msg else: if phonenumbers.is_possible_number(z) and phonenumbers.is_valid_number(z): return [raw] else: return [] def validate_phone_number(self, raw): # match all countries if using area_code.get_all_country_iso_two_letter_code() # may include too short phone numbers if use 'DE' country_code_list = ['US', 'CN', 'IN', 'UA', 'JP', 'RU', 'IT', 'DE', 'CA', 'TR'] for country_code in country_code_list: rtn = self.validate_phone_number_with_coutry_code(raw, country_code=country_code) if rtn: return rtn def is_datetime(self, raw): size = len(raw) date_format = '' if size == 14: return is_valid_datetime(raw, '%Y%m%d%H%M%S') elif size == 8: return is_valid_datetime(raw, '%Y%m%d') elif size == 6: return is_valid_datetime(raw, '%Y%m%d') or is_valid_datetime(raw, '%H%M%S') else: return False re_num_digits = [ None, re.compile(r"\d{1}"), re.compile(r"\d{2}"), re.compile(r"\d{3}"), re.compile(r"\d{4}"), re.compile(r"\d{5}"), re.compile(r"\d{6}") ] def is_all_dup_digits(self, raw): for i in range(1, 6): rtn = Validator.re_num_digits[i].findall(raw) if len(raw) % i != 0: continue if all(rtn[0] == rest for rest in rtn): return True return False re_start_zero = re.compile(r'^0+') def suggest_most_overlap(self, extracted_phone_list): def similar(a, b): return SequenceMatcher(None, a, b).ratio() potential_invalid, potential_valid = [], [] for pn in extracted_phone_list: if len(pn) == 10: potential_valid.append(pn) else: potential_invalid.append(pn) ans = list(potential_valid) for pi in potential_invalid: if any(similar(pi, pv) < .3 for pv in potential_valid): ans.append(pi) return ans def validate(self, raw): ans = [] for nums in raw.split('\t'): nums = nums.strip() nums = Validator.re_start_zero.sub('', nums) if len(nums) > 16: continue if len(Validator.re_zero.findall(nums)): continue if self.is_all_dup_digits(nums): continue if self.is_datetime(nums): continue ans += [nums] # valid = self.validate_phone_number(nums) # if valid: # ans.extend(valid) ans = list(set(ans)) ans = self.suggest_most_overlap(ans) return ' '.join(ans) class Normalizer(): # try extracting from this one live escort reviews pnwrapper 754 307 7279 pnwrapper 49 91 3524432077 you won t be disappointedangel re_digits = re.compile(r'(?:(?<=[ \s\b\Aa-zA-Z])[\d ]+(?=[ \s\b\Za-zA-Z]))') def normalize(self, cleaned_output, uncleaned_output, output_format='list'): # print [_.strip() for _ in Normalizer.re_digits.findall(tokenized_content) if _.strip() != ''] if output_format == 'obfuscation': output = [] for co in cleaned_output.split(): phonenum = {} phonenum['telephone'] = co if co in uncleaned_output: phonenum['obfuscation'] = 'False' else: phonenum['obfuscation'] = 'True' output.append(phonenum) return output else: return cleaned_output.split() class PhoneNumberExtractor(object): PN_OUTPUT_FORMAT_LIST = 'list' PN_OUTPUT_FORMAT_OBFUSCATION = 'obfuscation' def __init__(self, _output_format='list'): self.preprocessor = Preprocessor() self.tokenizer = Tokenizer(source_type='text') self.extractor = ZEExtractor() self.cleaner = Cleaner() self.validator = Validator() self.normalizer = Normalizer() self.set_output_format(_output_format) def set_output_format(self, _output_format): # 1. list, 2. 
obfuscation if _output_format not in [PhoneNumberExtractor.PN_OUTPUT_FORMAT_LIST, PhoneNumberExtractor.PN_OUTPUT_FORMAT_OBFUSCATION]: raise Exception('output_format should be "list" or "obfuscation"') self.output_format = _output_format def do_process(self, content, source_type='text', do_preprocess=True, do_tokenize=True, do_clean=True, do_extract=True, do_validate=True): if do_preprocess: content = self.preprocessor.preprocess(content) if do_tokenize: self.tokenizer.set_source_type(source_type) content = self.tokenizer.tokenize(content) if do_clean: content = self.cleaner.clean(content) if do_extract: content = self.extractor.extract(content) if do_validate: content = self.validator.validate(content) return content def match(self, content, source_type='text'): cleaned_ans = self.do_process(content, source_type=source_type) uncleaned_ans = self.do_process(content, source_type=source_type, do_clean=False) return self.normalizer.normalize(cleaned_ans, uncleaned_ans, output_format=self.output_format) ######################################################################## # URLExtractor ######################################################################## import esm import idna import tldextract re_dot = re.compile(r'(?:\s+?dot\s+?)', re.IGNORECASE) reg_url_charactor = '[a-z0-9-.]' re_url_charactor = re.compile(reg_url_charactor, re.IGNORECASE) re_pretld = re.compile(reg_url_charactor+'+?$', re.IGNORECASE) re_posttld = re.compile(':?[0-9]*[/[!#$&-;=?a-z_]+]?', re.IGNORECASE) class URLExtractor(object): def __init_tld_index(): tldindex = esm.Index() tlds = (tldextract.TLDExtract()._get_tld_extractor().tlds) ldindex = esm.Index() for tld in tlds: tldindex.enter('.' + tld.encode('idna')) tldindex.fix() return tldindex tldindex = __init_tld_index() @staticmethod def preprocess(text): def clean(text): text = re_dot.sub('.', text) return text text = clean(text) return text @staticmethod def query(text): ans = [] exts = URLExtractor.tldindex.query(text) for ext in exts: pretld, posttld = None, None url = '' tld = ext[1] startpt, endpt = ext[0][0], ext[0][1] if len(text) > endpt: nextcharacter = text[endpt] if re_url_charactor.match(nextcharacter): continue posttld = re_posttld.match(text[endpt:]) pretld = re_pretld.search(text[:startpt]) if pretld: url = pretld.group(0) startpt -= len(pretld.group(0)) url += tld if posttld: url += posttld.group(0) endpt += len(posttld.group(0)) url = url.rstrip(',.') ans.append(url) ans = list(set([_ for _ in ans if _])) return ans @staticmethod def extract(text): text = text.encode('ascii', 'ignore') text= URLExtractor.preprocess(text) ans = URLExtractor.query(text) return ans # in production # from digExtractor.extractor import Extractor # in test class Extractor: def extract(doc): raise NotImplementedError( "Need to implement extract function" ) # should create a new dictionary each time def get_metadata(): raise NotImplementedError( "Need to implement get_metadata function" ) def set_metadata(): raise NotImplementedError( "Need to implement set_metadata function" ) def get_renamed_input_fields(self): raise NotImplementedError( "Need to implement get_renamed_input_fields function" ) def set_renamed_input_fields(self, renamed_input_fields): if not (isinstance(renamed_input_fields, basestring) or isinstance(renamed_input_fields, types.ListType)): raise ValueError("renamed_input_fields must be a string or a list") self.renamed_input_fields = renamed_input_fields return self class PhoneExtractor(Extractor): def __init__(self): self.renamed_input_fields = '' # ? 
renamed_input_fields def extract(self, doc): urls = URLExtractor.extract(doc) extractor = PhoneNumberExtractor() extracts = [] for url in urls: extracts += extractor.match(url, source_type='url') doc = doc.replace(url, '') extracts += extractor.match(doc, source_type='text') return extracts def get_metadata(self): return copy.copy(self.metadata) def set_metadata(self, metadata): self.metadata = metadata return self def get_renamed_input_fields(self): return self.renamed_input_fields def set_renamed_input_fields(self, renamed_input_fields): if not (isinstance(renamed_input_fields, basestring) or isinstance(renamed_input_fields, types.ListType)): raise ValueError("renamed_input_fields must be a string or a list") self.renamed_input_fields = renamed_input_fields return self if __name__ == '__main__': doc = "71857376 71857376718 test 71857376719 718573767185 71837376718 71981090718 718573767198 719810907185 71857376150 1171857376 http://costarica.backpage.com/BodyRubs/hoy-cerramos-a-las-11-71857376/2909373 Sexy new girl in town searching for a great date wiff u Naughty fresh girl here searching 4 a great date wiff you Sweet new girl in town seeking for a good date with u for80 2sixseven one9zerofor 90hr incall or out call" pe = PhoneExtractor() print pe.extract(doc) """ # Samples # from phone_number_extractor import PhoneNumberExtractor extractor = PhoneNumberExtractor() url_string = "http://costarica.backpage.com/BodyRubs/hoy-cerramos-a-las-11-71857376/2909373" url_phone_numbers = extractor.match(url_string, source_type='url') print url_phone_numbers # text_string = "Sexy new girl in town searching for a great date wiff u Naughty fresh girl here searching 4 a great date wiff you Sweet new girl in town seeking for a good date with u for80 2sixseven one9zerofor 90hr incall or out call" text_string = "71857376 71857376718 test 71857376719 718573767185 71837376718 71981090718 718573767198 719810907185 71857376150 1171857376" text_phone_numbers = extractor.match(text_string, source_type='text') print text_phone_numbers """
apache-2.0
-8,800,745,410,716,933,000
34.694737
433
0.532376
false
3.109786
false
false
false
kobejean/tensorflow
tensorflow/contrib/distribute/python/tpu_strategy.py
1
20404
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TPU Distribution Strategy. This is experimental. It's not ready for general use. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib from tensorflow.contrib.distribute.python import one_device_strategy from tensorflow.contrib.distribute.python import values from tensorflow.contrib.tpu.python.ops import tpu_ops from tensorflow.contrib.tpu.python.tpu import tpu from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib from tensorflow.contrib.tpu.python.tpu import training_loop from tensorflow.python.eager import context from tensorflow.python.eager import tape from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops import variables as variables_lib from tensorflow.python.training import device_util from tensorflow.python.training import distribute as distribute_lib from tensorflow.python.util import nest _TPU_INITIALIZE_SYSTEM_COLLECTION = "TPU_STRATEGY_INITIALIZE" def get_tpu_system_metadata(tpu_cluster_resolver): """Retrieves TPU system metadata given a TPUClusterResolver.""" master = tpu_cluster_resolver.master() # pylint: disable=protected-access cluster_spec = tpu_cluster_resolver.cluster_spec() cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None tpu_system_metadata = ( tpu_system_metadata_lib._query_tpu_system_metadata( master, cluster_def=cluster_def, query_topology=False)) return tpu_system_metadata # TODO(jhseu): Deduplicate with MirroredStrategy? def _create_tpu_mirrored_variable(devices, real_mirrored_creator, *args, **kwargs): # pylint: disable=g-missing-docstring # Figure out what collections this variable should be added to. # We'll add the TPUMirroredVariable to those collections instead. collections = kwargs.pop("collections", None) if collections is None: collections = [ops.GraphKeys.GLOBAL_VARIABLES] kwargs["collections"] = [] # TODO(jhseu): Should we have different behavior for different # synchronization settings? # Get aggregation value # TODO(jhseu): Support aggregation in a tower context. aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE) if aggregation not in [ vs.VariableAggregation.NONE, vs.VariableAggregation.SUM, vs.VariableAggregation.MEAN, vs.VariableAggregation.ONLY_FIRST_TOWER, ]: raise ValueError("Invalid variable aggregation mode: {} for variable: {}" .format(aggregation, kwargs["name"])) # Ignore user-specified caching device, not needed for mirrored variables. 
kwargs.pop("caching_device", None) # TODO(josh11b,apassos): It would be better if variable initialization # was never recorded on the tape instead of having to do this manually # here. with tape.stop_recording(): index = real_mirrored_creator(devices, *args, **kwargs) result = values.TPUMirroredVariable(index, index[devices[0]], aggregation) if not context.executing_eagerly(): g = ops.get_default_graph() # If "trainable" is True, next_creator() will add the member variables # to the TRAINABLE_VARIABLES collection, so we manually remove # them and replace with the MirroredVariable. We can't set # "trainable" to False for next_creator() since that causes functions # like implicit_gradients to skip those variables. if kwargs.get("trainable", True): collections.append(ops.GraphKeys.TRAINABLE_VARIABLES) l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES) for v in index.values(): l.remove(v) g.add_to_collections(collections, result) return result # TODO(jhseu): Stop inheriting from OneDeviceStrategy. class TPUStrategy(one_device_strategy.OneDeviceStrategy): """Experimental TPU distribution strategy implementation.""" def __init__(self, tpu_cluster_resolver, steps_per_run, num_cores=None): """Initializes the TPUStrategy object. Args: tpu_cluster_resolver: A tf.contrib.cluster_resolver.TPUClusterResolver, which provides information about the TPU cluster. steps_per_run: Number of steps to run on device before returning to the host. Note that this can have side-effects on performance, hooks, metrics, summaries etc. This parameter is only used when Distribution Strategy is used with estimator or keras. num_cores: Number of cores to use on the TPU. If None specified, then auto-detect the cores and topology of the TPU system. """ # TODO(sourabhbajaj): OneDeviceStrategy should be initialized with the # master node fetched from the cluster resolver. super(TPUStrategy, self).__init__('/device:CPU:0') self._tpu_cluster_resolver = tpu_cluster_resolver self._tpu_metadata = get_tpu_system_metadata(self._tpu_cluster_resolver) # TODO(sourabhbajaj): Change this from num_cores to metadata_override self._num_cores_override = num_cores # TODO(jhseu): Switch to DeviceAssignment to support pods and model # parallelism. device_map = {d.name: i for i, d in enumerate(self._tpu_metadata.devices) if "device:TPU:" in d.name} self._device_index = values.PerDevice(device_map) self._tpu_devices = sorted(device_map.keys()) # Only create variables for the number of towers we're running. self._tpu_devices = self._tpu_devices[:self.num_towers] # TODO(sourabhbajaj): Remove this once performance of running one step # at a time is comparable to multiple steps. self.steps_per_run = steps_per_run def _get_enqueue_op_per_host(self, host_id, iterator, input_shapes, iterations): """Create an enqueue op for a single host identified using host_id. The while_loop op returned will run `iterations` times and in each run enqueue batches for each shard. Args: host_id: integer, id of the host to run the enqueue ops on. iterator: `tf.data` iterator to read the input data. input_shapes: shape of inputs to be enqueue on the queue. This is same as the value of `nest.flatten(iterator.output_shapes)`. iterations: integer, number of iterations to be run; determines the number of batches to be enqueued. Returns: while_loop_op running `iterations` times; in each run we enqueue a batch on the infeed queue from the host with id `host_id` for each device shard. 
""" host = self.get_host_cpu_device(host_id) def _infeed_enqueue_ops_fn(): """Enqueue ops for one iteration.""" control_deps = [] sharded_inputs = [] enqueue_ops = [] with ops.device(host): for _ in range(self.num_towers_per_host): # Use control dependencies to ensure a deterministic ordering. with ops.control_dependencies(control_deps): inputs = nest.flatten(iterator.get_next()) control_deps.extend(inputs) sharded_inputs.append(inputs) for core_id, shard_input in enumerate(sharded_inputs): enqueue_ops.append( tpu_ops.infeed_enqueue_tuple( inputs=shard_input, shapes=input_shapes, device_ordinal=core_id)) return enqueue_ops def enqueue_ops_loop_body(i): """Callable for the loop body of the while_loop instantiated below.""" with ops.control_dependencies(_infeed_enqueue_ops_fn()): return i + 1 with ops.device(host): enqueue_op_per_host = control_flow_ops.while_loop( lambda i: i < iterations, enqueue_ops_loop_body, [constant_op.constant(0)], parallel_iterations=1) return enqueue_op_per_host def distribute_dataset(self, dataset_fn): # TODO(priyag): Perhaps distribute across cores here. return self._call_dataset_fn(dataset_fn) # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed. # TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have # a mechanism to infer the outputs of `fn`. Pending b/110550782. def _run_steps_on_dataset(self, fn, iterator, iterations, initial_loop_values=None): shapes = nest.flatten(iterator.output_shapes) if any([not s.is_fully_defined() for s in shapes]): raise ValueError( 'TPU currently requires fully defined shapes. Either use ' 'set_shape() on the input tensors or use ' 'dataset.batch(..., drop_remainder=True).') types = nest.flatten(iterator.output_types) enqueue_ops = [ self._get_enqueue_op_per_host(host_id, iterator, shapes, iterations) for host_id in range(self.num_hosts)] def dequeue_fn(): dequeued = tpu_ops.infeed_dequeue_tuple(dtypes=types, shapes=shapes) return nest.pack_sequence_as(iterator.output_shapes, dequeued) # Wrap `fn` for repeat. if initial_loop_values is None: initial_loop_values = {} initial_loop_values = nest.flatten(initial_loop_values) ctx = values.MultiStepContext() def run_fn(*args, **kwargs): """Single step on the TPU device.""" del args, kwargs fn_inputs = dequeue_fn() if not isinstance(fn_inputs, tuple): fn_inputs = (fn_inputs,) fn_result = fn(ctx, *fn_inputs) flat_last_step_outputs = nest.flatten(ctx.last_step_outputs) if flat_last_step_outputs: with ops.control_dependencies([fn_result]): return [array_ops.identity(f) for f in flat_last_step_outputs] else: return fn_result # TODO(sourabhbajaj): The input to while loop should be based on the output # type of the step_fn def iterate_on_tpu(): return training_loop.repeat(iterations, run_fn, initial_loop_values) # We capture the control_flow_context at this point, before we run `fn` # inside a while_loop and TPU replicate context. This is useful in cases # where we might need to exit these contexts and get back to the outer # context to do some things, for e.g. create an op which should be # evaluated only once at the end of the loop on the host. One such usage # is in creating metrics' value op. 
self._outer_control_flow_context = ( ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access replicate_inputs = [[]] * self.num_towers replicate_outputs = tpu.replicate(iterate_on_tpu, replicate_inputs) del self._outer_control_flow_context ctx.run_op = control_flow_ops.group(replicate_outputs, enqueue_ops) # Filter out any ops from the outputs, typically this would be the case # when there were no tensor outputs. last_step_tensor_outputs = [x for x in replicate_outputs if not isinstance(x, ops.Operation)] # Outputs are currently of the structure (grouped by device) # [[output0_device0, output1_device0, output2_device0], # [output0_device1, output1_device1, output2_device1]] # Convert this to the following structure instead: (grouped by output) # [[output0_device0, output0_device1], # [output1_device0, output1_device1], # [output2_device0, output2_device1]] last_step_tensor_outputs = [list(x) for x in zip(*last_step_tensor_outputs)] # Convert replicate_outputs to the original dict structure of # last_step_outputs. last_step_tensor_outputs_dict = nest.pack_sequence_as( ctx.last_step_outputs, last_step_tensor_outputs) for (name, aggregation) in ctx._last_step_outputs_aggregations.items(): # pylint: disable=protected-access output = last_step_tensor_outputs_dict[name] # For outputs that have already been aggregated, take the first value # from the list as each value should be the same. Else return the full # list of values. if aggregation is not variables_lib.VariableAggregation.NONE: # TODO(priyag): Should this return the element or a list with 1 element last_step_tensor_outputs_dict[name] = output[0] ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access return ctx def _call_for_each_tower(self, fn, *args, **kwargs): # TODO(jhseu): Consider making it so call_for_each_tower implies that we're # in a tpu.rewrite(), and update TPUMirroredVariable accordingly. kwargs.pop('run_concurrently', None) with one_device_strategy._OneDeviceTowerContext(self): # pylint: disable=protected-access return fn(*args, **kwargs) def initialize(self): if context.executing_eagerly(): # TODO(priyag): Add appopriate call here when eager is supported for TPUs. raise NotImplementedError('Eager mode not supported in TPUStrategy.') else: # TODO(jhseu): We need this hack because DistributionStrategies must be # pickleable for copy.deepcopy(). Remove when initialize_system goes away. graph = ops.get_default_graph() tpu_init = graph.get_collection(_TPU_INITIALIZE_SYSTEM_COLLECTION) if tpu_init: return tpu_init graph.add_to_collection(_TPU_INITIALIZE_SYSTEM_COLLECTION, tpu.initialize_system()) return graph.get_collection(_TPU_INITIALIZE_SYSTEM_COLLECTION) def finalize(self): if context.executing_eagerly(): # TODO(priyag): Add appopriate call here when eager is supported for TPUs. raise NotImplementedError('Eager mode not supported in TPUStrategy.') else: return [tpu.shutdown_system()] def _get_devices_from(self, colocate_with=None): # TODO(jhseu): Change this when we support model parallelism. return self._tpu_devices def _create_variable(self, next_creator, *args, **kwargs): """Create a TPUMirroredVariable. 
See `DistributionStrategy.scope`.""" colocate_with = kwargs.pop("colocate_with", None) devices = self._get_devices_from(colocate_with) def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring index = {} for i, d in enumerate(devices): with ops.device(d): if i > 0: # Give replicas meaningful distinct names: var0name = index[devices[0]].name.split(":")[0] # We append a / to variable names created on towers with id > 0 to # ensure that we ignore the name scope and instead use the given # name as the absolute name of the variable. kwargs["name"] = "%s/replica_%d/" % (var0name, i) # Initialize replicas with the same value: if context.executing_eagerly(): kwargs["initial_value"] = array_ops.identity( index[devices[0]].value()) else: def initial_value_fn(device=d): with ops.device(device): return array_ops.identity(index[devices[0]].initial_value) kwargs["initial_value"] = initial_value_fn with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT): v = next_creator(*args, **kwargs) assert not isinstance(v, values.TPUMirroredVariable) index[d] = v return index return _create_tpu_mirrored_variable(devices, _real_mirrored_creator, *args, **kwargs) def _reduce(self, aggregation, value, destinations): if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access if aggregation == vs.VariableAggregation.MEAN: # TODO(jhseu): Revisit once we support model-parallelism. value *= (1. / self.num_towers) elif aggregation != vs.VariableAggregation.SUM: raise NotImplementedError( "Currently only support sum & mean in TPUStrategy.") return tpu_ops.cross_replica_sum(value) # Validate that the destination is same as the host device # Note we don't do this when in replicate context as the reduction is # performed on the TPU device itself. devices = cross_tower_ops_lib.get_devices_from(destinations) if len(devices) == 1: assert device_util.canonicalize(devices[0]) == device_util.canonicalize( self.get_host_cpu_device(0)) else: raise ValueError('Multiple devices are not supported for TPUStrategy') if aggregation == vs.VariableAggregation.ONLY_FIRST_TOWER: return value[0] output = math_ops.add_n(value) if aggregation == vs.VariableAggregation.MEAN: return output * (1. / len(value)) return output def _update(self, var, fn, *args, **kwargs): # TODO(jhseu): Consider supporting grouped==False. assert isinstance(var, values.TPUMirroredVariable) if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access return fn(var, *args, **kwargs) # Otherwise, we revert to MirroredStrategy behavior and update each variable # directly. updates = {} for d, v in var._index.items(): # pylint: disable=protected-access name = "update_%d" % self._device_index.get(d) with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name): # If args and kwargs are not mirrored, the value is returned as is. updates[d] = fn(v, *values.select_device_mirrored(d, args), **values.select_device_mirrored(d, kwargs)) # Make a single control dependency to keep the variables mirrored. If one # assignment is fetched, then run all assignments. 
sorted_keys = sorted(updates.keys()) update_tuple = control_flow_ops.tuple([updates[d] for d in sorted_keys]) for i, d in enumerate(sorted_keys): updates[d] = update_tuple[i] return values.regroup(updates, values.Mirrored) def read_var(self, var): assert isinstance(var, values.TPUMirroredVariable) return var.read_value() def _unwrap(self, value): if isinstance(value, list): return value return [value] @property def num_towers(self): return self._num_cores_override or self._tpu_metadata.num_cores @property def num_hosts(self): return self._tpu_metadata.num_hosts @property def num_towers_per_host(self): return self._tpu_metadata.num_of_cores_per_host @property def between_graph(self): return False @property def should_init(self): return True @property def should_checkpoint(self): return True @property def should_save_summary(self): return True @property def worker_devices(self): return self._tpu_devices @property def parameter_devices(self): return self._tpu_devices def get_host_cpu_device(self, host_id): if self._tpu_cluster_resolver.get_master() in ('', 'local'): return '/replica:0/task:0/device:CPU:0' job_name = self._tpu_cluster_resolver.get_job_name() or 'tpu_worker' return '/job:%s/task:%d/device:CPU:0' % (job_name, host_id) def configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None): del cluster_spec, task_type, task_id if session_config: session_config.isolate_session_state = True cluster_spec = self._tpu_cluster_resolver.cluster_spec() if cluster_spec: session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
apache-2.0
6,564,893,554,403,699,000
40.897331
111
0.680259
false
3.88944
false
false
false
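For orientation, the experimental `TPUStrategy` in the TensorFlow sample above is constructed from a cluster resolver plus a `steps_per_run` count, with an optional `num_cores` override. A construction sketch follows, assuming the contrib-era import paths implied by the file's location and its own docstrings; the TPU address is a placeholder.

# Sketch only: instantiating the contrib TPUStrategy shown above (TF 1.x contrib API).
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
from tensorflow.contrib.distribute.python.tpu_strategy import TPUStrategy

resolver = TPUClusterResolver(tpu='grpc://10.0.0.2:8470')  # placeholder TPU address
strategy = TPUStrategy(resolver, steps_per_run=100)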
ladybug-tools/honeybee
honeybee_plus/utilcol.py
1
1078
"""A collection of useful utilities for Honeybee""" import uuid import re def random_name(shorten=True): """Generate a random name as a string using uuid. Args: shorten: If True the name will be the first to segment of uuid. """ if shorten: return '-'.join(str(uuid.uuid4()).split('-')[:2]) else: return str(uuid.uuid4()) def check_name(name): """Check if a name is a valid honeybee name. A valid name can only have alphabet, digits, - and _. """ name = name.encode('utf-8') try: match = re.match(b"^[.A-Za-z0-9_-]*$", name) except TypeError: match = re.match(r"^[.A-Za-z0-9_-]*$", name) if match: return True else: raise ValueError( 'Invalid input name: ({}).' ' Name can only contain letters, numbers,' ' dots, underscores and dashes.'.format(name) ) if __name__ == '__main__': check_name('should_be_fine') # check_name('also-fine') check_name('this.is.also.fine.1234') # check_name('not good')
gpl-3.0
1,852,447,149,315,065,000
24.069767
71
0.56308
false
3.511401
false
false
false
zjj/trac_hack
sample-plugins/HelloWorld.py
1
2140
"""Example macro.""" revision = "$Rev: 6326 $" url = "$URL: https://svn.edgewall.org/repos/trac/tags/trac-0.12.2/sample-plugins/HelloWorld.py $" # # The following shows the code for macro, old-style. # # The `execute` function serves no purpose other than to illustrate # the example, it will not be used anymore. # # ---- (ignore in your own macro) ---- # -- from trac.util import escape def execute(hdf, txt, env): # Currently hdf is set only when the macro is called # From a wiki page if hdf: hdf['wiki.macro.greeting'] = 'Hello World' # args will be `None` if the macro is called without parenthesis. args = txt or 'No arguments' # then, as `txt` comes from the user, it's important to guard against # the possibility to inject malicious HTML/Javascript, by using `escape()`: return 'Hello World, args = ' + escape(args) # -- # ---- (ignore in your own macro) ---- # # The following is the converted new-style macro # # ---- (reuse for your own macro) ---- # -- from trac.wiki.macros import WikiMacroBase class HelloWorldMacro(WikiMacroBase): """Simple HelloWorld macro. Note that the name of the class is meaningful: - it must end with "Macro" - what comes before "Macro" ends up being the macro name The documentation of the class (i.e. what you're reading) will become the documentation of the macro, as shown by the !MacroList macro (usually used in the TracWikiMacros page). """ def expand_macro(self, formatter, name, args): """Return some output that will be displayed in the Wiki content. `name` is the actual name of the macro (no surprise, here it'll be `'HelloWorld'`), `args` is the text enclosed in parenthesis at the call of the macro. Note that if there are ''no'' parenthesis (like in, e.g. [[HelloWorld]]), then `args` is `None`. """ return 'Hello World, args = ' + unicode(args) # Note that there's no need to HTML escape the returned data, # as the template engine (Genshi) will do it for us. # -- # ---- (reuse for your own macro) ----
bsd-3-clause
5,799,304,578,152,899,000
31.923077
97
0.649533
false
3.721739
false
false
false
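The Trac sample above spells out the macro naming rule (the class name must end in `Macro`, and whatever precedes it becomes the macro name) and notes that `args` is `None` when the macro is called without parentheses. In wiki text the two call forms therefore look like the following; the argument string is arbitrary.

[[HelloWorld]]
[[HelloWorld(any text you like)]]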
rymate1234/rymate-blog
migrations/versions/413f129e8b07_.py
1
1535
"""empty message Revision ID: 413f129e8b07 Revises: None Create Date: 2014-05-02 08:09:09.906725 """ # revision identifiers, used by Alembic. revision = '413f129e8b07' down_revision = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('users', sa.Column('id', sa.Integer(), nullable=False), sa.Column('username', sa.String(length=80), nullable=False), sa.Column('email', sa.String(length=80), nullable=False), sa.Column('password', sa.String(length=128), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=False), sa.Column('first_name', sa.String(length=30), nullable=True), sa.Column('last_name', sa.String(length=30), nullable=True), sa.Column('active', sa.Boolean(), nullable=True), sa.Column('is_admin', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('email'), sa.UniqueConstraint('username') ) op.create_table('roles', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('user_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['user_id'], ['users.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name') ) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_table('roles') op.drop_table('users') ### end Alembic commands ###
bsd-3-clause
5,584,822,916,619,234,000
30.979167
65
0.663192
false
3.449438
false
false
false
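The Alembic revision above creates the `users` and `roles` tables in `upgrade()` and drops them in `downgrade()`. A short sketch of applying and reverting it programmatically follows, using Alembic's command API; the `alembic.ini` path is a placeholder for the project's actual configuration.

# Sketch: applying and reverting revision 413f129e8b07 via Alembic's command API.
from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')            # placeholder path to the Alembic config
command.upgrade(cfg, '413f129e8b07')   # create users and roles
command.downgrade(cfg, 'base')         # drop them again (down_revision is None)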