code | repo_name | path | language | license | size
---|---|---|---|---|---
# -*- coding: utf-8 -*-
from .connection import Connection
from .pool import Pool
# Backward compatibility with versions prior to 0.1.7
from .connection import Connection as Conn
__title__ = 'tinys3'
__version__ = '0.1.7'
__author__ = 'Shlomi Atar'
__license__ = 'Apache 2.0'
| URXtech/tinys3 | tinys3/__init__.py | Python | mit | 279 |
from django.apps import AppConfig
class LibraryConfig(AppConfig):
name = 'library'
| airportmarc/the416life | src/apps/library/apps.py | Python | mit | 89 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# Movie Ultra 7K
# Version 0.2 (09.01.2016)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Thanks to the plugintools library by Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import requests
from resources.tools.resolvers import *
playlists = xbmc.translatePath(os.path.join('special://userdata/playlists', ''))
temp = xbmc.translatePath(os.path.join('special://userdata/playlists/tmp', ''))
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
url = 'http://www.chollotv.com/page/2/'
url_ref = 'http://iframe.chollotv.com/'
def chollotv0(params):
plugintools.log("[%s %s] Parser CholloTV.com... %s " % (addonName, addonVersion, repr(params)))
thumbnail = 'https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg'
fanart = 'https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AAChTJOeg7LsgPDxxos5NSyva/fondo tv.jpg'
plugintools.add_item(action="",url="",title="[COLOR blue][B]Chollo [COLOR white]TV[/B][/COLOR]",thumbnail=thumbnail,fanart=fanart,folder=False,isPlayable=False)
plugintools.add_item(action="",url="",title="",thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=False)
plugintools.add_item(action="chollo_categoria",title="[COLOR white][B]Cine y Series[/B][/COLOR]", url="http://www.chollotv.com/category/cine-y-series/",thumbnail="https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg", extra="Cine y Series", fanart=fanart, folder=True, isPlayable=False)
plugintools.add_item(action="chollo_categoria",title="[COLOR white][B]Deportes[/B][/COLOR]", url="http://www.chollotv.com/category/deportes/",thumbnail="https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg", extra="Deportes", fanart=fanart, folder=True, isPlayable=False)
plugintools.add_item(action="chollo_categoria",title="[COLOR white][B]Infantiles[/B][/COLOR]", url="http://www.chollotv.com/category/infantiles/",thumbnail="https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg", extra="Infantiles", fanart=fanart, folder=True, isPlayable=False)
plugintools.add_item(action="chollo_categoria",title="[COLOR white][B]Documentales[/B][/COLOR]", url="http://www.chollotv.com/category/documentales/",thumbnail="https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg", extra="Documentales", fanart=fanart, folder=True, isPlayable=False)
plugintools.add_item(action="chollo_categoria",title="[COLOR white][B]Entretenimiento[/B][/COLOR]", url="http://www.chollotv.com/category/entretenimiento/",thumbnail="https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg", extra="Entretenimiento", fanart=fanart, folder=True, isPlayable=False)
plugintools.add_item(action="chollo_categoria",title="[COLOR white][B]M煤sica[/B][/COLOR]", url="http://www.chollotv.com/category/musica/",thumbnail="https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg", extra="M煤sica", fanart=fanart, folder=True, isPlayable=False)
plugintools.add_item(action="chollo_categoria",title="[COLOR white][B]TDT[/B][/COLOR]", url="http://www.chollotv.com/category/nacionales/",thumbnail="https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg", extra="TDT", fanart=fanart, folder=True, isPlayable=False)
plugintools.add_item(action="chollo_categoria",title="[COLOR white][B]Auton贸micas[/B][/COLOR]", url="http://www.chollotv.com/category/autonomicas/",thumbnail="https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg", extra="Auton贸micas", fanart=fanart, folder=True, isPlayable=False)
plugintools.add_item(action="chollo_categoria",title="[COLOR white][B]Extranjeras[/B][/COLOR]", url="http://www.chollotv.com/category/internacionales/", extra="Extranjeras", thumbnail="https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg", fanart=fanart, folder=True, isPlayable=False)
plugintools.add_item(action="",title="",thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=False)
plugintools.add_item(action="chollo_categoria",title="[COLOR red][B]Adultos[/B][/COLOR]", url="http://www.chollotv.com/category/adultos/", extra="Adultos", thumbnail="https://dl.dropbox.com/sh/i4ccoqhgk7k1t2v/AABmQBAUIKDt_k89dKnUP6nGa/Chollo%20tv.jpg", fanart=fanart, folder=True, isPlayable=False)
## Load the different categories
def chollo_categoria(params):
url = params.get("url")
fanart = params.get("fanart")
thumbnail = params.get("thumbnail")
title = params.get("title")
titulo = params.get("extra")
headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0', "Referer": url}
r=requests.get(url, headers=headers)
#r = requests.get(url)
data = r.content
plugintools.add_item(action="",url="",title="[COLOR blue][B]路路路路路"+titulo+"路路路路路[/B][/COLOR]",thumbnail=thumbnail,fanart=fanart,folder=False,isPlayable=False)
plugintools.add_item(action="",url="",title="",thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=False)
group_channel = plugintools.find_single_match(data,'Categorias</a></li><li class="active"(.*?)class="pagination')
cada_canal = plugintools.find_multiple_matches(group_channel,'<li class="col-lg-6">(.*?)<p class="details">')
for item in cada_canal:
''' special characters in A&E !!! Cipri'''
titulo_canal=plugintools.find_single_match(item,'title="(.*?)"')
caratula_canal=plugintools.find_single_match(item,'src="(.*?)"')
url_primaria=plugintools.find_single_match(item,'href="(.*?)/"')
plugintools.add_item(action="chollo_enlaces",title=titulo_canal,url=url_primaria,thumbnail=caratula_canal,fanart=fanart,folder=False,isPlayable=False)
#Instead of resolving ALL the final URLs up front (which is very slow, since it goes through 3 pages),
#only the primary URL is stored; when a specific channel is selected, it is sent
#to chollo_enlaces() to resolve the final URL and launch it... This speeds the process up enormously!... THANKS CIPRI!!!
#Handle the possibility of more than one page in the category (Cipri's version)
try:
mas_pag='<ul\sclass=[\'"]pagination pagination-success[\'"]>(.*?)<\/ul>'
mas_pag=plugintools.find_single_match(data,mas_pag);
current='<li\sclass=[\'"]active[\'"]><a\shref=[\'"]\#[\'"]>(\d)<\/a><\/li>.*?href=[\'"]([^\d]+)'
current=plugintools.find_multiple_matches(data,current)
link=current[0][1];current=current[0][0];
tot_pag='>(\d)<\/a>'
tot_pag=plugintools.find_multiple_matches(data,tot_pag)
for x in xrange(int(current),len(tot_pag)):
it=link+str(x+1)+'/';
plugintools.add_item(action='chollo_categoria',title="[COLORred][B]P谩gina: " + str(x+1) + " >>>[/B][/COLOR]", url=it, extra=titulo, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False);
except:pass
def chollo_enlaces(params):
url = params.get("url")
fanart = params.get("fanart")
thumbnail = params.get("thumbnail")
title = params.get("title")
headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0', "Referer": url}
r1=requests.get(url, headers=headers)
data1 = r1.content
#extracts a URL like: http://iframe.chollotv.com/channel/ver-canal-plus-liga-2-d which does NOT work for the SportsDevil call either
url_sec=plugintools.find_single_match(data1,'src="http://iframe(.*?)" frameborder')
url_secundaria='http://iframe'+url_sec
headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0', "Referer": url_secundaria}
r2=requests.get(url_secundaria, headers=headers)
data2 = r2.content
#extracts a URL like: http://iframe.chollotv.com/embed/watch-bt-sport-2-live-streaming-online which DOES work for the SportsDevil call
url_final=plugintools.find_single_match(data2,'var url = "(.*?)";')
#09/01/2015: Since some channels being added are not PrivateStream or DinoZap but m3u8, this last page is also read just in case, to detect whether it is one of the m3u8 channels whose structure on the page is different
#It would look like this: src="http://iframe.chollotv.com/stream.php?file=http://a3live-lh.akamaihd.net/i/a3shds/geoa3series_1@122775/master.m3u8&title=atreseries" and we keep the link up to the .m3u8
headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0', "Referer": url_secundaria}
r2=requests.get(url_final, headers=headers)
data2 = r2.content
url_control=plugintools.find_single_match(data2,'id="video_display"(.*?)</div>')
if "file=" in url_control: # Es q NO es un PrivateStream, dinozap, etc... es un m3u8
url_final=plugintools.find_single_match(url_control,'file=(.*?)&title')
else:
url_final=plugintools.find_single_match(url_control,'src="(.*?)"')
url_montada = 'plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url='+url_final+'%26referer='+url_ref
##Launch it directly
xbmc.executebuiltin('XBMC.RunPlugin('+url_montada+')');
| corvorepack/REPOIVAN | plugin.video.movie.ultra.7k/resources/tools/chollotv.py | Python | gpl-2.0 | 9,416 |
from duralex.AbstractVisitor import AbstractVisitor
from duralex.alinea_parser import *
import duralex.tree
class ForkReferenceVisitor(AbstractVisitor):
def visit_node(self, node):
if duralex.tree.is_reference(node) and 'children' in node and len(node['children']) > 1:
ref_nodes = [n for n in node['children'] if duralex.tree.is_reference(n)]
for i in range(1, len(ref_nodes)):
ref = ref_nodes[i]
fork = copy_node(node, recursive=False)
remove_node(node, ref)
push_node(fork, ref)
push_node(node['parent'], fork)
super(ForkReferenceVisitor, self).visit_node(node)
| Legilibre/duralex | duralex/ForkReferenceVisitor.py | Python | mit | 693 |
### BEGIN LICENSE
# Copyright (C) 2011 Guillaume Hain <zedtux@zedroot.org>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>
### END LICENSE
import re
import gconf
## GConf setup
# GConf root path
GCONF_ROOT_DIR = "/apps/naturalscrolling"
class InvalidKey(Exception):
""" Raised class when key is unknown """
class InvalidKeyType(Exception):
""" Raised class when key type is unknown """
class GConfServer(object):
# Singleton
_instance = None
_init_done = False
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(GConfServer, cls).__new__(cls, *args,
**kwargs)
return cls._instance
def __init__(self):
"""
Open connection to GConf
and connect to callback on naturalscrolling keys updates
"""
if self._init_done:
return
if not hasattr(self, "__key_update_observators"):
self.__key_update_observators = {}
if not hasattr(self, "__keys_update_observators"):
self.__keys_update_observators = []
if not hasattr(self, "client"):
# Get GConf client:
self.client = gconf.client_get_default()
# Add the root directory to the list of directories that our GConf
# client will watch for changes:
self.client.add_dir(GCONF_ROOT_DIR, gconf.CLIENT_PRELOAD_NONE)
# Assign a callback function for when changes are made to keys in
# the root directory namespace:
self.client.notify_add(GCONF_ROOT_DIR, self.on_settings_changed)
self._init_done = True
def fire_me_when_update_on_key(self, key, method):
"""
Register a class instance method to fire
when an update on the given key has been caught
"""
self.__key_update_observators[key] = method
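# Illustrative usage sketch (not part of the original module; the key path and
# handler below are assumptions for the example):
#
#   def on_device_toggled(new_value):
#       print("natural scrolling is now %s" % new_value)
#
#   GConfServer().fire_me_when_update_on_key(
#       "/apps/naturalscrolling/<device-xid>", on_device_toggled)
#
# GConfServer() always returns the same instance (see __new__ above), so
# observers registered anywhere in the application share a single connection.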
def on_update_fire(self, method):
"""
Register a method to fire when a key of the GConf root path
has been updated
"""
self.__keys_update_observators.append(method)
def on_settings_changed(self, client, timestamp, entry, *extra):
"""
This is the callback function that is called when the keys in our
namespace change (such as editing them with gconf-editor).
"""
# Do nothing when the key has been removed
if not entry.value:
return
key = entry.key
gconf_key = GConfKey(key, entry.value.type)
self.execute_callback_on_observers(key, gconf_key)
def execute_callback_on_observers(self, key, gconf_key):
if key in self.__key_update_observators:
# Execute observer's method passing GConf key value as parameter
self.__key_update_observators[key](gconf_key.get_value())
if self.__keys_update_observators:
for observator in self.__keys_update_observators:
observator(gconf_key.name, gconf_key.get_value())
def entries(self):
"""
Return a list of all entries from naturalscrolling root path
"""
return self.client.all_entries("/apps/naturalscrolling")
class GConfKey(object):
class KeyDoesntExits(Exception):
pass
def __init__(self, key, type=None):
self.__gconf = GConfServer().client
self.__value = None
if key.startswith(GCONF_ROOT_DIR):
self.__key = key
self.__name = self._without_root_path(key)
else:
self.__key = self._with_root_path(key)
self.__name = key
if type:
self.__type = type
else:
try:
self.__type = self.__gconf.get(self.__key).type
except AttributeError:
raise GConfKey.KeyDoesntExits(_("Can't find the key '%s'") %
self.__key)
def get_name(self):
return self.__name
name = property(get_name)
def _without_root_path(self, key):
return re.sub("%s/" % GCONF_ROOT_DIR, "", key)
def _with_root_path(self, key):
return "%s/%s" % (GCONF_ROOT_DIR, key)
def get_value(self):
"""
Magic method to read the value from GConf (auto cast)
"""
if self.__type == gconf.VALUE_BOOL:
return self.__gconf.get_bool(self.__key)
elif self.__type == gconf.VALUE_STRING:
return self.__gconf.get_string(self.__key)
elif self.__type == gconf.VALUE_INT:
return self.__gconf.get_int(self.__key)
else:
raise InvalidKeyType(_("Can't read the value for type '%s'") %
self.__type)
def set_value(self, value):
"""
Magic method to write the value to GConf (auto cast)
"""
if self.__type == gconf.VALUE_BOOL:
self.__gconf.set_bool(self.__key, value)
elif self.__type == gconf.VALUE_STRING:
self.__gconf.set_string(self.__key, value)
elif self.__type == gconf.VALUE_INT:
self.__gconf.set_int(self.__key, value)
else:
raise InvalidKeyType(_("Can't write the value '%s'"
" for type '%s'") % (value, self.__type))
def is_enable(self):
return self.__value == True
def enable(self):
"""
Set a boolean key value to True
"""
self.__value = True
self.set_value(self.__value)
def disable(self):
"""
Set a boolean key value to False
"""
self.__value = False
self.set_value(self.__value)
def find_or_create(self):
"""
Check if the current instance of GConfKey exists otherwise create it
"""
if not self.__gconf.get(self.__key):
self.set_value(False)
def remove(self):
""" Remove the key from GConf """
self.__gconf.unset(self.__key)
class GConfSettings(object):
def server(self):
"""
Return the Singleton instance of the GConfServer
"""
return GConfServer()
def initialize(self, devices):
"""
Check that all keys exist
Create any missing keys
"""
for device in devices:
if not device.keys()[0]:
print (_("Warning: The XID of the device with name %s "
"wasn't found") % device.values()[0])
else:
gconf_key = GConfKey(device.keys()[0], gconf.VALUE_BOOL)
gconf_key.find_or_create()
# Since this is the initialization step, if there is at least one
# observer, fire it with the current configuration
self.server().execute_callback_on_observers(device.keys()[0],
gconf_key)
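# For reference (inferred from the code above, not from the original docs):
# `devices` is expected to be a list of single-entry dicts mapping a device
# XID to its name, e.g. [{"11": "SynPS/2 Synaptics TouchPad"}] (values are
# illustrative); the XID is also used as the GConf key name under
# GCONF_ROOT_DIR.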
def key(self, key, type=None):
"""
Ruby-styled method to define which key to check.
This method returns an instance of the GConfKey class,
otherwise raises an InvalidKey or InvalidKeyType
"""
return GConfKey(key, self.python_type_to_gconf_type(type))
def python_type_to_gconf_type(self, type):
"""
Convert a Python type (bool, int, str, ...) to GConf type
"""
if type == bool:
return gconf.VALUE_BOOL
elif type == str:
return gconf.VALUE_STRING
elif type == int:
return gconf.VALUE_INT
def keys(self):
"""
Return a list of all keys for natural scrolling
"""
return GConfServer().client.all_entries(GCONF_ROOT_DIR)
def activated_devices_xids(self):
"""
Return a list of all XIDs of devices where naturalscrolling was
registered as activated.
"""
activated_devices_xids = []
for entry in self.server().entries():
try:
gconf_key = GConfKey(entry.key)
if gconf_key.get_value():
activated_devices_xids.append(gconf_key.name)
except GConfKey.KeyDoesntExits:
# Pass the removed key
pass
return activated_devices_xids
| cemmanouilidis/naturalscrolling | naturalscrolling_lib/gconfsettings.py | Python | gpl-3.0 | 8,829 |
'''
Module: cell
Author: David Frye
Description: Contains the Cell class.
'''
from utility import Direction
class Cell:
'''
Class: Cell
Description: Represents an individual cell in a maze.
'''
# The print character for a visited cell.
VISITED_STRING = " "
# The print character for an unvisited cell.
UNVISITED_STRING = "/"
# The print character for a horizontal wall.
WALL_HORIZONTAL_STRING = "-"
# The print character for a vertical wall.
WALL_VERTICAL_STRING = "|"
def __init__(self, content, position):
'''
Method: __init__
Description: Cell constructor.
Parameters: content, position
content: String - A string visually representing the cell
position: 2-Tuple - The cell's position in the maze that owns it
[0] - Cell's x-position
[1] - Cell's y-position
Return: None
'''
self.m_content = content
self.m_position = position
self.m_visited = False
self.m_walls = {
Direction.NORTH : True,
Direction.EAST : True,
Direction.SOUTH : True,
Direction.WEST : True
}
def visit(self):
'''
Method: visit
Description: Sets the cell's content attribute into the visited state.
Parameters: No parameters
Return: None
'''
self.m_visited = True
self.m_content = self.VISITED_STRING
def unvisit(self):
'''
Method: unvisit
Description: Sets the cell's content attribute into the unvisited state.
Parameters: No parameters
Return: None
'''
self.m_visited = False
self.m_content = self.UNVISITED_STRING
def is_visited(self):
'''
Method: is_visited
Description: Determines whether or not the cell is in the visited state.
Parameters: No parameters
Return: Boolean - Whether or not the cell is in the visited state
'''
return self.m_visited
def get_content(self):
'''
Method: get_content
Description: Gets the cell's content attribute.
Parameters: No parameters
Return: String - Cell's content attribute
'''
return self.m_content
def get_position_x(self):
'''
Method: get_position_x
Description: Gets the cell's x-position attribute.
Parameters: No parameters
Return: String - Cell's x-position attribute
'''
return self.m_position[0]
def get_position_y(self):
'''
Method: get_position_y
Description: Gets the cell's y-position attribute.
Parameters: No parameters
Return: String - Cell's y-position attribute
'''
return self.m_position[1]
def get_wall(self, direction):
'''
Method: get_wall
Description: Gets the cell's wall attribute corresponding to the given direction.
Parameters: direction
direction: Direction - Direction corresponding to the desired wall
Return: String - Cell's wall attribute corresponding to the given direction
'''
return self.m_walls.get(direction)
def set_content(self, content):
'''
Method: set_content
Description: Sets the cell's content attribute.
Parameters: content
content: String - A string visually representing the cell
Return: None
'''
self.m_content = content
def set_position_x(self, x):
'''
Method: set_position_x
Description: Sets the cell's x-position attribute.
Parameters: x
x: Int - Cell's x-position within the maze that owns it
Return: None
'''
self.m_position[0] = x
def set_position_y(self, y):
'''
Method: set_position_y
Description: Sets the cell's y-position attribute.
Parameters: y
y: Int - Cell's y-position within the maze that owns it
Return: None
'''
self.m_position[1] = y
def set_wall(self, direction, value):
'''
Method: set_wall
Description: Sets the cell's wall attribute corresponding to the given direction.
Parameters: direction, value
direction: Direction - Direction corresponding to the desired wall
value: Boolean - Whether the wall exists or not
Return: None
'''
self.m_walls[direction] = value
| DFrye333/DynamicMaze | cell.py | Python | mit | 3,827 |
# Copyright 2015, Cisco Systems.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test class for UcsPower module."""
import mock
from oslo_config import cfg
from oslo_utils import importutils
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules.ucs import helper as ucs_helper
from ironic.drivers.modules.ucs import power as ucs_power
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
ucs_error = importutils.try_import('UcsSdk.utils.exception')
INFO_DICT = db_utils.get_test_ucs_info()
CONF = cfg.CONF
class UcsPowerTestCase(db_base.DbTestCase):
def setUp(self):
super(UcsPowerTestCase, self).setUp()
driver_info = INFO_DICT
mgr_utils.mock_the_extension_manager(driver="fake_ucs")
self.node = obj_utils.create_test_node(self.context,
driver='fake_ucs',
driver_info=driver_info)
CONF.set_override('max_retry', 2, 'cisco_ucs')
CONF.set_override('action_interval', 0, 'cisco_ucs')
self.interface = ucs_power.Power()
def test_get_properties(self):
expected = ucs_helper.COMMON_PROPERTIES
expected.update(ucs_helper.COMMON_PROPERTIES)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(ucs_helper, 'parse_driver_info',
spec_set=True, autospec=True)
def test_validate(self, mock_parse_driver_info):
mock_parse_driver_info.return_value = {}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.interface.validate(task)
mock_parse_driver_info.assert_called_once_with(task.node)
@mock.patch.object(ucs_helper, 'parse_driver_info',
spec_set=True, autospec=True)
def test_validate_fail(self, mock_parse_driver_info):
side_effect = iter([exception.InvalidParameterValue('Invalid Input')])
mock_parse_driver_info.side_effect = side_effect
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
self.interface.validate,
task)
mock_parse_driver_info.assert_called_once_with(task.node)
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test_get_power_state_up(self, mock_power_helper, mock_helper):
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
mock_power = mock_power_helper.return_value
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
mock_power.get_power_state.return_value = 'up'
self.assertEqual(states.POWER_ON,
self.interface.get_power_state(task))
mock_power.get_power_state.assert_called_once_with()
mock_power.get_power_state.reset_mock()
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test_get_power_state_down(self, mock_power_helper, mock_helper):
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
mock_power = mock_power_helper.return_value
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
mock_power.get_power_state.return_value = 'down'
self.assertEqual(states.POWER_OFF,
self.interface.get_power_state(task))
mock_power.get_power_state.assert_called_once_with()
mock_power.get_power_state.reset_mock()
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test_get_power_state_error(self, mock_power_helper, mock_helper):
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
mock_power = mock_power_helper.return_value
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
mock_power.get_power_state.return_value = states.ERROR
self.assertEqual(states.ERROR,
self.interface.get_power_state(task))
mock_power.get_power_state.assert_called_once_with()
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test_get_power_state_fail(self,
mock_ucs_power,
mock_helper):
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
power = mock_ucs_power.return_value
power.get_power_state.side_effect = (
ucs_error.UcsOperationError(operation='getting power state',
error='failed'))
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.UcsOperationError,
self.interface.get_power_state,
task)
power.get_power_state.assert_called_with()
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power._wait_for_state_change',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test_set_power_state(self, mock_power_helper, mock__wait, mock_helper):
target_state = states.POWER_ON
mock_power = mock_power_helper.return_value
mock_power.get_power_state.side_effect = ['down', 'up']
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
mock__wait.return_value = target_state
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertIsNone(self.interface.set_power_state(task,
target_state))
mock_power.set_power_state.assert_called_once_with('up')
mock_power.get_power_state.assert_called_once_with()
mock__wait.assert_called_once_with(target_state, mock_power)
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test_set_power_state_fail(self, mock_power_helper, mock_helper):
mock_power = mock_power_helper.return_value
mock_power.set_power_state.side_effect = (
ucs_error.UcsOperationError(operation='setting power state',
error='failed'))
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.UcsOperationError,
self.interface.set_power_state,
task, states.POWER_OFF)
mock_power.set_power_state.assert_called_once_with('down')
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
def test_set_power_state_invalid_state(self, mock_helper):
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
self.interface.set_power_state,
task, states.ERROR)
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test__wait_for_state_change_already_target_state(
self,
mock_ucs_power,
mock_helper):
mock_power = mock_ucs_power.return_value
target_state = states.POWER_ON
mock_power.get_power_state.return_value = 'up'
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
self.assertEqual(states.POWER_ON,
ucs_power._wait_for_state_change(
target_state, mock_power))
mock_power.get_power_state.assert_called_with()
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test__wait_for_state_change_exceed_iterations(
self,
mock_power_helper,
mock_helper):
mock_power = mock_power_helper.return_value
target_state = states.POWER_ON
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
mock_power.get_power_state.side_effect = (
['down', 'down', 'down', 'down'])
self.assertEqual(states.ERROR,
ucs_power._wait_for_state_change(
target_state, mock_power)
)
mock_power.get_power_state.assert_called_with()
self.assertEqual(4, mock_power.get_power_state.call_count)
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power._wait_for_state_change',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test_set_and_wait_for_state_change_fail(
self,
mock_power_helper,
mock__wait,
mock_helper):
target_state = states.POWER_ON
mock_power = mock_power_helper.return_value
mock_power.get_power_state.return_value = 'down'
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
mock__wait.return_value = states.POWER_OFF
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.PowerStateFailure,
self.interface.set_power_state,
task,
target_state)
mock_power.set_power_state.assert_called_once_with('up')
mock_power.get_power_state.assert_called_once_with()
mock__wait.assert_called_once_with(target_state, mock_power)
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power._wait_for_state_change',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test_reboot(self, mock_power_helper, mock__wait, mock_helper):
mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
mock_power = mock_power_helper.return_value
mock__wait.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertIsNone(self.interface.reboot(task))
mock_power.reboot.assert_called_once_with()
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test_reboot_fail(self, mock_power_helper,
mock_ucs_helper):
mock_ucs_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
mock_power = mock_power_helper.return_value
mock_power.reboot.side_effect = (
ucs_error.UcsOperationError(operation='rebooting', error='failed'))
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.UcsOperationError,
self.interface.reboot,
task
)
mock_power.reboot.assert_called_once_with()
@mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power._wait_for_state_change',
spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
spec_set=True, autospec=True)
def test_reboot__wait_state_change_fail(self, mock_power_helper,
mock__wait,
mock_ucs_helper):
mock_ucs_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
mock_power = mock_power_helper.return_value
mock__wait.return_value = states.ERROR
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.PowerStateFailure,
self.interface.reboot,
task)
mock_power.reboot.assert_called_once_with()
| redhat-openstack/ironic | ironic/tests/drivers/ucs/test_power.py | Python | apache-2.0 | 15,208 |
"""
Discrete Fourier Transforms - FFT.py
The underlying code for these functions is an f2c translated and modified
version of the FFTPACK routines.
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
from helper import *
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n == None: n = a.shape[axis]
if n < 1: raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
wsave = fft_cache[n]
except(KeyError):
wsave = init_function(n)
fft_cache[n] = wsave
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
return r
def fft(a, n=None, axis=-1):
"""fft(a, n=None, axis=-1)
Return the n point discrete Fourier transform of a. n defaults to
the length of a. If n is larger than the length of a, then a will
be zero-padded to make up the difference. If n is smaller than
the length of a, only the first n items in a will be used.
The packing of the result is "standard": If A = fft(a, n), then A[0]
contains the zero-frequency term, A[1:n/2+1] contains the
positive-frequency terms, and A[n/2+1:] contains the negative-frequency
terms, in order of decreasingly negative frequency. So for an 8-point
transform, the frequencies of the result are [ 0, 1, 2, 3, 4, -3, -2, -1].
This is most efficient for n a power of two. This also stores a cache of
working memory for different sizes of fft's, so you could theoretically
run into memory problems if you call this too many times with too many
different n's."""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
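# Illustrative round trip (not part of the original source; the input values
# are only an example):
#
#   >>> a = [1.0, 2.0, 1.0, -1.0]
#   >>> A = fft(a)          # A[0] holds the zero-frequency (sum) term
#   >>> b = ifft(A)         # b should equal a to within numerical accuracy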
def ifft(a, n=None, axis=-1):
"""ifft(a, n=None, axis=-1)
Return the n point inverse discrete Fourier transform of a. n
defaults to the length of a. If n is larger than the length of a,
then a will be zero-padded to make up the difference. If n is
smaller than the length of a, then a will be truncated to reduce
its size.
The input array is expected to be packed the same way as the output of
fft, as discussed in its documentation.
This is the inverse of fft: ifft(fft(a)) == a within numerical
accuracy.
This is most efficient for n a power of two. This also stores a cache of
working memory for different sizes of fft's, so you could theoretically
run into memory problems if you call this too many times with too many
different n's."""
a = asarray(a).astype(complex)
if n == None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""rfft(a, n=None, axis=-1)
Return the n point discrete Fourier transform of the real valued
array a. n defaults to the length of a. n is the length of the
input, not the output.
The returned array will be the nonnegative frequency terms of the
Hermite-symmetric, complex transform of the real array. So for an 8-point
transform, the frequencies in the result are [ 0, 1, 2, 3, 4]. The first
term will be real, as will the last if n is even. The negative frequency
terms are not needed because they are the complex conjugates of the
positive frequency terms. (This is what I mean when I say
Hermite-symmetric.)
This is most efficient for n a power of two."""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""irfft(a, n=None, axis=-1)
Return the real valued n point inverse discrete Fourier transform
of a, where a contains the nonnegative frequency terms of a
Hermite-symmetric sequence. n is the length of the result, not the
input. If n is not supplied, the default is 2*(len(a)-1). If you
want the length of the result to be odd, you have to say so.
If you specify an n such that a must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to m points via Fourier interpolation by: a_resamp
= irfft(rfft(a), m).
This is the inverse of rfft:
irfft(rfft(a), len(a)) == a
within numerical accuracy."""
a = asarray(a).astype(complex)
if n == None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
These are a pair analogous to rfft/irfft, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's hfft for which
you must supply the length of the result if it is to be odd.
ihfft(hfft(a), len(a)) == a
within numerical accuracy."""
a = asarray(a).astype(complex)
if n == None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
These are a pair analogous to rfft/irfft, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's hfft for which
you must supply the length of the result if it is to be odd.
ihfft(hfft(a), len(a)) == a
within numerical accuracy."""
a = asarray(a).astype(float)
if n == None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes == None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes == None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = range(len(axes))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""fftn(a, s=None, axes=None)
The n-dimensional fft of a. s is a sequence giving the shape of the input
an result along the transformed axes, as n for fft. Results are packed
analogously to fft: the term for zero frequency in all axes is in the
low-order corner, while the term for the Nyquist frequency in all axes is
in the middle.
If neither s nor axes is specified, the transform is taken along all
axes. If s is specified and axes is not, the last len(s) axes are used.
If axes are specified and s is not, the input shape along the specified
axes is used. If s and axes are both specified and are not the same
length, an exception is raised."""
return _raw_fftnd(a,s,axes,fft)
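# Illustrative sketch of the s/axes conventions described above (not part of
# the original source):
#
#   fftn(a)                         # transform along every axis of `a`
#   fftn(a, s=(8,), axes=(-1,))     # last axis only, zero-padded/truncated to 8
#   fftn(a, axes=(0,))              # first axis only, using its existing length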
def ifftn(a, s=None, axes=None):
"""ifftn(a, s=None, axes=None)
The inverse of fftn."""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
"""fft2(a, s=None, axes=(-2,-1))
The 2d fft of a. This is really just fftn with different default
behavior."""
return _raw_fftnd(a,s,axes,fft)
def ifft2(a, s=None, axes=(-2,-1)):
"""ifft2(a, s=None, axes=(-2, -1))
The inverse of fft2d. This is really just ifftn with different
default behavior."""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""rfftn(a, s=None, axes=None)
The n-dimensional discrete Fourier transform of a real array a. A real
transform as rfft is performed along the axis specified by the last
element of axes, then complex transforms as fft are performed along the
other axes."""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2,-1)):
"""rfft2(a, s=None, axes=(-2,-1))
The 2d fft of the real valued array a. This is really just rfftn with
different default behavior."""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""irfftn(a, s=None, axes=None)
The inverse of rfftn. The transform implemented in ifft is
applied along all axes but the last, then the transform implemented in
irfft is performed along the last axis. As with
irfft, the length of the result along that axis must be
specified if it is to be odd."""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2,-1)):
"""irfft2(a, s=None, axes=(-2, -1))
The inverse of rfft2. This is really just irfftn with
different default behavior."""
return irfftn(a, s, axes)
# Deprecated names
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
| santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/fft/fftpack.py | Python | bsd-3-clause | 10,629 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.addons.mysql_connector.model.mysql_connector \
import mysql_connector
class GPConnect(mysql_connector):
""" Contains all the utility methods needed to talk with the MySQL server
used by GP, as well as all mappings
from OpenERP fields to corresponding MySQL fields. """
def pushPassword(self, id, value):
""" Push the password of a OpenERP user to GP. """
return self.query("UPDATE login SET ERP_PWD = AES_ENCRYPT(%s, "
"SHA2('gpT0ErpP455w0rd!',512)) WHERE ERP_ID = %s",
(value, id))
| Secheron/compassion-switzerland | password_pusher_compassion/gp_connector.py | Python | agpl-3.0 | 1,024 |
from django.shortcuts import render
def index(request):
return render(request, 'bs_brochure_site/index.html')
| zmetcalf/django-webdev | bs_brochure_site/views.py | Python | gpl-3.0 | 115 |
from django.conf.urls import include, url
urlpatterns = [
url(r'^one/', include('sample_project.app_one.urls')),
url(r'^one/(?P<realm_slug>[\w-]+)/news/',
include('micropress.urls', namespace='app_one'),
{'realm_content_type': 'app_one.onegame'}),
url(r'^two/', include('sample_project.app_one.urls')),
url(r'^two/games/(?P<realm_slug>[\w-]+)/news/',
include('micropress.urls', namespace='app_two'),
{'realm_content_type': 'app_two.twogame'}),
url(r'^two/also/(?P<realm_slug>[\w-]+)/news/',
include('micropress.urls', namespace='app_two_also'),
{'realm_content_type': 'app_two.twogamealso'}),
]
| jbradberry/django-micro-press | sample_project/urls.py | Python | mit | 665 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Flibcpp(CMakePackage):
"""Fortran bindings to the C++ Standard Library.
"""
homepage = "https://flibcpp.readthedocs.io/en/latest"
git = "https://github.com/swig-fortran/flibcpp.git"
url = "https://github.com/swig-fortran/flibcpp/archive/v1.0.1.tar.gz"
version('1.0.1', sha256='8569c71eab0257097a6aa666a6d86bdcb6cd6e31244d32cc5b2478d0e936ca7a')
version('0.5.2', sha256='b9b4eb6431d5b56a54c37f658df7455eafd3d204a5534903b127e0c8a1c9b827')
version('0.5.1', sha256='76db24ce7893f19ab97ea7260c39490ae1bd1e08a4cc5111ad7e70525a916993')
version('0.5.0', sha256='94204198304ba4187815431859e5958479fa651a6f06f460b099badbf50f16b2')
version('0.4.1', sha256='5c9a11af391fcfc95dd11b95338cff19ed8104df66d42b00ae54f6cde4da5bdf')
version('0.4.0', sha256='ccb0acf58a4480977fdb3c62a0bd267297c1dfa687a142ea8822474c38aa322b')
version('0.3.1', sha256='871570124122c18018478275d5040b4b787d1966e50ee95b634b0b5e0cd27e91')
variant('doc', default=False, description='Build and install documentation')
variant('shared', default=True, description='Build shared libraries')
variant('swig', default=False,
description='Regenerate source files using SWIG')
variant('fstd', default='03', values=('none', '03', '08', '15', '18'),
multi=False, description='Build with this Fortran standard')
depends_on('swig@4.0.2-fortran', type='build', when="+swig")
depends_on('py-sphinx', type='build', when="+doc")
@run_before('cmake')
def die_without_fortran(self):
# Until we can pass compiler requirements through virtual
# dependencies, explicitly check for Fortran compiler instead of
# waiting for configure error.
if (self.compiler.f77 is None) or (self.compiler.fc is None):
raise InstallError('Flibcpp requires a Fortran compiler')
def cmake_args(self):
from_variant = self.define_from_variant
fstd_key = ('FLIBCPP_Fortran_STANDARD' if self.version > Version('1.0.0')
else 'FLIBCPP_FORTRAN_STD')
return [
from_variant('BUILD_SHARED_LIBS', 'shared'),
from_variant('FLIBCPP_BUILD_DOCS', 'doc'),
from_variant(fstd_key, 'fstd'),
from_variant('FLIBCPP_USE_SWIG', 'swig'),
self.define('FLIBCPP_BUILD_TESTS', bool(self.run_tests)),
self.define('FLIBCPP_BUILD_EXAMPLES', bool(self.run_tests)),
]
examples_src_dir = 'example'
@run_after('install')
def setup_smoke_tests(self):
"""Copy the example source files after the package is installed to an
install test subdirectory for use during `spack test run`."""
self.cache_extra_test_sources([self.examples_src_dir])
@property
def cached_tests_work_dir(self):
"""The working directory for cached test sources."""
return join_path(self.test_suite.current_test_cache_dir,
self.examples_src_dir)
def test(self):
"""Perform stand-alone/smoke tests."""
cmake_args = [
self.define('CMAKE_PREFIX_PATH', self.prefix),
self.define('CMAKE_Fortran_COMPILER', self.compiler.fc),
]
cmake_args.append(self.cached_tests_work_dir)
self.run_test("cmake", cmake_args,
purpose="test: calling cmake",
work_dir=self.cached_tests_work_dir)
self.run_test("make", [],
purpose="test: building the tests",
work_dir=self.cached_tests_work_dir)
self.run_test("run-examples.sh", [],
purpose="test: running the examples",
work_dir=self.cached_tests_work_dir)
| LLNL/spack | var/spack/repos/builtin/packages/flibcpp/package.py | Python | lgpl-2.1 | 3,945 |
#!/usr/bin/env python
# Copyright 2012, 2018 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
import pmt
class qa_digital_carrier_allocator_cvc (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
self.tsb_key = "ts_last"
def tearDown (self):
self.tb = None
def test_001_t (self):
"""
pretty simple (the carrier allocation here is not a practical OFDM configuration!)
"""
fft_len = 6
tx_symbols = (1, 2, 3)
# ^ this gets mapped to the DC carrier because occupied_carriers[0][0] == 0
pilot_symbols = ((1j,),)
occupied_carriers = ((0, 1, 2),)
pilot_carriers = ((3,),)
sync_word = (list(range(fft_len)),)
expected_result = tuple(sync_word[0] + [1j, 0, 0, 1, 2, 3])
# ^ DC carrier
src = blocks.vector_source_c(tx_symbols, False, 1)
alloc = digital.ofdm_carrier_allocator_cvc(fft_len,
occupied_carriers,
pilot_carriers,
pilot_symbols, sync_word,
self.tsb_key)
sink = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, 1, len(tx_symbols), self.tsb_key),
alloc,
sink
)
self.tb.run()
self.assertEqual(sink.data()[0], expected_result)
def test_001_t2 (self):
"""
pretty simple (same as before, but odd fft len)
"""
fft_len = 5
tx_symbols = (1, 2, 3)
# ^ this gets mapped to the DC carrier because occupied_carriers[0][0] == 0
occupied_carriers = ((0, 1, 2),)
pilot_carriers = ((-2,),)
pilot_symbols = ((1j,),)
expected_result = (1j, 0, 1, 2, 3)
# ^ DC carrier
src = blocks.vector_source_c(tx_symbols, False, 1)
alloc = digital.ofdm_carrier_allocator_cvc(
fft_len,
occupied_carriers,
pilot_carriers,
pilot_symbols, (),
self.tsb_key
)
sink = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key)
self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, 1, len(tx_symbols), self.tsb_key), alloc, sink)
self.tb.run ()
self.assertEqual(sink.data()[0], expected_result)
def test_002_t (self):
"""
same, but using negative carrier indices
"""
fft_len = 6
tx_symbols = (1, 2, 3)
pilot_symbols = ((1j,),)
occupied_carriers = ((-1, 1, 2),)
pilot_carriers = ((3,),)
expected_result = (1j, 0, 1, 0, 2, 3)
src = blocks.vector_source_c(tx_symbols, False, 1)
alloc = digital.ofdm_carrier_allocator_cvc(fft_len,
occupied_carriers,
pilot_carriers,
pilot_symbols, (),
self.tsb_key)
sink = blocks.tsb_vector_sink_c(fft_len)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, 1, len(tx_symbols), self.tsb_key),
alloc,
sink
)
self.tb.run ()
self.assertEqual(sink.data()[0], expected_result)
def test_002b_t (self):
"""
once again, but this time add a sync word
"""
fft_len = 6
sync_word = (0,) * fft_len
tx_symbols = (1, 2, 3, 4, 5, 6)
pilot_symbols = ((1j,),)
occupied_carriers = ((-1, 1, 2),)
pilot_carriers = ((3,),)
expected_result = sync_word + (1j, 0, 1, 0, 2, 3) + (1j, 0, 4, 0, 5, 6)
special_tag1 = gr.tag_t()
special_tag1.offset = 0
special_tag1.key = pmt.string_to_symbol("spam")
special_tag1.value = pmt.to_pmt(23)
special_tag2 = gr.tag_t()
special_tag2.offset = 4
special_tag2.key = pmt.string_to_symbol("eggs")
special_tag2.value = pmt.to_pmt(42)
src = blocks.vector_source_c(
tx_symbols, False, 1,
(special_tag1, special_tag2)
)
alloc = digital.ofdm_carrier_allocator_cvc(
fft_len,
occupied_carriers,
pilot_carriers,
pilot_symbols,
sync_words=(sync_word,),
len_tag_key=self.tsb_key
)
sink = blocks.tsb_vector_sink_c(fft_len)
self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, 1, len(tx_symbols), self.tsb_key), alloc, sink)
self.tb.run ()
self.assertEqual(sink.data()[0], expected_result)
tags = [gr.tag_to_python(x) for x in sink.tags()]
tags = sorted([(x.offset, x.key, x.value) for x in tags])
tags_expected = [
(0, 'spam', 23),
(2, 'eggs', 42),
]
self.assertEqual(tags, tags_expected)
def test_003_t (self):
"""
more advanced:
- 6 symbols per carrier
- 2 pilots per carrier
- have enough data for nearly 3 OFDM symbols
- send that twice
- add some random tags
- don't shift
"""
tx_symbols = list(range(1, 16)); # 15 symbols
pilot_symbols = ((1j, 2j), (3j, 4j))
occupied_carriers = ((1, 3, 4, 11, 12, 14), (1, 2, 4, 11, 13, 14),)
pilot_carriers = ((2, 13), (3, 12))
expected_result = (0, 1, 1j, 2, 3, 0, 0, 0, 0, 0, 0, 4, 5, 2j, 6, 0,
0, 7, 8, 3j, 9, 0, 0, 0, 0, 0, 0, 10, 4j, 11, 12, 0,
0, 13, 1j, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 2j, 0, 0)
fft_len = 16
testtag1 = gr.tag_t()
testtag1.offset = 0
testtag1.key = pmt.string_to_symbol('tag1')
testtag1.value = pmt.from_long(0)
testtag2 = gr.tag_t()
testtag2.offset = 7 # On the 2nd OFDM symbol
testtag2.key = pmt.string_to_symbol('tag2')
testtag2.value = pmt.from_long(0)
testtag3 = gr.tag_t()
testtag3.offset = len(tx_symbols)+1 # First OFDM symbol of packet 2
testtag3.key = pmt.string_to_symbol('tag3')
testtag3.value = pmt.from_long(0)
testtag4 = gr.tag_t()
testtag4.offset = 2*len(tx_symbols)-1 # Last OFDM symbol of packet 2
testtag4.key = pmt.string_to_symbol('tag4')
testtag4.value = pmt.from_long(0)
src = blocks.vector_source_c(tx_symbols * 2, False, 1, (testtag1, testtag2, testtag3, testtag4))
alloc = digital.ofdm_carrier_allocator_cvc(fft_len,
occupied_carriers,
pilot_carriers,
pilot_symbols, (),
self.tsb_key,
False)
sink = blocks.tsb_vector_sink_c(fft_len)
self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, 1, len(tx_symbols), self.tsb_key), alloc, sink)
self.tb.run ()
self.assertEqual(sink.data()[0], expected_result)
tags_found = {'tag1': False, 'tag2': False, 'tag3': False, 'tag4': False}
correct_offsets = {'tag1': 0, 'tag2': 1, 'tag3': 3, 'tag4': 5}
for tag in sink.tags():
key = pmt.symbol_to_string(tag.key)
if key in list(tags_found.keys()):
tags_found[key] = True
self.assertEqual(correct_offsets[key], tag.offset)
self.assertTrue(all(tags_found.values()))
def test_004_t (self):
"""
Provoking RuntimeError exceptions providing wrong user input (earlier invisible SIGFPE).
"""
fft_len = 6
# Occupied carriers
with self.assertRaises(RuntimeError) as oc:
alloc = digital.ofdm_carrier_allocator_cvc(fft_len,
(),
((),),
((),),
(),
self.tsb_key)
# Pilot carriers
with self.assertRaises(RuntimeError) as pc:
alloc = digital.ofdm_carrier_allocator_cvc(fft_len,
((),),
(),
((),),
(),
self.tsb_key)
# Pilot carrier symbols
with self.assertRaises(RuntimeError) as ps:
alloc = digital.ofdm_carrier_allocator_cvc(fft_len,
((),),
((),),
(),
(),
self.tsb_key)
self.assertEqual(str(oc.exception), "Occupied carriers must be of type vector of vector i.e. ((),).")
self.assertEqual(str(pc.exception), "Pilot carriers must be of type vector of vector i.e. ((),).")
self.assertEqual(str(ps.exception), "Pilot symbols must be of type vector of vector i.e. ((),).")
if __name__ == '__main__':
gr_unittest.run(qa_digital_carrier_allocator_cvc, "qa_digital_carrier_allocator_cvc.xml")
| michaelld/gnuradio | gr-digital/python/digital/qa_ofdm_carrier_allocator_cvc.py | Python | gpl-3.0 | 9,911 |
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
App Builder Library
+++++++++++++++++++
Contains utility methods useful for compiling and deploying applets and apps
onto the platform.
You can specify the destination project in the following ways (with the earlier
ones taking precedence):
* Supply the *project* argument to :func:`upload_resources()` or
:func:`upload_applet()`.
* Supply the 'project' attribute in your ``dxapp.json``.
* Set the ``DX_WORKSPACE_ID`` environment variable (when running in a job context).
You can use the function :func:`get_destination_project` to determine
the effective destination project.
'''
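# A minimal usage sketch (illustrative only; the "./my_applet" path is an
# assumption for the example, not part of this module):
#
#   from dxpy import app_builder
#   dest = app_builder.get_destination_project("./my_applet")  # honours the precedence above
#   app_builder.build("./my_applet")  # runs ./configure and "make -jN" when present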
from __future__ import print_function, unicode_literals, division, absolute_import
import os, sys, json, subprocess, tempfile, multiprocessing
import datetime
import gzip
import hashlib
import io
import tarfile
import stat
import dxpy
import dxpy.executable_builder
from . import logger
from .utils import merge
from .utils.printing import fill
from .compat import input
from .cli import INTERACTIVE_CLI
NUM_CORES = multiprocessing.cpu_count()
DX_TOOLKIT_PKGS = ('dx-toolkit',)
DX_TOOLKIT_GIT_URLS = ("git@github.com:dnanexus/dx-toolkit.git",)
class AppBuilderException(Exception):
"""
This exception is raised by the methods in this module when app or applet
building fails.
"""
pass
def _validate_applet_spec(applet_spec):
if 'runSpec' not in applet_spec:
raise AppBuilderException("Required field 'runSpec' not found in dxapp.json")
def _validate_app_spec(app_spec):
pass
def _get_applet_spec(src_dir):
applet_spec_file = os.path.join(src_dir, "dxapp.json")
with open(applet_spec_file) as fh:
applet_spec = json.load(fh)
_validate_applet_spec(applet_spec)
if 'project' not in applet_spec:
applet_spec['project'] = dxpy.WORKSPACE_ID
return applet_spec
def _get_app_spec(src_dir):
app_spec_file = os.path.join(src_dir, "dxapp.json")
with open(app_spec_file) as fh:
app_spec = json.load(fh)
_validate_app_spec(app_spec)
return app_spec
def build(src_dir, parallel_build=True):
"""
Runs any build scripts that are found in the specified directory.
In particular, runs ``./configure`` if it exists, followed by ``make -jN``
if it exists (building with as many parallel tasks as there are CPUs on the
system).
"""
# TODO: use Gentoo or deb buildsystem
config_script = os.path.join(src_dir, "configure")
if os.path.isfile(config_script) and os.access(config_script, os.X_OK):
logger.debug("Running ./configure in {cwd}".format(cwd=os.path.abspath(src_dir)))
try:
subprocess.check_call([config_script])
except subprocess.CalledProcessError as e:
raise AppBuilderException("./configure in target directory failed with exit code %d" % (e.returncode,))
if os.path.isfile(os.path.join(src_dir, "Makefile")) \
or os.path.isfile(os.path.join(src_dir, "makefile")) \
or os.path.isfile(os.path.join(src_dir, "GNUmakefile")):
if parallel_build:
make_shortcmd = "make -j%d" % (NUM_CORES,)
else:
make_shortcmd = "make"
logger.debug("Building with {make} in {cwd}".format(make=make_shortcmd, cwd=os.path.abspath(src_dir)))
try:
make_cmd = ["make", "-C", src_dir]
if parallel_build:
make_cmd.append("-j" + str(NUM_CORES))
subprocess.check_call(make_cmd)
except subprocess.CalledProcessError as e:
raise AppBuilderException("%s in target directory failed with exit code %d" % (make_shortcmd, e.returncode))
def get_destination_project(src_dir, project=None):
"""
:returns: Project ID where applet specified by src_dir would be written
:rtype: str
Returns the project ID where the applet specified in *src_dir* (or
its associated resource bundles) would be written. This returns the
same project that would be used by :func:`upload_resources()` and
:func:`upload_applet()`, given the same *src_dir* and *project*
parameters.
"""
if project is not None:
return project
return _get_applet_spec(src_dir)['project']
def is_link_local(link_target):
"""
:param link_target: The target of a symbolic link, as given by os.readlink()
:type link_target: string
:returns: A boolean indicating the link is local to the current directory.
This is defined to mean that os.path.isabs(link_target) == False
and the link NEVER references the parent directory, so
"./foo/../../curdir/foo" would return False.
:rtype: boolean
"""
is_local=(not os.path.isabs(link_target))
if is_local:
# make sure that the path NEVER extends outside the resources directory!
d,l = os.path.split(link_target)
link_parts = []
while l:
link_parts.append(l)
d,l = os.path.split(d)
curr_path = os.sep
for p in reversed(link_parts):
is_local = (is_local and not (curr_path == os.sep and p == os.pardir) )
curr_path = os.path.abspath(os.path.join(curr_path, p))
return is_local
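# Illustrative sketch (not from the original module) of how is_link_local()
# treats a few link targets; the paths are invented:
#   is_link_local("subdir/data.txt") -> True   (relative, never leaves the tree)
#   is_link_local("../outside.txt")  -> False  (escapes via the parent directory)
#   is_link_local("/etc/hosts")      -> False  (absolute path)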
def _fix_perms(perm_obj):
"""
:param perm_obj: A permissions object, as given by os.stat()
:type perm_obj: integer
:returns: A permissions object that is the result of "chmod a+rX" on the
given permission object. This is defined to be the permission object
bitwise or-ed with all stat.S_IR*, and if the stat.S_IXUSR bit is
set, then the permission object should also be returned bitwise or-ed
with stat.S_IX* (stat.S_IXUSR not included because it would be redundant).
:rtype: integer
"""
ret_perm = perm_obj | stat.S_IROTH | stat.S_IRGRP | stat.S_IRUSR
if ret_perm & stat.S_IXUSR:
ret_perm = ret_perm | stat.S_IXGRP | stat.S_IXOTH
return ret_perm
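# Example of the "chmod a+rX" behaviour implemented above (modes are hypothetical):
#   _fix_perms(0o600) -> 0o644   (read bits added for group and other)
#   _fix_perms(0o700) -> 0o755   (execute bits added because S_IXUSR was set)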
def _fix_perm_filter(tar_obj):
"""
:param tar_obj: A TarInfo object to be added to a tar file
    :type tar_obj: tarfile.TarInfo
:returns: A TarInfo object with permissions changed (a+rX)
:rtype: tarfile.TarInfo
"""
tar_obj.mode = _fix_perms(tar_obj.mode)
return tar_obj
def upload_resources(src_dir, project=None, folder='/', ensure_upload=False, force_symlinks=False, brief=False):
"""
:param ensure_upload: If True, will bypass checksum of resources directory
and upload resources bundle unconditionally;
will NOT be able to reuse this bundle in future builds.
Else if False, will compute checksum and upload bundle
if checksum is different from a previously uploaded
bundle's checksum.
:type ensure_upload: boolean
:param force_symlinks: If true, will bypass the attempt to dereference any
non-local symlinks and will unconditionally include
the link as-is. Note that this will almost certainly
result in a broken link within the resource directory
unless you really know what you're doing.
:type force_symlinks: boolean
:returns: A list (possibly empty) of references to the generated archive(s)
:rtype: list
If it exists, archives and uploads the contents of the
``resources/`` subdirectory of *src_dir* to a new remote file
object, and returns a list describing a single bundled dependency in
the form expected by the ``bundledDepends`` field of a run
specification. Returns an empty list, if no archive was created.
"""
applet_spec = _get_applet_spec(src_dir)
if project is None:
dest_project = applet_spec['project']
else:
dest_project = project
applet_spec['project'] = project
resources_dir = os.path.join(src_dir, "resources")
if os.path.exists(resources_dir) and len(os.listdir(resources_dir)) > 0:
target_folder = applet_spec['folder'] if 'folder' in applet_spec else folder
# While creating the resource bundle, optimistically look for a
# resource bundle with the same contents, and reuse it if possible.
# The resource bundle carries a property 'resource_bundle_checksum'
# that indicates the checksum; the way in which the checksum is
# computed is given below. If the checksum matches (and
# ensure_upload is False), then we will use the existing file,
# otherwise, we will compress and upload the tarball.
# The input to the SHA1 contains entries of the form (whitespace
# only included here for readability):
#
# / \0 MODE \0 MTIME \0
# /foo \0 MODE \0 MTIME \0
# ...
#
# where there is one entry for each directory or file (order is
# specified below), followed by a numeric representation of the
# mode, and the mtime in milliseconds since the epoch.
#
# Note when looking at a link, if the link is to be dereferenced,
# the mtime and mode used are that of the target (using os.stat())
# If the link is to be kept as a link, the mtime and mode are those
# of the link itself (using os.lstat())
with tempfile.NamedTemporaryFile(suffix=".tar") as tar_tmp_fh:
output_sha1 = hashlib.sha1()
tar_fh = tarfile.open(fileobj=tar_tmp_fh, mode='w')
for dirname, subdirs, files in os.walk(resources_dir):
if not dirname.startswith(resources_dir):
raise AssertionError('Expected %r to start with root directory %r' % (dirname, resources_dir))
# Add an entry for the directory itself
relative_dirname = dirname[len(resources_dir):]
dir_stat = os.lstat(dirname)
if not relative_dirname.startswith('/'):
relative_dirname = '/' + relative_dirname
fields = [relative_dirname, str(_fix_perms(dir_stat.st_mode)), str(int(dir_stat.st_mtime * 1000))]
output_sha1.update(b''.join(s.encode('utf-8') + b'\0' for s in fields))
# add an entry in the tar file for the current directory, but
# do not recurse!
tar_fh.add(dirname, arcname='.' + relative_dirname, recursive=False, filter=_fix_perm_filter)
# Canonicalize the order of subdirectories; this is the order in
# which they will be visited by os.walk
subdirs.sort()
# check the subdirectories for symlinks. We should throw an error
# if there are any links that point outside of the directory (unless
# --force-symlinks is given). If a link is pointing internal to
# the directory (or --force-symlinks is given), we should add it
# as a file.
for subdir_name in subdirs:
dir_path = os.path.join(dirname, subdir_name)
# If we do have a symlink,
if os.path.islink(dir_path):
# Let's get the pointed-to path to ensure that it is
# still in the directory
link_target = os.readlink(dir_path)
# If this is a local link, add it to the list of files (case 1)
# else raise an error
if force_symlinks or is_link_local(link_target):
files.append(subdir_name)
else:
raise AppBuilderException("Cannot include symlinks to directories outside of the resource directory. '%s' points to directory '%s'" % (dir_path, os.path.realpath(dir_path)))
# Canonicalize the order of files so that we compute the
# checksum in a consistent order
for filename in sorted(files):
deref_link = False
relative_filename = os.path.join(relative_dirname, filename)
true_filename = os.path.join(dirname, filename)
file_stat = os.lstat(true_filename)
# check for a link here, please!
if os.path.islink(true_filename):
# Get the pointed-to path
link_target = os.readlink(true_filename)
if not (force_symlinks or is_link_local(link_target)):
# if we are pointing outside of the directory, then:
# try to get the true stat of the file and make sure
# to dereference the link!
try:
file_stat = os.stat(os.path.join(dirname, link_target))
deref_link = True
except OSError:
# uh-oh! looks like we have a broken link!
# since this is guaranteed to cause problems (and
# we know we're not forcing symlinks here), we
# should throw an error
raise AppBuilderException("Broken symlink: Link '%s' points to '%s', which does not exist" % (true_filename, os.path.realpath(true_filename)) )
fields = [relative_filename, str(_fix_perms(file_stat.st_mode)), str(int(file_stat.st_mtime * 1000))]
output_sha1.update(b''.join(s.encode('utf-8') + b'\0' for s in fields))
# If we are to dereference, use the target fn
if deref_link:
true_filename = os.path.realpath(true_filename)
tar_fh.add(true_filename, arcname='.' + relative_filename, filter=_fix_perm_filter)
# end for filename in sorted(files)
# end for dirname, subdirs, files in os.walk(resources_dir):
# at this point, the tar is complete, so close the tar_fh
tar_fh.close()
# Optimistically look for a resource bundle with the same
# contents, and reuse it if possible. The resource bundle
# carries a property 'resource_bundle_checksum' that indicates
# the checksum; the way in which the checksum is computed is
# given in the documentation of _directory_checksum.
if ensure_upload:
properties_dict = {}
existing_resources = False
else:
directory_checksum = output_sha1.hexdigest()
properties_dict = dict(resource_bundle_checksum=directory_checksum)
existing_resources = dxpy.find_one_data_object(
project=dest_project,
folder=target_folder,
properties=dict(resource_bundle_checksum=directory_checksum),
visibility='either',
zero_ok=True,
state='closed',
return_handler=True
)
if existing_resources:
if not brief:
logger.info("Found existing resource bundle that matches local resources directory: " +
existing_resources.get_id())
dx_resource_archive = existing_resources
else:
logger.debug("Uploading in " + src_dir)
# We need to compress the tar that we've created
targz_fh = tempfile.NamedTemporaryFile(suffix=".tar.gz", delete=False)
# compress the file by reading the tar file and passing
# it though a GzipFile object, writing the given
# block size (by default 8192 bytes) at a time
targz_gzf = gzip.GzipFile(fileobj=targz_fh, mode='wb')
tar_tmp_fh.seek(0)
dat = tar_tmp_fh.read(io.DEFAULT_BUFFER_SIZE)
while dat:
targz_gzf.write(dat)
dat = tar_tmp_fh.read(io.DEFAULT_BUFFER_SIZE)
targz_gzf.flush()
targz_gzf.close()
targz_fh.close()
if 'folder' in applet_spec:
try:
dxpy.get_handler(dest_project).new_folder(applet_spec['folder'], parents=True)
except dxpy.exceptions.DXAPIError:
pass # TODO: make this better
dx_resource_archive = dxpy.upload_local_file(
targz_fh.name,
wait_on_close=True,
project=dest_project,
folder=target_folder,
hidden=True,
properties=properties_dict
)
os.unlink(targz_fh.name)
# end compressed file creation and upload
archive_link = dxpy.dxlink(dx_resource_archive.get_id())
# end tempfile.NamedTemporaryFile(suffix=".tar") as tar_fh
return [{'name': 'resources.tar.gz', 'id': archive_link}]
else:
return []
def upload_applet(src_dir, uploaded_resources, check_name_collisions=True, overwrite=False, archive=False,
project=None, override_folder=None, override_name=None, dx_toolkit_autodep="stable",
dry_run=False, brief=False, **kwargs):
"""
Creates a new applet object.
:param project: ID of container in which to create the applet.
:type project: str, or None to use whatever is specified in dxapp.json
:param override_folder: folder name for the resulting applet which, if specified, overrides that given in dxapp.json
:type override_folder: str
:param override_name: name for the resulting applet which, if specified, overrides that given in dxapp.json
:type override_name: str
:param dx_toolkit_autodep: What type of dx-toolkit dependency to
inject if none is present. "stable" for the APT package; "git"
for HEAD of dx-toolkit master branch; or False for no
dependency.
:type dx_toolkit_autodep: boolean or string
"""
applet_spec = _get_applet_spec(src_dir)
if project is None:
dest_project = applet_spec['project']
else:
dest_project = project
applet_spec['project'] = project
if 'name' not in applet_spec:
try:
applet_spec['name'] = os.path.basename(os.path.abspath(src_dir))
except:
raise AppBuilderException("Could not determine applet name from the specification (dxapp.json) or from the name of the working directory (%r)" % (src_dir,))
if override_folder:
applet_spec['folder'] = override_folder
if 'folder' not in applet_spec:
applet_spec['folder'] = '/'
if override_name:
applet_spec['name'] = override_name
if 'dxapi' not in applet_spec:
applet_spec['dxapi'] = dxpy.API_VERSION
applets_to_overwrite = []
archived_applet = None
if check_name_collisions and not dry_run:
destination_path = applet_spec['folder'] + ('/' if not applet_spec['folder'].endswith('/') else '') + applet_spec['name']
logger.debug("Checking for existing applet at " + destination_path)
for result in dxpy.find_data_objects(classname="applet", name=applet_spec["name"], folder=applet_spec['folder'], project=dest_project, recurse=False):
if overwrite:
# Don't remove the old applet until after the new one
# has been created. This avoids a race condition where
# we remove the old applet, but that causes garbage
# collection of the bundled resources that will be
# shared with the new applet
applets_to_overwrite.append(result['id'])
elif archive:
logger.debug("Archiving applet %s" % (result['id']))
proj = dxpy.DXProject(dest_project)
archive_folder = '/.Applet_archive'
try:
proj.list_folder(archive_folder)
except dxpy.DXAPIError:
proj.new_folder(archive_folder)
proj.move(objects=[result['id']], destination=archive_folder)
archived_applet = dxpy.DXApplet(result['id'], project=dest_project)
now = datetime.datetime.fromtimestamp(archived_applet.created/1000).ctime()
new_name = archived_applet.name + " ({d})".format(d=now)
archived_applet.rename(new_name)
if not brief:
logger.info("Archived applet %s to %s:\"%s/%s\"" % (result['id'], dest_project, archive_folder, new_name))
else:
raise AppBuilderException("An applet already exists at %s (id %s) and the --overwrite (-f) or --archive (-a) options were not given" % (destination_path, result['id']))
# -----
# Override various fields from the pristine dxapp.json
# Carry region-specific values from regionalOptions into the main
# runSpec
applet_spec["runSpec"].setdefault("bundledDepends", [])
applet_spec["runSpec"].setdefault("assetDepends", [])
if not dry_run:
region = dxpy.api.project_describe(dest_project, input_params={"fields": {"region": True}})["region"]
# if regionalOptions contain at least one region, they must include
# the region of the target project
if len(applet_spec.get('regionalOptions', {})) != 0 and region not in applet_spec.get('regionalOptions', {}):
err_mesg = "destination project is in region {} but \"regionalOptions\" do not contain this region. ".format(region)
            err_mesg += "Please update your \"regionalOptions\" specification."
raise AppBuilderException(err_mesg)
regional_options = applet_spec.get('regionalOptions', {}).get(region, {})
# We checked earlier that if region-specific values for the
# fields below are given, the same fields are not also specified
# in the top-level runSpec. So the operations below should not
# result in any user-supplied settings being clobbered.
if 'systemRequirements' in regional_options:
applet_spec["runSpec"]["systemRequirements"] = regional_options['systemRequirements']
if 'bundledDepends' in regional_options:
applet_spec["runSpec"]["bundledDepends"].extend(regional_options["bundledDepends"])
if 'assetDepends' in regional_options:
applet_spec["runSpec"]["assetDepends"].extend(regional_options["assetDepends"])
# Inline Readme.md and Readme.developer.md
dxpy.executable_builder.inline_documentation_files(applet_spec, src_dir)
# Inline the code of the program
if "file" in applet_spec["runSpec"]:
# Put it into runSpec.code instead
with open(os.path.join(src_dir, applet_spec["runSpec"]["file"])) as code_fh:
applet_spec["runSpec"]["code"] = code_fh.read()
del applet_spec["runSpec"]["file"]
    # If this applet requires a cluster, inline any bootstrapScript code that may be provided.
# bootstrapScript is an *optional* clusterSpec parameter.
# NOTE: assumes bootstrapScript is always provided as a filename
if "systemRequirements" in applet_spec["runSpec"]:
sys_reqs = applet_spec["runSpec"]["systemRequirements"]
for entry_point in sys_reqs:
try:
bootstrap_script = os.path.join(src_dir, sys_reqs[entry_point]["clusterSpec"]["bootstrapScript"])
with open(bootstrap_script) as code_fh:
sys_reqs[entry_point]["clusterSpec"]["bootstrapScript"] = code_fh.read()
except KeyError:
# either no "clusterSpec" or no "bootstrapScript" within "clusterSpec"
continue
except IOError:
raise AppBuilderException("The clusterSpec \"bootstrapScript\" could not be read.")
# Attach bundled resources to the app
if uploaded_resources is not None:
applet_spec["runSpec"]["bundledDepends"].extend(uploaded_resources)
# Validate and process assetDepends
asset_depends = applet_spec["runSpec"]["assetDepends"]
if type(asset_depends) is not list or any(type(dep) is not dict for dep in asset_depends):
raise AppBuilderException("Expected runSpec.assetDepends to be an array of objects")
for asset in asset_depends:
asset_project = asset.get("project", None)
asset_folder = asset.get("folder", '/')
asset_stages = asset.get("stages", None)
if "id" in asset:
asset_record = dxpy.DXRecord(asset["id"]).describe(fields={'details'}, default_fields=True)
elif "name" in asset and asset_project is not None and "version" in asset:
try:
asset_record = dxpy.find_one_data_object(zero_ok=True, classname="record", typename="AssetBundle",
name=asset["name"], properties=dict(version=asset["version"]),
project=asset_project, folder=asset_folder, recurse=False,
describe={"defaultFields": True, "fields": {"details": True}},
state="closed", more_ok=False)
except dxpy.exceptions.DXSearchError:
msg = "Found more than one asset record that matches: name={0}, folder={1} in project={2}."
raise AppBuilderException(msg.format(asset["name"], asset_folder, asset_project))
else:
raise AppBuilderException("Each runSpec.assetDepends element must have either {'id'} or "
"{'name', 'project' and 'version'} field(s).")
if asset_record:
if "id" in asset:
asset_details = asset_record["details"]
else:
asset_details = asset_record["describe"]["details"]
if "archiveFileId" in asset_details:
archive_file_id = asset_details["archiveFileId"]
else:
raise AppBuilderException("The required field 'archiveFileId' was not found in "
"the details of the asset bundle %s " % asset_record["id"])
archive_file_name = dxpy.DXFile(archive_file_id).describe()["name"]
bundle_depends = {
"name": archive_file_name,
"id": archive_file_id
}
if asset_stages:
bundle_depends["stages"] = asset_stages
applet_spec["runSpec"]["bundledDepends"].append(bundle_depends)
# If the file is not found in the applet destination project, clone it from the asset project
if (not dry_run and
dxpy.DXRecord(dxid=asset_record["id"], project=dest_project).describe()["project"] != dest_project):
dxpy.DXRecord(asset_record["id"], project=asset_record["project"]).clone(dest_project)
else:
raise AppBuilderException("No asset bundle was found that matched the specification %s"
% (json.dumps(asset)))
# Include the DNAnexus client libraries as an execution dependency, if they are not already
# there
if dx_toolkit_autodep == "git":
dx_toolkit_dep = {"name": "dx-toolkit",
"package_manager": "git",
"url": "git://github.com/dnanexus/dx-toolkit.git",
"tag": "master",
"build_commands": "make install DESTDIR=/ PREFIX=/opt/dnanexus"}
elif dx_toolkit_autodep == "stable":
dx_toolkit_dep = {"name": "dx-toolkit", "package_manager": "apt"}
elif dx_toolkit_autodep:
raise AppBuilderException("dx_toolkit_autodep must be one of 'stable', 'git', or False; got %r instead" % (dx_toolkit_autodep,))
if dx_toolkit_autodep:
applet_spec["runSpec"].setdefault("execDepends", [])
exec_depends = applet_spec["runSpec"]["execDepends"]
if type(exec_depends) is not list or any(type(dep) is not dict for dep in exec_depends):
raise AppBuilderException("Expected runSpec.execDepends to be an array of objects")
dx_toolkit_dep_found = any(dep.get('name') in DX_TOOLKIT_PKGS or dep.get('url') in DX_TOOLKIT_GIT_URLS for dep in exec_depends)
if not dx_toolkit_dep_found:
exec_depends.append(dx_toolkit_dep)
if dx_toolkit_autodep == "git":
applet_spec.setdefault("access", {})
applet_spec["access"].setdefault("network", [])
# Note: this can be set to "github.com" instead of "*" if the build doesn't download any deps
if "*" not in applet_spec["access"]["network"]:
applet_spec["access"]["network"].append("*")
merge(applet_spec, kwargs)
# -----
# Now actually create the applet
if dry_run:
print("Would create the following applet:")
print(json.dumps(applet_spec, indent=2))
print("*** DRY-RUN-- no applet was created ***")
return None, None
if applet_spec.get("categories", []):
if "tags" not in applet_spec:
applet_spec["tags"] = []
applet_spec["tags"] = list(set(applet_spec["tags"]) | set(applet_spec["categories"]))
applet_id = dxpy.api.applet_new(applet_spec)["id"]
if archived_applet:
archived_applet.set_properties({'replacedWith': applet_id})
# Now it is permissible to delete the old applet(s), if any
if applets_to_overwrite:
if not brief:
logger.info("Deleting applet(s) %s" % (','.join(applets_to_overwrite)))
dxpy.DXProject(dest_project).remove_objects(applets_to_overwrite)
return applet_id, applet_spec
def _create_or_update_version(app_name, version, app_spec, try_update=True):
"""
Creates a new version of the app. Returns an app_id, or None if the app has
already been created and published.
"""
# This has a race condition since the app could have been created or
# published since we last looked.
try:
app_id = dxpy.api.app_new(app_spec)["id"]
return app_id
except dxpy.exceptions.DXAPIError as e:
# TODO: detect this error more reliably
if e.name == 'InvalidInput' and e.msg == 'Specified name and version conflict with an existing alias':
print('App %s/%s already exists' % (app_spec["name"], version), file=sys.stderr)
# The version number was already taken, so app/new doesn't work.
# However, maybe it hasn't been published yet, so we might be able
# to app-xxxx/update it.
app_describe = dxpy.api.app_describe("app-" + app_name, alias=version)
if app_describe.get("published", 0) > 0:
return None
return _update_version(app_name, version, app_spec, try_update=try_update)
raise e
def _update_version(app_name, version, app_spec, try_update=True):
"""
Updates a version of the app in place. Returns an app_id, or None if the
app has already been published.
"""
if not try_update:
return None
try:
app_id = dxpy.api.app_update("app-" + app_name, version, app_spec)["id"]
return app_id
except dxpy.exceptions.DXAPIError as e:
if e.name == 'InvalidState':
print('App %s/%s has already been published' % (app_spec["name"], version), file=sys.stderr)
return None
raise e
def create_app_multi_region(regional_options, app_name, src_dir, publish=False, set_default=False, billTo=None,
try_versions=None, try_update=True, confirm=True, inherited_metadata={}, brief=False):
"""
Creates a new app object from the specified applet(s).
:param regional_options: Region-specific options for the app. See
https://documentation.dnanexus.com/developer/api/running-analyses/apps#api-method-app-new
for details; this should contain keys for each region the app is
to be enabled in, and for the values, a dict containing (at
minimum) a key "applet" whose value is an applet ID for that
region.
:type regional_options: dict
"""
return _create_app(dict(regionalOptions=regional_options), app_name, src_dir, publish=publish,
set_default=set_default, billTo=billTo, try_versions=try_versions, try_update=try_update,
confirm=confirm, inherited_metadata=inherited_metadata, brief=brief)
def create_app(applet_id, applet_name, src_dir, publish=False, set_default=False, billTo=None, try_versions=None,
try_update=True, confirm=True, regional_options=None):
"""
Creates a new app object from the specified applet.
.. deprecated:: 0.204.0
Use :func:`create_app_multi_region()` instead.
"""
# In this case we don't know the region of the applet, so we use the
# legacy API {"applet": applet_id} without specifying a region
# specifically.
return _create_app(dict(applet=applet_id), applet_name, src_dir, publish=publish, set_default=set_default,
billTo=billTo, try_versions=try_versions, try_update=try_update, confirm=confirm)
def _create_app(applet_or_regional_options, app_name, src_dir, publish=False, set_default=False, billTo=None,
try_versions=None, try_update=True, confirm=True, inherited_metadata={}, brief=False):
if src_dir:
app_spec = _get_app_spec(src_dir)
if not brief:
logger.info("Will create app with spec: %s" % (json.dumps(app_spec),))
# Inline Readme.md and Readme.developer.md
dxpy.executable_builder.inline_documentation_files(app_spec, src_dir)
else:
app_spec = inherited_metadata
app_spec.update(applet_or_regional_options, name=app_name)
if billTo:
app_spec["billTo"] = billTo
if not try_versions:
try_versions = [app_spec["version"]]
for version in try_versions:
logger.debug("Attempting to create version %s..." % (version,))
app_spec['version'] = version
app_describe = None
try:
# 404, which is rather likely in this app_describe request
# (the purpose of the request is to find out whether the
# version of interest exists), would ordinarily cause this
# request to be retried multiple times, introducing a
# substantial delay. So we disable retrying here for this
# request.
app_describe = dxpy.api.app_describe("app-" + app_spec["name"], alias=version, always_retry=False)
except dxpy.exceptions.DXAPIError as e:
if e.name == 'ResourceNotFound':
pass
else:
raise e
# Now app_describe is None if the app didn't exist, OR it contains the
# app describe content.
        # The describe check does not eliminate race conditions, since an app
        # may have been created or published since we last looked at it. So
        # the describe that happens here is just to save time and avoid
# unnecessary API calls, but we always have to be prepared to recover
# from API errors.
if app_describe is None:
logger.debug('App %s/%s does not yet exist' % (app_spec["name"], version))
app_id = _create_or_update_version(app_spec['name'], app_spec['version'], app_spec, try_update=try_update)
if app_id is None:
continue
if not brief:
logger.info("Created app " + app_id)
# Success!
break
elif app_describe.get("published", 0) == 0:
logger.debug('App %s/%s already exists and has not been published' % (app_spec["name"], version))
app_id = _update_version(app_spec['name'], app_spec['version'], app_spec, try_update=try_update)
if app_id is None:
continue
if not brief:
logger.info("Updated existing app " + app_id)
# Success!
break
else:
logger.debug('App %s/%s already exists and has been published' % (app_spec["name"], version))
# App has already been published. Give up on this version.
continue
else:
# All versions requested failed
if len(try_versions) != 1:
tried_versions = 'any of the requested versions: ' + ', '.join(try_versions)
else:
tried_versions = 'the requested version: ' + try_versions[0]
raise AppBuilderException('Could not create %s' % (tried_versions,))
# Set categories appropriately.
categories_to_set = app_spec.get("categories", [])
existing_categories = dxpy.api.app_list_categories(app_id)['categories']
categories_to_add = set(categories_to_set).difference(set(existing_categories))
categories_to_remove = set(existing_categories).difference(set(categories_to_set))
if categories_to_add:
dxpy.api.app_add_categories(app_id, input_params={'categories': list(categories_to_add)})
if categories_to_remove:
dxpy.api.app_remove_categories(app_id, input_params={'categories': list(categories_to_remove)})
# Set developers list appropriately, but only if provided.
developers_to_set = app_spec.get("developers")
if developers_to_set is not None:
existing_developers = dxpy.api.app_list_developers(app_id)['developers']
developers_to_add = set(developers_to_set) - set(existing_developers)
developers_to_remove = set(existing_developers) - set(developers_to_set)
skip_updating_developers = False
if developers_to_add or developers_to_remove:
parts = []
if developers_to_add:
parts.append('the following developers will be added: ' + ', '.join(sorted(developers_to_add)))
if developers_to_remove:
parts.append('the following developers will be removed: ' + ', '.join(sorted(developers_to_remove)))
developer_change_message = '; and '.join(parts)
if confirm:
if INTERACTIVE_CLI:
try:
print('***')
print(fill('WARNING: ' + developer_change_message))
print('***')
value = input('Confirm updating developers list [y/N]: ')
except KeyboardInterrupt:
value = 'n'
if not value.lower().startswith('y'):
skip_updating_developers = True
else:
# Default to NOT updating developers if operating
# without a TTY.
logger.warn('skipping requested change to the developer list. Rerun "dx build" interactively or pass --yes to confirm this change.')
skip_updating_developers = True
else:
logger.warn(developer_change_message)
if not skip_updating_developers:
if developers_to_add:
dxpy.api.app_add_developers(app_id, input_params={'developers': list(developers_to_add)})
if developers_to_remove:
dxpy.api.app_remove_developers(app_id, input_params={'developers': list(developers_to_remove)})
# Set authorizedUsers list appropriately, but only if provided.
authorized_users_to_set = app_spec.get("authorizedUsers")
existing_authorized_users = dxpy.api.app_list_authorized_users(app_id)['authorizedUsers']
if authorized_users_to_set is not None:
authorized_users_to_add = set(authorized_users_to_set) - set(existing_authorized_users)
authorized_users_to_remove = set(existing_authorized_users) - set(authorized_users_to_set)
skip_adding_public = False
if 'PUBLIC' in authorized_users_to_add:
acl_change_message = 'app-%s will be made public. Anyone will be able to view and run all published versions of this app.' % (app_spec['name'],)
if confirm:
if INTERACTIVE_CLI:
try:
print('***')
print(fill('WARNING: ' + acl_change_message))
print('***')
value = input('Confirm making this app public [y/N]: ')
except KeyboardInterrupt:
value = 'n'
if not value.lower().startswith('y'):
skip_adding_public = True
else:
# Default to NOT adding PUBLIC if operating
# without a TTY.
logger.warn('skipping requested change to add PUBLIC to the authorized users list. Rerun "dx build" interactively or pass --yes to confirm this change.')
skip_adding_public = True
else:
logger.warn(acl_change_message)
if skip_adding_public:
authorized_users_to_add -= {'PUBLIC'}
if authorized_users_to_add:
dxpy.api.app_add_authorized_users(app_id, input_params={'authorizedUsers': list(authorized_users_to_add)})
if skip_adding_public:
logger.warn('the app was NOT made public as requested in the app spec. To make it so, run "dx add users app-%s PUBLIC".' % (app_spec["name"],))
if authorized_users_to_remove:
dxpy.api.app_remove_authorized_users(app_id, input_params={'authorizedUsers': list(authorized_users_to_remove)})
elif not len(existing_authorized_users) and not brief:
# Apps that had authorized users added by any other means will
# not have this message printed.
logger.warn('authorizedUsers is missing from the app spec. No one will be able to view or run the app except the app\'s developers.')
if publish:
dxpy.api.app_publish(app_id, input_params={'makeDefault': set_default})
else:
# If no versions of this app have ever been published, then
# we'll set the "default" tag to point to the latest
# (unpublished) version.
no_published_versions = len(list(dxpy.find_apps(name=app_name, published=True, limit=1))) == 0
if no_published_versions:
dxpy.api.app_add_tags(app_id, input_params={'tags': ['default']})
return app_id
def get_enabled_regions(app_spec, from_command_line):
"""Returns a list of the regions in which the app should be enabled.
Also validates that app_spec['regionalOptions'], if supplied, is
well-formed.
:param app_spec: app specification
:type app_spec: dict
:param from_command_line: The regions specified on the command-line
via --region
:type from_command_line: list or None
"""
enabled_regions = dxpy.executable_builder.get_enabled_regions('app', app_spec, from_command_line, AppBuilderException)
if enabled_regions is not None and len(enabled_regions) == 0:
raise AssertionError("This app should be enabled in at least one region")
return enabled_regions
| dnanexus/dx-toolkit | src/python/dxpy/app_builder.py | Python | apache-2.0 | 44,192 |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, 'tools', 'telemetry'))
from telemetry import benchmark_runner
def _LaunchDBus():
"""Launches DBus to work around a bug in GLib.
Works around a bug in GLib where it performs operations which aren't
async-signal-safe (in particular, memory allocations) between fork and exec
when it spawns subprocesses. This causes threads inside Chrome's browser and
utility processes to get stuck, and this harness to hang waiting for those
processes, which will never terminate. This doesn't happen on users'
machines, because they have an active desktop session and the
DBUS_SESSION_BUS_ADDRESS environment variable set, but it does happen on the
bots. See crbug.com/309093 for more details.
Returns:
True if it actually spawned DBus.
"""
import platform
if (platform.uname()[0].lower() != 'linux' or
'DBUS_SESSION_BUS_ADDRESS' in os.environ):
return False
# Only start DBus on systems that are actually running X. Using DISPLAY
  # variable is not reliable, because it is set by the /etc/init.d/buildbot
# script for all slaves.
# TODO(sergiyb): When all GPU slaves are migrated to swarming, we can remove
# assignment of the DISPLAY from /etc/init.d/buildbot because this hack was
# used to run GPU tests on buildbot. After it is removed, we can use DISPLAY
# variable here to check if we are running X.
if subprocess.call(['pidof', 'X'], stdout=subprocess.PIPE) == 0:
try:
print 'DBUS_SESSION_BUS_ADDRESS env var not found, starting dbus-launch'
dbus_output = subprocess.check_output(['dbus-launch']).split('\n')
for line in dbus_output:
m = re.match(r'([^=]+)\=(.+)', line)
if m:
os.environ[m.group(1)] = m.group(2)
print ' setting %s to %s' % (m.group(1), m.group(2))
return True
except (subprocess.CalledProcessError, OSError) as e:
print 'Exception while running dbus_launch: %s' % e
return False
def _ShutdownDBus():
"""Manually kills the previously-launched DBus daemon.
It appears that passing --exit-with-session to dbus-launch in
_LaunchDBus(), above, doesn't cause the launched dbus-daemon to shut
down properly. Manually kill the sub-process using the PID it gave
us at launch time.
This function is called when the flag --spawn-dbus is given, and if
_LaunchDBus(), above, actually spawned the dbus-daemon.
"""
import signal
if 'DBUS_SESSION_BUS_PID' in os.environ:
dbus_pid = os.environ['DBUS_SESSION_BUS_PID']
try:
os.kill(int(dbus_pid), signal.SIGTERM)
print ' killed dbus-daemon with PID %s' % dbus_pid
except OSError as e:
print ' error killing dbus-daemon with PID %s: %s' % (dbus_pid, e)
# Try to clean up any stray DBUS_SESSION_BUS_ADDRESS environment
# variable too. Some of the bots seem to re-invoke runtest.py in a
# way that this variable sticks around from run to run.
if 'DBUS_SESSION_BUS_ADDRESS' in os.environ:
del os.environ['DBUS_SESSION_BUS_ADDRESS']
print ' cleared DBUS_SESSION_BUS_ADDRESS environment variable'
if __name__ == '__main__':
top_level_dir = os.path.dirname(os.path.realpath(__file__))
config = benchmark_runner.ProjectConfig(
top_level_dir=top_level_dir,
benchmark_dirs=[os.path.join(top_level_dir, 'gpu_tests')])
did_launch_dbus = _LaunchDBus()
try:
retcode = benchmark_runner.main(config)
finally:
if did_launch_dbus:
_ShutdownDBus()
sys.exit(retcode)
| vadimtk/chrome4sdp | content/test/gpu/run_gpu_test.py | Python | bsd-3-clause | 3,766 |
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
("forum", "0005_auto_20151119_2224"),
]
state_operations = [
migrations.CreateModel(
name="TopicFollowed",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
("email", models.BooleanField(default=False, db_index=True, verbose_name=b"Notification par courriel")),
("topic", models.ForeignKey(to="forum.Topic", on_delete=models.CASCADE)),
(
"user",
models.ForeignKey(
related_name="topics_followed", to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE
),
),
],
options={
"verbose_name": "Sujet suivi",
"verbose_name_plural": "Sujets suivis",
"db_table": "notification_topicfollowed",
},
bases=(models.Model,),
),
]
operations = [migrations.SeparateDatabaseAndState(state_operations=state_operations)]
| ChantyTaguan/zds-site | zds/notification/migrations/0001_initial.py | Python | gpl-3.0 | 1,221 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
freeseer - vga/presentation capture software
Copyright (C) 2013 Free and Open Source Software Learning Centre
http://fosslc.org
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For support, questions, suggestions or any other inquiries, visit:
http://wiki.github.com/Freeseer/freeseer/
@author: Thanh Ha
'''
from PyQt4.QtCore import Qt
from PyQt4.QtCore import QDate
from PyQt4.QtGui import QDateEdit
from PyQt4.QtGui import QGridLayout
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QLineEdit
from PyQt4.QtGui import QPlainTextEdit
from PyQt4.QtGui import QTimeEdit
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QIcon
from PyQt4.QtGui import QPushButton
class TalkDetailsWidget(QWidget):
def __init__(self, parent=None):
super(TalkDetailsWidget, self).__init__(parent)
self.layout = QGridLayout()
self.setLayout(self.layout)
self.buttonLayout = QHBoxLayout()
saveIcon = QIcon.fromTheme("document-save")
self.saveButton = QPushButton('Save Talk')
self.saveButton.setIcon(saveIcon)
self.buttonLayout.addWidget(self.saveButton)
self.layout.addLayout(self.buttonLayout, 0, 1, 1, 1)
self.titleLabel = QLabel('Title')
self.titleLineEdit = QLineEdit()
self.presenterLabel = QLabel('Presenter')
self.presenterLineEdit = QLineEdit()
self.layout.addWidget(self.titleLabel, 1, 0, 1, 1)
self.layout.addWidget(self.titleLineEdit, 1, 1, 1, 1)
self.layout.addWidget(self.presenterLabel, 1, 2, 1, 1)
self.layout.addWidget(self.presenterLineEdit, 1, 3, 1, 1)
self.eventLabel = QLabel('Event')
self.eventLineEdit = QLineEdit()
self.categoryLabel = QLabel('Category')
self.categoryLineEdit = QLineEdit()
self.layout.addWidget(self.eventLabel, 2, 0, 1, 1)
self.layout.addWidget(self.eventLineEdit, 2, 1, 1, 1)
self.layout.addWidget(self.categoryLabel, 2, 2, 1, 1)
self.layout.addWidget(self.categoryLineEdit, 2, 3, 1, 1)
self.roomLabel = QLabel('Room')
self.roomLineEdit = QLineEdit()
self.dateLayout = QHBoxLayout()
self.dateLabel = QLabel('Date')
self.dateEdit = QDateEdit()
currentDate = QDate()
self.dateEdit.setDate(currentDate.currentDate())
self.dateEdit.setCalendarPopup(True)
self.layout.addWidget(self.roomLabel, 3, 0, 1, 1)
self.layout.addWidget(self.roomLineEdit, 3, 1, 1, 1)
self.dateLayout.addWidget(self.dateEdit)
self.layout.addWidget(self.dateLabel, 3, 2, 1, 1)
self.layout.addLayout(self.dateLayout, 3, 3, 1, 1)
self.startTimeLayout = QHBoxLayout()
self.startTimeLabel = QLabel('Start Time')
self.startTimeEdit = QTimeEdit()
self.startTimeLayout.addWidget(self.startTimeEdit)
self.endTimeLayout = QHBoxLayout()
self.endTimeLabel = QLabel('End Time')
self.endTimeEdit = QTimeEdit()
self.endTimeLayout.addWidget(self.endTimeEdit)
self.layout.addWidget(self.startTimeLabel, 4, 0, 1, 1)
self.layout.addLayout(self.startTimeLayout, 4, 1, 1, 1)
self.layout.addWidget(self.endTimeLabel, 4, 2, 1, 1)
self.layout.addLayout(self.endTimeLayout, 4, 3, 1, 1)
self.descriptionLabel = QLabel('Description')
self.descriptionLabel.setAlignment(Qt.AlignTop)
self.descriptionTextEdit = QPlainTextEdit()
self.layout.addWidget(self.descriptionLabel, 5, 0, 1, 1)
self.layout.addWidget(self.descriptionTextEdit, 5, 1, 1, 3)
def enable_input_fields(self):
self.titleLineEdit.setPlaceholderText("Enter Talk Title")
self.presenterLineEdit.setPlaceholderText("Enter Presenter Name")
self.categoryLineEdit.setPlaceholderText("Enter Category Type")
self.eventLineEdit.setPlaceholderText("Enter Event Name")
self.roomLineEdit.setPlaceholderText("Enter Room Location")
self.titleLineEdit.setEnabled(True)
self.presenterLineEdit.setEnabled(True)
self.categoryLineEdit.setEnabled(True)
self.eventLineEdit.setEnabled(True)
self.roomLineEdit.setEnabled(True)
self.dateEdit.setEnabled(True)
self.startTimeEdit.setEnabled(True)
self.endTimeEdit.setEnabled(True)
self.descriptionTextEdit.setEnabled(True)
def disable_input_fields(self):
self.titleLineEdit.setPlaceholderText("")
self.presenterLineEdit.setPlaceholderText("")
self.categoryLineEdit.setPlaceholderText("")
self.eventLineEdit.setPlaceholderText("")
self.roomLineEdit.setPlaceholderText("")
self.titleLineEdit.setEnabled(False)
self.presenterLineEdit.setEnabled(False)
self.categoryLineEdit.setEnabled(False)
self.eventLineEdit.setEnabled(False)
self.roomLineEdit.setEnabled(False)
self.dateEdit.setEnabled(False)
self.startTimeEdit.setEnabled(False)
self.endTimeEdit.setEnabled(False)
self.descriptionTextEdit.setEnabled(False)
def clear_input_fields(self):
self.titleLineEdit.clear()
self.presenterLineEdit.clear()
self.categoryLineEdit.clear()
self.eventLineEdit.clear()
self.roomLineEdit.clear()
self.descriptionTextEdit.clear()
if __name__ == "__main__":
import sys
from PyQt4.QtGui import QApplication
app = QApplication(sys.argv)
main = TalkDetailsWidget()
main.show()
sys.exit(app.exec_())
| Freeseer/freeseer | src/freeseer/frontend/talkeditor/TalkDetailsWidget.py | Python | gpl-3.0 | 6,299 |
import unittest, time, re
from selenium import selenium
class Action(unittest.TestCase):
def __init__ (self, selenium):
self.sel = selenium
self._diag = True # make True for profiling diagnostics
self._diagResults = None
self._diag_sleepTime = None
self._diag_performCalls = None
#self.openReport();
def openReport(self):
" used to save the diagnostics to a file "
if self._diag:
self._diagResults = open("../results/diagResults.txt", "a")
self._diagResults.write(time.strftime("New Search run %d %b %Y (%H:%M:%S)\n"))
def closeReport(self, msg):
" Close the file that is recording the diagnostics "
if self._diag:
self._diagResults.write(msg)
self._diagResults.close()
# Methods for managing logging into Sahana
# login
# logout
def login(self, username, password, reveal=True):
"""
login to the system using the name provided
username: the username to be used
password: the password of the user
reveal: show the password on any error message
"""
print "Logging in as user: %s" % username
sel = self.sel
if sel.is_element_present("link=Logout"):
# Already logged in check the account
if sel.is_element_present("link=%s" % username):
# already logged in
return
else:
# logged in but as a different user
self.logout()
sel.open("default/user/login")
sel.click("auth_user_email")
sel.type("auth_user_email", username)
sel.type("auth_user_password", password)
sel.click("//input[@value='Login']")
msg = "Unable to log in as %s" % username
if reveal:
msg += " with password %s " % password
self.assertTrue(self.successMsg("Logged in"), msg)
def logout(self):
" logout of the system "
sel = self.sel
if sel.is_element_present("link=Logout"):
sel.click("link=Logout")
#self.successMsg("Logged out")
self.findResponse("Logged out", "")
# Searching methods
# _performSearch
# search
# searchUnique
# clearSearch
def _performSearch(self, searchString):
# Not intended to be called directly:
# Searches using the searchString in the quick search filter box
        # The search filter is part of the http://datatables.net/ JavaScript; getting it to work with Selenium needs a bit of care.
        # Entering text in the filter textbox doesn't always trigger the filtering, and typing alone cannot clear the filter.
# The solution is to put in a call to the DataTables API, namely the fnFilter function
# However, the first time that the fnFilter() is called in the testing suite it doesn't complete the processing, hence it is called twice.
sel = self.sel
clearString = ""
if searchString == '': clearString = "Clearing..."
# First clear the search field and add a short pause
sel.run_script("oTable = $('#list').dataTable(); oTable.fnFilter( '%s' );" % clearString)
time.sleep(1)
self._diag_sleepTime += 1
# Now trigger off the true search
sel.run_script("oTable = $('#list').dataTable(); oTable.fnFilter( '%s' );" % searchString)
for i in range(10):
            if not sel.is_visible('list_processing'):
                return True
time.sleep(1)
self._diag_sleepTime += 1
return False
def search(self, searchString, expected):
# Perform a search using the search string, checking against the expected outcome
# searchString: the value to search for
# expected: the expected result of the search
# return Boolean indicating the success of the search
# side effect: TestCase::fail() called in case of no search data being returned
sel = self.sel
self._diag_sleepTime = 0
self._diag_performCalls = 0
found = False
result = ""
# perform the search it should work first time but, give it up to three attempts before giving up
for i in range (3):
self._diag_performCalls += 1
found = self._performSearch(searchString)
if found:
break
if not found:
if self._diag:
self._diagResults.write("%s\tFAILED\t%s\t%s\n" % (searchString, self._diag_sleepTime, self._diag_performCalls))
self.fail("time out search didn't respond, whilst searching for %s" % searchString)
else:
if self._diag:
self._diagResults.write("%s\tSUCCEEDED\t%s\t%s\n" % (searchString, self._diag_sleepTime, self._diag_performCalls))
# The search has returned now read the results
try:
result = sel.get_text("//div[@id='table-container']")
except:
self.fail("No search data found, whilst searching for %s" % searchString)
return expected in result
def searchUnique(self, uniqueName):
# Perform a search when one and only one result will be returned
# uniqueName: the value to search for
self.search(uniqueName, r"1 entries")
def clearSearch(self):
# Helper function used to clear the search results
self.search("", r"entries")
# Many actions are reported on in Sahana by displaying a banner at the top of the page
# Methods to check each banner for the desired message
# _checkBanner
# _findMsg
# successMsg
# errorMsg
def _checkBanner(self, message, type):
# This method should not be called directly
# message: the message to be searched for in the banner
# type: the type of banner to be searched for (confirmation or error)
# return: boolean reporting success
sel = self.sel
i = 1
while sel.is_element_present('//div[@class="%s"][%s]' % (type, i)):
banner = sel.get_text('//div[@class="%s"][%s]' % (type, i))
if message in banner:
if self._diag:
self._diagResults.write("%s\tSUCCEEDED\t%s\t\n" % (message, self._diag_sleepTime))
return True
            i += 1
        return False
def _findMsg(self, message, type):
# This method should not be called directly.
# Method to locate a message in a div with a class given by type
# The method will pause for up to 10 seconds waiting for the banner to appear.
# message: the message to be searched for in the banner
# type: the type of banner to be searched for (confirmation or error)
# return: boolean reporting success
sel = self.sel
self._diag_sleepTime = 0
for cnt in range (10):
if self._checkBanner(message, type):
return True
time.sleep(1)
self._diag_sleepTime += 1
if self._diag:
self._diagResults.write("%s\tFAILED\t%s\t\n" % (message, self._diag_sleepTime))
return False
def successMsg(self, message):
# Method used to check for confirmation messages
# message: the message to be searched for in the banner
# return: boolean reporting success
return self._findMsg(message, "confirmation")
def errorMsg(self, message):
# Method used to check for error messages
# message: the message to be searched for in the banner
# return: boolean reporting success
return self._findMsg(message, "error")
def findResponse(self, successMsg, errorMsg):
# Method to check on the response of an action by looking at the message
# SuccessMsg: the message to be searched for in the banner upon success
# errorMsg: the message to be searched for in the banner upon failure
# return: boolean reflecting the type of message found
# side effect: exception if neither message found
sel = self.sel
sType = "confirmation"
eType = "error"
self._diag_sleepTime = 0
for cnt in range (10):
if self._checkBanner(successMsg, sType):
return True
if self._checkBanner(errorMsg, eType):
return False
time.sleep(1)
self._diag_sleepTime += 1
if self._diag:
self._diagResults.write("findReponse %s\tFAILED\t%s\t\n" % (successMsg, self._diag_sleepTime))
raise UserWarning("Response not found")
# Methods to manage form manipulation
# saveForm
# checkForm
def saveForm(self, message=None):
# Method to save the details
sel = self.sel
sel.click("//input[@value='Save']")
sel.wait_for_page_to_load("30000")
if message != None:
return self.successMsg(message)
def checkForm (self, elementList, buttonList, helpList):
# Method to check the layout of a form
# elementList: data to check the elements on the form
# buttonList: data to check the buttons on the form
# helpList: data to check the help balloons
# side effects: TestCase::fail() is called if any check failed
# side effects: messages are written out reflecting what was verified
elements = []
failed = []
for element in elementList:
result = self._element(element)
if result == True:
if len(element) > 2 and element[2]: elements.append(element[1])
else: failed.append(result)
for name in buttonList:
self._button(name)
for title in helpList:
self._helpBalloon(title)
if len(failed) > 0:
msg = '/n'.join(failed)
self.fail(msg)
if len(elements) > 0:
print "Verified the following form elements %s" % elements
def _button(self, name):
# Method to check that form button is present
sel = self.sel
element = '//input[@value="%s"]' % (name)
errmsg = "%s button is missing" % (name)
self.assertTrue(sel.is_element_present(element), errmsg)
print "%s button is present" % (name)
def _helpBalloon(self, helpTitle):
# Method to check that the help message is displayed
# helpTitle: the balloon help that is displyed on the form
sel = self.sel
element = "//div[contains(@title,'%s')]" % (helpTitle)
self.assertTrue(sel.is_element_present(element))
sel.mouse_over(element)
self.assertFalse(sel.is_element_present(element), "Help %s is missing" % (helpTitle))
print "Help %s is present" % (helpTitle)
def _element(self, elementDetails):
# Method to check that form _element is present
# The elementDetails parameter is a list of up to 4 elements
# elementDetails[0] the type of HTML tag
# elementDetails[1] the id associated with the HTML tag
# elementDetails[2] *optional* the visibility of the HTML tag
# elementDetails[3] *optional* the value or text of the HTML tag
        # return True on success, or an error message string on failure
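        # Hedged example of an elementDetails list (the id and value are invented):
        #   ("input", "pr_person_first_name", True, "John")
        # i.e. an <input> tag with id "pr_person_first_name" that should be
        # visible and hold the value "John".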
sel = self.sel
type = elementDetails[0]
id = elementDetails[1]
msg = ""
if (len(elementDetails) >= 3):
visible = elementDetails[2]
else:
visible = True
if (len(elementDetails) >= 4):
value = elementDetails[3]
else:
value = None
elementDetails = '//%s[@id="%s"]' % (type, id)
if visible:
if not sel.is_element_present(elementDetails): return "%s element %s is missing" % (type, id)
if sel.is_visible(elementDetails) != visible: return "%s element %s doesn't have a visibility of %s" % (type, id, visible)
if value != None:
actual = sel.get_value(elementDetails)
msg = "expected %s for element %s doesn't equal the actual value of %s" % (value, id, actual)
if value != actual: return msg
return True
    def showElement(self, elementDetails):
        # Method to set an element to be visible
        # elementDetails[2] is the visibility flag read by _element() above
        elementDetails[2] = True
    def hideElement(self, elementDetails):
        # Method to set an element to be hidden
        elementDetails[2] = False
def showNamedElement(self, name, elementList):
# Method to set an element to be visible
# name: The id of the element
# elementList: The element list
for element in elementList:
if element[1] == name:
self.showElement(element)
return True
return False
def hideNamedElement(self, name, elementList):
# Method to set an element to be hidden
# name: The id of the element
# elementList: The element list
for element in elementList:
if element[1] == name:
self.hideElement(element)
return True
return False
# Method to check on the rheading table that displays read only data related to a form
def checkHeading(self, detailMap):
# Method to check the details that are displayed in the heading
# detailMap: A (name, value) pair of the data which is displayed in Sahana as a table
# side effect: Assert the values are present
sel = self.sel
heading = sel.get_text("//div[@id='rheader']/div/table/tbody")
searchString = ""
for key, value in detailMap.items():
msg = "Unable to find details of %s in the header of %s"
self.assertTrue(key in heading, msg % (key, heading))
self.assertTrue(value in heading, msg % (value, heading))
def clickTab(self, name):
# Method to click on a tab
sel = self.sel
element = "//div[@id='rheader_tabs']/span/a[text()='%s']" % (name)
sel.click(element)
sel.wait_for_page_to_load("30000")
def btnLink(self, id, name):
# Method to check button link
sel = self.sel
element = '//a[@id="%s"]' % (id)
errMsg = "%s button is missing" % (name)
self.assertTrue(sel.is_element_present(element), errMsg)
self.assertTrue(sel.get_text(element), errMsg)
print "%s button is present" % (name)
def noBtnLink(self, id, name):
# Method to check button link is not present
sel = self.sel
element = '//a[@id="%s"]' % (id)
errMsg = "Unexpected presence of %s button" % (name)
if sel.is_element_present(element):
self.assertFalse(sel.get_text(element), errMsg)
print "%s button is not present" % (name)
def deleteObject(self, page, objName, type="Object"):
sel = self.sel
# need the following line which reloads the page otherwise the search gets stuck
sel.open(page)
try:
self.searchUnique(objName)
sel.click("link=Delete")
self.assertTrue(re.search(r"^Sure you want to delete this object[\s\S]$", sel.get_confirmation()))
if self.findResponse("%s deleted" % type, "Integrity error:"):
print "%s %s deleted" % (type, objName)
else:
print "Failed to delete %s %s" % (type, objName)
except:
print "Failed to delete %s %s from page %s" % (type, objName, page)
def registerUser(self, first_name, last_name, email, password):
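        # Register a new user via the self-registration form, then verify the account appears in admin/user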
first_name = first_name.strip()
last_name = last_name.strip()
email = email.strip()
password = password.strip()
sel = self.sel
sel.open("default/user/register")
sel.type("auth_user_first_name", first_name)
sel.type("auth_user_last_name", last_name)
sel.select("auth_user_language", "label=English")
sel.type("auth_user_email", email)
sel.type("auth_user_password", password)
sel.type("password_two", password)
sel.click("//input[@value='Register']")
sel.wait_for_page_to_load("30000")
msg = "Unable to register user %s %s with email %s" % (first_name, last_name, email)
self.assertTrue(self.successMsg("Registration successful"), msg)
sel.open("admin/user")
self.searchUnique(email)
self.assertTrue(re.search(r"Showing 1 to 1 of 1 entries", sel.get_text("//div[@class='dataTables_info']")))
print "User %s created" % (email)
def addUser(self, first_name, last_name, email, password):
first_name = first_name.strip()
last_name = last_name.strip()
email = email.strip()
password = password.strip()
sel = self.sel
# TODO only open this page if on another page
sel.open("admin/user")
self.assertTrue(sel.is_element_present("show-add-btn"))
sel.click("show-add-btn")
sel.type("auth_user_first_name", first_name)
sel.type("auth_user_last_name", last_name)
sel.select("auth_user_language", "label=English")
sel.type("auth_user_email", email)
sel.type("auth_user_password", password)
sel.type("password_two", password)
sel.click("//input[@value='Save']")
sel.wait_for_page_to_load("30000")
msg = "Unable to create user %s %s with email %s" % (first_name, last_name, email)
self.assertTrue(self.successMsg("User added"), msg)
self.searchUnique(email)
self.assertTrue(re.search(r"Showing 1 to 1 of 1 entries", sel.get_text("//div[@class='dataTables_info']")))
print "User %s created" % (email)
def addRole(self, email, roles):
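        # Assign one or more space-separated roles to the user identified by email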
email = email.strip()
roles = roles.strip()
roleList = roles.split(" ")
sel = self.sel
self.searchUnique(email)
self.assertEqual("Roles", sel.get_text("//table[@id='list']/tbody/tr[1]/td[1]/a[2]"))
sel.click("//table[@id='list']/tbody/tr[1]/td[1]/a[2]")
sel.wait_for_page_to_load("30000")
for role in roleList:
sel.click("//input[@name='roles' and @value='%s']" % role.strip())
sel.click("//input[@value='Save']")
sel.wait_for_page_to_load("30000")
# @ToDo: Message to get all roles (if multiple) not just last 1
msg = "Failed to add role %s to user %s" % (role.strip() , email)
self.assertTrue(self.successMsg("User Updated"), msg)
print "User %s added to group %s" % (email, role.strip())
sel.open("admin/user")
def delUser(self, email):
email = email.strip()
print "Deleting user %s" % email
sel = self.sel
sel.open("admin/user")
self.searchUnique(email)
sel.click("link=Delete")
self.assertTrue(re.search(r"^Sure you want to delete this object[\s\S]$", sel.get_confirmation()))
self.assertTrue(self.successMsg("User deleted"))
self.search(email, r"No matching records found")
print "User %s deleted" % (email)
| sinsai/Sahana_eden | static/selenium/scripts/actions.py | Python | mit | 19,331 |
#!/usr/bin/env python
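# Copy the LAS file given as argv[1] to argv[2]: pad the output file for all point
# records, then write each point dimension from its own multiprocessing Process.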
import sys
from multiprocessing import Process
sys.path.append("../")
from laspy import file as File
inFile = File.File(sys.argv[1],mode= "r")
outFile = File.File(sys.argv[2],mode= "w", header = inFile.header)
outFile.writer.pad_file_for_point_recs(len(inFile))
outFile.close(ignore_header_changes =True)
spec = inFile.reader.point_format.lookup.keys()
def write_dimension(dimname, dimdata):
file_view = File.File(sys.argv[2], mode = "rw")
file_view.writer.set_dimension(dimname, dimdata)
file_view.close(ignore_header_changes = True)
processes = []
for dimname in spec:
print(dimname)
dataList = list(inFile.reader.get_dimension(dimname))
p = Process(target=write_dimension, args=(dimname, dataList))
p.start()
processes.append(p)
for p in processes:
p.join()
#def f(x):
# print("outFile." + str(x)+" = "+"inFile." + str(x))
# outFile.writer.set_dimension(x, inFile.reader.get_dimension(x))
#map(f, spec)
inFile.close()
| blazbratanic/laspy | misc/file_copy3.py | Python | bsd-2-clause | 995 |
## @file
# This file is used to parse meta files
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# (C) Copyright 2015-2018 Hewlett Packard Enterprise Development LP<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
import Common.LongFilePathOs as os
import re
import time
import copy
from hashlib import md5
import Common.EdkLogger as EdkLogger
import Common.GlobalData as GlobalData
from CommonDataClass.DataClass import *
from Common.DataType import *
from Common.StringUtils import *
from Common.Misc import GuidStructureStringToGuidString, CheckPcdDatum, PathClass, AnalyzePcdData, AnalyzeDscPcd, AnalyzePcdExpression, ParseFieldValue, StructPattern
from Common.Expression import *
from CommonDataClass.Exceptions import *
from Common.LongFilePathSupport import OpenLongFilePath as open
from collections import defaultdict
from .MetaFileTable import MetaFileStorage
from .MetaFileCommentParser import CheckInfComment
## RegEx for finding file versions
hexVersionPattern = re.compile(r'0[xX][\da-fA-F]{5,8}')
decVersionPattern = re.compile(r'\d+\.\d+')
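# e.g. an INF_VERSION of 0x00010005 matches hexVersionPattern, while 1.27 matches decVersionPattern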
CODEPattern = re.compile(r"{CODE\([a-fA-F0-9Xx\{\},\s]*\)}")
## A decorator used to parse macro definition
def ParseMacro(Parser):
def MacroParser(self):
Match = gMacroDefPattern.match(self._CurrentLine)
if not Match:
# Not 'DEFINE/EDK_GLOBAL' statement, call decorated method
Parser(self)
return
TokenList = GetSplitValueList(self._CurrentLine[Match.end(1):], TAB_EQUAL_SPLIT, 1)
# Syntax check
if not TokenList[0]:
EdkLogger.error('Parser', FORMAT_INVALID, "No macro name given",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
if len(TokenList) < 2:
TokenList.append('')
Type = Match.group(1)
Name, Value = TokenList
# Global macros can be only defined via environment variable
if Name in GlobalData.gGlobalDefines:
EdkLogger.error('Parser', FORMAT_INVALID, "%s can only be defined via environment variable" % Name,
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
# Only upper case letters, digit and '_' are allowed
if not gMacroNamePattern.match(Name):
EdkLogger.error('Parser', FORMAT_INVALID, "The macro name must be in the pattern [A-Z][A-Z0-9_]*",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
Value = ReplaceMacro(Value, self._Macros)
if Type in self.DataType:
self._ItemType = self.DataType[Type]
else:
self._ItemType = MODEL_META_DATA_DEFINE
# DEFINE defined macros
if Type == TAB_DSC_DEFINES_DEFINE:
#
# First judge whether this DEFINE is in conditional directive statements or not.
#
if isinstance(self, DscParser) and self._InDirective > -1:
pass
else:
if isinstance(self, DecParser):
if MODEL_META_DATA_HEADER in self._SectionType:
self._FileLocalMacros[Name] = Value
else:
self._ConstructSectionMacroDict(Name, Value)
elif self._SectionType == MODEL_META_DATA_HEADER:
self._FileLocalMacros[Name] = Value
else:
self._ConstructSectionMacroDict(Name, Value)
# EDK_GLOBAL defined macros
elif not isinstance(self, DscParser):
EdkLogger.error('Parser', FORMAT_INVALID, "EDK_GLOBAL can only be used in .dsc file",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
elif self._SectionType != MODEL_META_DATA_HEADER:
EdkLogger.error('Parser', FORMAT_INVALID, "EDK_GLOBAL can only be used under [Defines] section",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
elif (Name in self._FileLocalMacros) and (self._FileLocalMacros[Name] != Value):
EdkLogger.error('Parser', FORMAT_INVALID, "EDK_GLOBAL defined a macro with the same name and different value as one defined by 'DEFINE'",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
self._ValueList = [Type, Name, Value]
return MacroParser
## Base class of parser
#
# This class is used for derivation purposes. The specific parser for one kind
# of file type must derive from this class and implement some public interfaces.
#
# @param FilePath The path of platform description file
# @param FileType The raw data of DSC file
# @param Table Database used to retrieve module/package information
# @param Macros Macros used for replacement in file
# @param Owner Owner ID (for sub-section parsing)
# @param From ID from which the data comes (for !INCLUDE directive)
#
class MetaFileParser(object):
# data type (file content) for specific file type
DataType = {}
# Parser objects used to implement singleton
MetaFiles = {}
## Factory method
#
# One file, one parser object. This factory method makes sure that there's
# only one object constructed for one meta file.
#
# @param Class class object of real AutoGen class
# (InfParser, DecParser or DscParser)
# @param FilePath The path of meta file
# @param *args The specific class related parameters
# @param **kwargs The specific class related dict parameters
#
def __new__(Class, FilePath, *args, **kwargs):
if FilePath in Class.MetaFiles:
return Class.MetaFiles[FilePath]
else:
ParserObject = super(MetaFileParser, Class).__new__(Class)
Class.MetaFiles[FilePath] = ParserObject
return ParserObject
## Constructor of MetaFileParser
#
# Initialize object of MetaFileParser
#
# @param FilePath The path of platform description file
# @param FileType The raw data of DSC file
# @param Arch Default Arch value for filtering sections
# @param Table Database used to retrieve module/package information
# @param Owner Owner ID (for sub-section parsing)
# @param From ID from which the data comes (for !INCLUDE directive)
#
def __init__(self, FilePath, FileType, Arch, Table, Owner= -1, From= -1):
self._Table = Table
self._RawTable = Table
self._Arch = Arch
self._FileType = FileType
self.MetaFile = FilePath
self._FileDir = self.MetaFile.Dir
self._Defines = {}
self._FileLocalMacros = {}
self._SectionsMacroDict = defaultdict(dict)
# for recursive parsing
self._Owner = [Owner]
self._From = From
        # parser status used during parsing
self._ValueList = ['', '', '', '', '']
self._Scope = []
self._LineIndex = 0
self._CurrentLine = ''
self._SectionType = MODEL_UNKNOWN
self._SectionName = ''
self._InSubsection = False
self._SubsectionType = MODEL_UNKNOWN
self._SubsectionName = ''
self._ItemType = MODEL_UNKNOWN
self._LastItem = -1
self._Enabled = 0
self._Finished = False
self._PostProcessed = False
        # Different versions of the meta-file are parsed in different ways.
self._Version = 0
self._GuidDict = {} # for Parser PCD value {GUID(gTokeSpaceGuidName)}
## Store the parsed data in table
def _Store(self, *Args):
return self._Table.Insert(*Args)
## Virtual method for starting parse
def Start(self):
raise NotImplementedError
## Notify a post-process is needed
def DoPostProcess(self):
self._PostProcessed = False
## Set parsing complete flag in both class and table
def _Done(self):
self._Finished = True
self._Table.SetEndFlag()
def _PostProcess(self):
self._PostProcessed = True
## Get the parse complete flag
@property
def Finished(self):
return self._Finished
## Set the complete flag
@Finished.setter
def Finished(self, Value):
self._Finished = Value
## Remove records that do not match given Filter Arch
def _FilterRecordList(self, RecordList, FilterArch):
NewRecordList = []
for Record in RecordList:
Arch = Record[3]
if Arch == TAB_ARCH_COMMON or Arch == FilterArch:
NewRecordList.append(Record)
return NewRecordList
## Use [] style to query data in table, just for readability
#
# DataInfo = [data_type, scope1(arch), scope2(platform/moduletype)]
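    #   e.g. Records = Parser[MODEL_PCD_FIXED_AT_BUILD, 'IA32']   (illustrative usage)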
#
def __getitem__(self, DataInfo):
if not isinstance(DataInfo, type(())):
DataInfo = (DataInfo,)
# Parse the file first, if necessary
self.StartParse()
# No specific ARCH or Platform given, use raw data
if self._RawTable and (len(DataInfo) == 1 or DataInfo[1] is None):
return self._FilterRecordList(self._RawTable.Query(*DataInfo), self._Arch)
# Do post-process if necessary
if not self._PostProcessed:
self._PostProcess()
return self._FilterRecordList(self._Table.Query(*DataInfo), DataInfo[1])
def StartParse(self):
if not self._Finished:
if self._RawTable.IsIntegrity():
self._Finished = True
else:
self._Table = self._RawTable
self._PostProcessed = False
self.Start()
    ## Data parser for the common format in different types of files
    #
    #   The common format in the meta file is like
#
# xxx1 | xxx2 | xxx3
#
@ParseMacro
def _CommonParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT)
self._ValueList[0:len(TokenList)] = TokenList
## Data parser for the format in which there's path
#
# Only path can have macro used. So we need to replace them before use.
#
@ParseMacro
def _PathParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT)
self._ValueList[0:len(TokenList)] = TokenList
# Don't do macro replacement for dsc file at this point
if not isinstance(self, DscParser):
Macros = self._Macros
self._ValueList = [ReplaceMacro(Value, Macros) for Value in self._ValueList]
## Skip unsupported data
def _Skip(self):
EdkLogger.warn("Parser", "Unrecognized content", File=self.MetaFile,
Line=self._LineIndex + 1, ExtraData=self._CurrentLine);
self._ValueList[0:1] = [self._CurrentLine]
## Skip unsupported data for UserExtension Section
def _SkipUserExtension(self):
self._ValueList[0:1] = [self._CurrentLine]
## Section header parser
#
# The section header is always in following format:
#
# [section_name.arch<.platform|module_type>]
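    #       e.g. [Sources.IA32] or [LibraryClasses.common.DXE_DRIVER]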
#
def _SectionHeaderParser(self):
self._Scope = []
self._SectionName = ''
ArchList = set()
for Item in GetSplitValueList(self._CurrentLine[1:-1], TAB_COMMA_SPLIT):
if Item == '':
continue
ItemList = GetSplitValueList(Item, TAB_SPLIT, 3)
            # different section names must not be mixed in one section header
if self._SectionName != '' and self._SectionName != ItemList[0].upper():
EdkLogger.error('Parser', FORMAT_INVALID, "Different section names in the same section",
File=self.MetaFile, Line=self._LineIndex + 1, ExtraData=self._CurrentLine)
self._SectionName = ItemList[0].upper()
if self._SectionName in self.DataType:
self._SectionType = self.DataType[self._SectionName]
# Check if the section name is valid
if self._SectionName not in SECTIONS_HAVE_ITEM_AFTER_ARCH_SET and len(ItemList) > 3:
EdkLogger.error("Parser", FORMAT_UNKNOWN_ERROR, "%s is not a valid section name" % Item,
self.MetaFile, self._LineIndex + 1, self._CurrentLine)
elif self._Version >= 0x00010005:
EdkLogger.error("Parser", FORMAT_UNKNOWN_ERROR, "%s is not a valid section name" % Item,
self.MetaFile, self._LineIndex + 1, self._CurrentLine)
else:
self._SectionType = MODEL_UNKNOWN
# S1 is always Arch
if len(ItemList) > 1:
S1 = ItemList[1].upper()
else:
S1 = TAB_ARCH_COMMON
ArchList.add(S1)
# S2 may be Platform or ModuleType
if len(ItemList) > 2:
if self._SectionName.upper() in SECTIONS_HAVE_ITEM_PCD_SET:
S2 = ItemList[2]
else:
S2 = ItemList[2].upper()
else:
S2 = TAB_COMMON
if len(ItemList) > 3:
S3 = ItemList[3]
else:
S3 = TAB_COMMON
self._Scope.append([S1, S2, S3])
        # 'COMMON' must not be used with specific ARCHs in the same section
if TAB_ARCH_COMMON in ArchList and len(ArchList) > 1:
EdkLogger.error('Parser', FORMAT_INVALID, "'common' ARCH must not be used with specific ARCHs",
File=self.MetaFile, Line=self._LineIndex + 1, ExtraData=self._CurrentLine)
# If the section information is needed later, it should be stored in database
self._ValueList[0] = self._SectionName
## [defines] section parser
@ParseMacro
def _DefineParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_EQUAL_SPLIT, 1)
self._ValueList[1:len(TokenList)] = TokenList
if not self._ValueList[1]:
EdkLogger.error('Parser', FORMAT_INVALID, "No name specified",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
if not self._ValueList[2]:
EdkLogger.error('Parser', FORMAT_INVALID, "No value specified",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
self._ValueList = [ReplaceMacro(Value, self._Macros) for Value in self._ValueList]
Name, Value = self._ValueList[1], self._ValueList[2]
MacroUsed = GlobalData.gMacroRefPattern.findall(Value)
if len(MacroUsed) != 0:
for Macro in MacroUsed:
if Macro in GlobalData.gGlobalDefines:
EdkLogger.error("Parser", FORMAT_INVALID, "Global macro %s is not permitted." % (Macro), ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
else:
EdkLogger.error("Parser", FORMAT_INVALID, "%s not defined" % (Macro), ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
        # Sometimes, we need to distinguish between EDK and EDK2 modules
if Name == 'INF_VERSION':
if hexVersionPattern.match(Value):
self._Version = int(Value, 0)
elif decVersionPattern.match(Value):
ValueList = Value.split('.')
Major = int(ValueList[0], 0)
Minor = int(ValueList[1], 0)
if Major > 0xffff or Minor > 0xffff:
EdkLogger.error('Parser', FORMAT_INVALID, "Invalid version number",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
self._Version = int('0x{0:04x}{1:04x}'.format(Major, Minor), 0)
else:
EdkLogger.error('Parser', FORMAT_INVALID, "Invalid version number",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
if isinstance(self, InfParser) and self._Version < 0x00010005:
# EDK module allows using defines as macros
self._FileLocalMacros[Name] = Value
self._Defines[Name] = Value
## [BuildOptions] section parser
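    #   e.g. GCC:*_*_*_CC_FLAGS = -DMDEPKG_NDEBUG   (the 'GCC:' toolchain family prefix is optional)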
@ParseMacro
def _BuildOptionParser(self):
self._CurrentLine = CleanString(self._CurrentLine, BuildOption=True)
TokenList = GetSplitValueList(self._CurrentLine, TAB_EQUAL_SPLIT, 1)
TokenList2 = GetSplitValueList(TokenList[0], ':', 1)
if len(TokenList2) == 2:
self._ValueList[0] = TokenList2[0] # toolchain family
self._ValueList[1] = TokenList2[1] # keys
else:
self._ValueList[1] = TokenList[0]
if len(TokenList) == 2 and not isinstance(self, DscParser): # value
self._ValueList[2] = ReplaceMacro(TokenList[1], self._Macros)
if self._ValueList[1].count('_') != 4:
EdkLogger.error(
'Parser',
FORMAT_INVALID,
"'%s' must be in format of <TARGET>_<TOOLCHAIN>_<ARCH>_<TOOL>_FLAGS" % self._ValueList[1],
ExtraData=self._CurrentLine,
File=self.MetaFile,
Line=self._LineIndex + 1
)
def GetValidExpression(self, TokenSpaceGuid, PcdCName):
return self._Table.GetValidExpression(TokenSpaceGuid, PcdCName)
@property
def _Macros(self):
Macros = {}
Macros.update(self._FileLocalMacros)
Macros.update(self._GetApplicableSectionMacro())
return Macros
## Construct section Macro dict
def _ConstructSectionMacroDict(self, Name, Value):
ScopeKey = [(Scope[0], Scope[1], Scope[2]) for Scope in self._Scope]
ScopeKey = tuple(ScopeKey)
#
        # DecParser SectionType is a list; it contains more than one item only in a PCD section.
        # As macro usage is not allowed in PCD sections, it is safe here.
#
if isinstance(self, DecParser):
SectionDictKey = self._SectionType[0], ScopeKey
else:
SectionDictKey = self._SectionType, ScopeKey
self._SectionsMacroDict[SectionDictKey][Name] = Value
    ## Get section macros that are applicable to the current line; they may come from other
    ## sections that share the same name but have a wider scope
def _GetApplicableSectionMacro(self):
Macros = {}
ComComMacroDict = {}
ComSpeMacroDict = {}
SpeSpeMacroDict = {}
ActiveSectionType = self._SectionType
if isinstance(self, DecParser):
ActiveSectionType = self._SectionType[0]
for (SectionType, Scope) in self._SectionsMacroDict:
if SectionType != ActiveSectionType:
continue
for ActiveScope in self._Scope:
Scope0, Scope1, Scope2= ActiveScope[0], ActiveScope[1], ActiveScope[2]
if(Scope0, Scope1, Scope2) not in Scope:
break
else:
SpeSpeMacroDict.update(self._SectionsMacroDict[(SectionType, Scope)])
for ActiveScope in self._Scope:
Scope0, Scope1, Scope2 = ActiveScope[0], ActiveScope[1], ActiveScope[2]
if(Scope0, Scope1, Scope2) not in Scope and (Scope0, TAB_COMMON, TAB_COMMON) not in Scope and (TAB_COMMON, Scope1, TAB_COMMON) not in Scope:
break
else:
ComSpeMacroDict.update(self._SectionsMacroDict[(SectionType, Scope)])
if (TAB_COMMON, TAB_COMMON, TAB_COMMON) in Scope:
ComComMacroDict.update(self._SectionsMacroDict[(SectionType, Scope)])
Macros.update(ComComMacroDict)
Macros.update(ComSpeMacroDict)
Macros.update(SpeSpeMacroDict)
return Macros
_SectionParser = {}
## INF file parser class
#
# @param FilePath The path of platform description file
# @param FileType The raw data of DSC file
# @param Table Database used to retrieve module/package information
# @param Macros Macros used for replacement in file
#
class InfParser(MetaFileParser):
# INF file supported data types (one type per section)
DataType = {
TAB_UNKNOWN.upper() : MODEL_UNKNOWN,
TAB_INF_DEFINES.upper() : MODEL_META_DATA_HEADER,
TAB_DSC_DEFINES_DEFINE : MODEL_META_DATA_DEFINE,
TAB_BUILD_OPTIONS.upper() : MODEL_META_DATA_BUILD_OPTION,
TAB_INCLUDES.upper() : MODEL_EFI_INCLUDE,
TAB_LIBRARIES.upper() : MODEL_EFI_LIBRARY_INSTANCE,
TAB_LIBRARY_CLASSES.upper() : MODEL_EFI_LIBRARY_CLASS,
TAB_PACKAGES.upper() : MODEL_META_DATA_PACKAGE,
TAB_NMAKE.upper() : MODEL_META_DATA_NMAKE,
TAB_INF_FIXED_PCD.upper() : MODEL_PCD_FIXED_AT_BUILD,
TAB_INF_PATCH_PCD.upper() : MODEL_PCD_PATCHABLE_IN_MODULE,
TAB_INF_FEATURE_PCD.upper() : MODEL_PCD_FEATURE_FLAG,
TAB_INF_PCD_EX.upper() : MODEL_PCD_DYNAMIC_EX,
TAB_INF_PCD.upper() : MODEL_PCD_DYNAMIC,
TAB_SOURCES.upper() : MODEL_EFI_SOURCE_FILE,
TAB_GUIDS.upper() : MODEL_EFI_GUID,
TAB_PROTOCOLS.upper() : MODEL_EFI_PROTOCOL,
TAB_PPIS.upper() : MODEL_EFI_PPI,
TAB_DEPEX.upper() : MODEL_EFI_DEPEX,
TAB_BINARIES.upper() : MODEL_EFI_BINARY_FILE,
TAB_USER_EXTENSIONS.upper() : MODEL_META_DATA_USER_EXTENSION
}
## Constructor of InfParser
#
# Initialize object of InfParser
#
# @param FilePath The path of module description file
# @param FileType The raw data of DSC file
# @param Arch Default Arch value for filtering sections
# @param Table Database used to retrieve module/package information
#
def __init__(self, FilePath, FileType, Arch, Table):
# prevent re-initialization
if hasattr(self, "_Table"):
return
MetaFileParser.__init__(self, FilePath, FileType, Arch, Table)
self.PcdsDict = {}
## Parser starter
def Start(self):
NmakeLine = ''
Content = ''
try:
Content = open(str(self.MetaFile), 'r').readlines()
except:
EdkLogger.error("Parser", FILE_READ_FAILURE, ExtraData=self.MetaFile)
# parse the file line by line
IsFindBlockComment = False
GetHeaderComment = False
TailComments = []
SectionComments = []
Comments = []
for Index in range(0, len(Content)):
# skip empty, commented, block commented lines
Line, Comment = CleanString2(Content[Index], AllowCppStyleComment=True)
NextLine = ''
if Index + 1 < len(Content):
NextLine, NextComment = CleanString2(Content[Index + 1])
if Line == '':
if Comment:
Comments.append((Comment, Index + 1))
elif GetHeaderComment:
SectionComments.extend(Comments)
Comments = []
continue
if Line.find(DataType.TAB_COMMENT_EDK_START) > -1:
IsFindBlockComment = True
continue
if Line.find(DataType.TAB_COMMENT_EDK_END) > -1:
IsFindBlockComment = False
continue
if IsFindBlockComment:
continue
self._LineIndex = Index
self._CurrentLine = Line
# section header
if Line[0] == TAB_SECTION_START and Line[-1] == TAB_SECTION_END:
if not GetHeaderComment:
for Cmt, LNo in Comments:
self._Store(MODEL_META_DATA_HEADER_COMMENT, Cmt, '', '', TAB_COMMON,
TAB_COMMON, self._Owner[-1], LNo, -1, LNo, -1, 0)
GetHeaderComment = True
else:
TailComments.extend(SectionComments + Comments)
Comments = []
self._SectionHeaderParser()
# Check invalid sections
if self._Version < 0x00010005:
if self._SectionType in [MODEL_META_DATA_BUILD_OPTION,
MODEL_EFI_LIBRARY_CLASS,
MODEL_META_DATA_PACKAGE,
MODEL_PCD_FIXED_AT_BUILD,
MODEL_PCD_PATCHABLE_IN_MODULE,
MODEL_PCD_FEATURE_FLAG,
MODEL_PCD_DYNAMIC_EX,
MODEL_PCD_DYNAMIC,
MODEL_EFI_GUID,
MODEL_EFI_PROTOCOL,
MODEL_EFI_PPI,
MODEL_META_DATA_USER_EXTENSION]:
EdkLogger.error('Parser', FORMAT_INVALID,
"Section [%s] is not allowed in inf file without version" % (self._SectionName),
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
elif self._SectionType in [MODEL_EFI_INCLUDE,
MODEL_EFI_LIBRARY_INSTANCE,
MODEL_META_DATA_NMAKE]:
EdkLogger.error('Parser', FORMAT_INVALID,
"Section [%s] is not allowed in inf file with version 0x%08x" % (self._SectionName, self._Version),
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
continue
# merge two lines specified by '\' in section NMAKE
elif self._SectionType == MODEL_META_DATA_NMAKE:
if Line[-1] == '\\':
if NextLine == '':
self._CurrentLine = NmakeLine + Line[0:-1]
NmakeLine = ''
else:
if NextLine[0] == TAB_SECTION_START and NextLine[-1] == TAB_SECTION_END:
self._CurrentLine = NmakeLine + Line[0:-1]
NmakeLine = ''
else:
NmakeLine = NmakeLine + ' ' + Line[0:-1]
continue
else:
self._CurrentLine = NmakeLine + Line
NmakeLine = ''
# section content
self._ValueList = ['', '', '']
# parse current line, result will be put in self._ValueList
self._SectionParser[self._SectionType](self)
if self._ValueList is None or self._ItemType == MODEL_META_DATA_DEFINE:
self._ItemType = -1
Comments = []
continue
if Comment:
Comments.append((Comment, Index + 1))
if GlobalData.gOptions and GlobalData.gOptions.CheckUsage:
CheckInfComment(self._SectionType, Comments, str(self.MetaFile), Index + 1, self._ValueList)
#
# Model, Value1, Value2, Value3, Arch, Platform, BelongsToItem=-1,
# LineBegin=-1, ColumnBegin=-1, LineEnd=-1, ColumnEnd=-1, Enabled=-1
#
for Arch, Platform, _ in self._Scope:
LastItem = self._Store(self._SectionType,
self._ValueList[0],
self._ValueList[1],
self._ValueList[2],
Arch,
Platform,
self._Owner[-1],
self._LineIndex + 1,
- 1,
self._LineIndex + 1,
- 1,
0
)
for Comment, LineNo in Comments:
self._Store(MODEL_META_DATA_COMMENT, Comment, '', '', Arch, Platform,
LastItem, LineNo, -1, LineNo, -1, 0)
Comments = []
SectionComments = []
TailComments.extend(SectionComments + Comments)
if IsFindBlockComment:
EdkLogger.error("Parser", FORMAT_INVALID, "Open block comments (starting with /*) are expected to end with */",
File=self.MetaFile)
# If there are tail comments in INF file, save to database whatever the comments are
for Comment in TailComments:
self._Store(MODEL_META_DATA_TAIL_COMMENT, Comment[0], '', '', TAB_COMMON,
TAB_COMMON, self._Owner[-1], -1, -1, -1, -1, 0)
self._Done()
## Data parser for the format in which there's path
#
# Only path can have macro used. So we need to replace them before use.
#
def _IncludeParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT)
self._ValueList[0:len(TokenList)] = TokenList
Macros = self._Macros
if Macros:
for Index in range(0, len(self._ValueList)):
Value = self._ValueList[Index]
if not Value:
continue
if Value.upper().find('$(EFI_SOURCE)\Edk'.upper()) > -1 or Value.upper().find('$(EFI_SOURCE)/Edk'.upper()) > -1:
Value = '$(EDK_SOURCE)' + Value[17:]
if Value.find('$(EFI_SOURCE)') > -1 or Value.find('$(EDK_SOURCE)') > -1:
pass
elif Value.startswith('.'):
pass
elif Value.startswith('$('):
pass
else:
Value = '$(EFI_SOURCE)/' + Value
self._ValueList[Index] = ReplaceMacro(Value, Macros)
## Parse [Sources] section
#
# Only path can have macro used. So we need to replace them before use.
#
@ParseMacro
def _SourceFileParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT)
self._ValueList[0:len(TokenList)] = TokenList
Macros = self._Macros
# For Acpi tables, remove macro like ' TABLE_NAME=Sata1'
if 'COMPONENT_TYPE' in Macros:
if self._Defines['COMPONENT_TYPE'].upper() == 'ACPITABLE':
self._ValueList[0] = GetSplitValueList(self._ValueList[0], ' ', 1)[0]
if self._Defines['BASE_NAME'] == 'Microcode':
pass
self._ValueList = [ReplaceMacro(Value, Macros) for Value in self._ValueList]
## Parse [Binaries] section
#
# Only path can have macro used. So we need to replace them before use.
#
@ParseMacro
def _BinaryFileParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT, 2)
if len(TokenList) < 2:
EdkLogger.error('Parser', FORMAT_INVALID, "No file type or path specified",
ExtraData=self._CurrentLine + " (<FileType> | <FilePath> [| <Target>])",
File=self.MetaFile, Line=self._LineIndex + 1)
if not TokenList[0]:
EdkLogger.error('Parser', FORMAT_INVALID, "No file type specified",
ExtraData=self._CurrentLine + " (<FileType> | <FilePath> [| <Target>])",
File=self.MetaFile, Line=self._LineIndex + 1)
if not TokenList[1]:
EdkLogger.error('Parser', FORMAT_INVALID, "No file path specified",
ExtraData=self._CurrentLine + " (<FileType> | <FilePath> [| <Target>])",
File=self.MetaFile, Line=self._LineIndex + 1)
self._ValueList[0:len(TokenList)] = TokenList
self._ValueList[1] = ReplaceMacro(self._ValueList[1], self._Macros)
## [nmake] section parser (Edk.x style only)
def _NmakeParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_EQUAL_SPLIT, 1)
self._ValueList[0:len(TokenList)] = TokenList
# remove macros
self._ValueList[1] = ReplaceMacro(self._ValueList[1], self._Macros)
# remove self-reference in macro setting
#self._ValueList[1] = ReplaceMacro(self._ValueList[1], {self._ValueList[0]:''})
## [FixedPcd], [FeaturePcd], [PatchPcd], [Pcd] and [PcdEx] sections parser
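    #   Entry format: <TokenSpaceGuidCName>.<PcdCName>[|<Value>]
    #   e.g. gExampleTokenSpaceGuid.PcdExample|TRUE   (hypothetical names)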
@ParseMacro
def _PcdParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT, 1)
ValueList = GetSplitValueList(TokenList[0], TAB_SPLIT)
if len(ValueList) != 2:
EdkLogger.error('Parser', FORMAT_INVALID, "Illegal token space GUID and PCD name format",
ExtraData=self._CurrentLine + " (<TokenSpaceGuidCName>.<PcdCName>)",
File=self.MetaFile, Line=self._LineIndex + 1)
self._ValueList[0:1] = ValueList
if len(TokenList) > 1:
self._ValueList[2] = TokenList[1]
if self._ValueList[0] == '' or self._ValueList[1] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "No token space GUID or PCD name specified",
ExtraData=self._CurrentLine + " (<TokenSpaceGuidCName>.<PcdCName>)",
File=self.MetaFile, Line=self._LineIndex + 1)
        # If the value is 'True', 'true', 'TRUE' or 'False', 'false', 'FALSE', replace it with integer 1 or 0.
if self._ValueList[2] != '':
InfPcdValueList = GetSplitValueList(TokenList[1], TAB_VALUE_SPLIT, 1)
if InfPcdValueList[0] in ['True', 'true', 'TRUE']:
self._ValueList[2] = TokenList[1].replace(InfPcdValueList[0], '1', 1);
elif InfPcdValueList[0] in ['False', 'false', 'FALSE']:
self._ValueList[2] = TokenList[1].replace(InfPcdValueList[0], '0', 1);
if (self._ValueList[0], self._ValueList[1]) not in self.PcdsDict:
self.PcdsDict[self._ValueList[0], self._ValueList[1]] = self._SectionType
elif self.PcdsDict[self._ValueList[0], self._ValueList[1]] != self._SectionType:
EdkLogger.error('Parser', FORMAT_INVALID, "It is not permissible to list a specified PCD in different PCD type sections.",
ExtraData=self._CurrentLine + " (<TokenSpaceGuidCName>.<PcdCName>)",
File=self.MetaFile, Line=self._LineIndex + 1)
## [depex] section parser
@ParseMacro
def _DepexParser(self):
self._ValueList[0:1] = [self._CurrentLine]
_SectionParser = {
MODEL_UNKNOWN : MetaFileParser._Skip,
MODEL_META_DATA_HEADER : MetaFileParser._DefineParser,
MODEL_META_DATA_BUILD_OPTION : MetaFileParser._BuildOptionParser,
MODEL_EFI_INCLUDE : _IncludeParser, # for Edk.x modules
MODEL_EFI_LIBRARY_INSTANCE : MetaFileParser._CommonParser, # for Edk.x modules
MODEL_EFI_LIBRARY_CLASS : MetaFileParser._PathParser,
MODEL_META_DATA_PACKAGE : MetaFileParser._PathParser,
MODEL_META_DATA_NMAKE : _NmakeParser, # for Edk.x modules
MODEL_PCD_FIXED_AT_BUILD : _PcdParser,
MODEL_PCD_PATCHABLE_IN_MODULE : _PcdParser,
MODEL_PCD_FEATURE_FLAG : _PcdParser,
MODEL_PCD_DYNAMIC_EX : _PcdParser,
MODEL_PCD_DYNAMIC : _PcdParser,
MODEL_EFI_SOURCE_FILE : _SourceFileParser,
MODEL_EFI_GUID : MetaFileParser._CommonParser,
MODEL_EFI_PROTOCOL : MetaFileParser._CommonParser,
MODEL_EFI_PPI : MetaFileParser._CommonParser,
MODEL_EFI_DEPEX : _DepexParser,
MODEL_EFI_BINARY_FILE : _BinaryFileParser,
MODEL_META_DATA_USER_EXTENSION : MetaFileParser._SkipUserExtension,
}
## DSC file parser class
#
# @param FilePath The path of platform description file
# @param FileType The raw data of DSC file
# @param Table Database used to retrieve module/package information
# @param Macros Macros used for replacement in file
# @param Owner Owner ID (for sub-section parsing)
# @param From ID from which the data comes (for !INCLUDE directive)
#
class DscParser(MetaFileParser):
# DSC file supported data types (one type per section)
DataType = {
TAB_SKUIDS.upper() : MODEL_EFI_SKU_ID,
TAB_DEFAULT_STORES.upper() : MODEL_EFI_DEFAULT_STORES,
TAB_LIBRARIES.upper() : MODEL_EFI_LIBRARY_INSTANCE,
TAB_LIBRARY_CLASSES.upper() : MODEL_EFI_LIBRARY_CLASS,
TAB_BUILD_OPTIONS.upper() : MODEL_META_DATA_BUILD_OPTION,
TAB_PCDS_FIXED_AT_BUILD_NULL.upper() : MODEL_PCD_FIXED_AT_BUILD,
TAB_PCDS_PATCHABLE_IN_MODULE_NULL.upper() : MODEL_PCD_PATCHABLE_IN_MODULE,
TAB_PCDS_FEATURE_FLAG_NULL.upper() : MODEL_PCD_FEATURE_FLAG,
TAB_PCDS_DYNAMIC_DEFAULT_NULL.upper() : MODEL_PCD_DYNAMIC_DEFAULT,
TAB_PCDS_DYNAMIC_HII_NULL.upper() : MODEL_PCD_DYNAMIC_HII,
TAB_PCDS_DYNAMIC_VPD_NULL.upper() : MODEL_PCD_DYNAMIC_VPD,
TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL.upper() : MODEL_PCD_DYNAMIC_EX_DEFAULT,
TAB_PCDS_DYNAMIC_EX_HII_NULL.upper() : MODEL_PCD_DYNAMIC_EX_HII,
TAB_PCDS_DYNAMIC_EX_VPD_NULL.upper() : MODEL_PCD_DYNAMIC_EX_VPD,
TAB_COMPONENTS.upper() : MODEL_META_DATA_COMPONENT,
TAB_COMPONENTS_SOURCE_OVERRIDE_PATH.upper() : MODEL_META_DATA_COMPONENT_SOURCE_OVERRIDE_PATH,
TAB_DSC_DEFINES.upper() : MODEL_META_DATA_HEADER,
TAB_DSC_DEFINES_DEFINE : MODEL_META_DATA_DEFINE,
TAB_DSC_DEFINES_EDKGLOBAL : MODEL_META_DATA_GLOBAL_DEFINE,
TAB_INCLUDE.upper() : MODEL_META_DATA_INCLUDE,
TAB_IF.upper() : MODEL_META_DATA_CONDITIONAL_STATEMENT_IF,
TAB_IF_DEF.upper() : MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF,
TAB_IF_N_DEF.upper() : MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF,
TAB_ELSE_IF.upper() : MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSEIF,
TAB_ELSE.upper() : MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE,
TAB_END_IF.upper() : MODEL_META_DATA_CONDITIONAL_STATEMENT_ENDIF,
TAB_USER_EXTENSIONS.upper() : MODEL_META_DATA_USER_EXTENSION,
TAB_ERROR.upper() : MODEL_META_DATA_CONDITIONAL_STATEMENT_ERROR,
}
# Valid names in define section
DefineKeywords = [
"DSC_SPECIFICATION",
"PLATFORM_NAME",
"PLATFORM_GUID",
"PLATFORM_VERSION",
"SKUID_IDENTIFIER",
"PCD_INFO_GENERATION",
"PCD_VAR_CHECK_GENERATION",
"SUPPORTED_ARCHITECTURES",
"BUILD_TARGETS",
"OUTPUT_DIRECTORY",
"FLASH_DEFINITION",
"BUILD_NUMBER",
"RFC_LANGUAGES",
"ISO_LANGUAGES",
"TIME_STAMP_FILE",
"VPD_TOOL_GUID",
"FIX_LOAD_TOP_MEMORY_ADDRESS",
"PREBUILD",
"POSTBUILD"
]
SubSectionDefineKeywords = [
"FILE_GUID"
]
SymbolPattern = ValueExpression.SymbolPattern
IncludedFiles = set()
## Constructor of DscParser
#
# Initialize object of DscParser
#
# @param FilePath The path of platform description file
# @param FileType The raw data of DSC file
# @param Arch Default Arch value for filtering sections
# @param Table Database used to retrieve module/package information
# @param Owner Owner ID (for sub-section parsing)
# @param From ID from which the data comes (for !INCLUDE directive)
#
def __init__(self, FilePath, FileType, Arch, Table, Owner= -1, From= -1):
# prevent re-initialization
if hasattr(self, "_Table") and self._Table is Table:
return
MetaFileParser.__init__(self, FilePath, FileType, Arch, Table, Owner, From)
self._Version = 0x00010005 # Only EDK2 dsc file is supported
# to store conditional directive evaluation result
self._DirectiveStack = []
self._DirectiveEvalStack = []
self._Enabled = 1
#
        # Track whether the current line is inside a conditional directive block (-1 means it is not)
#
self._InDirective = -1
        # Final valid replaceable symbols
self._Symbols = {}
#
# Map the ID between the original table and new table to track
# the owner item
#
self._IdMapping = {-1:-1}
self._PcdCodeValue = ""
self._PcdDataTypeCODE = False
self._CurrentPcdName = ""
self._Content = None
## Parser starter
def Start(self):
Content = ''
try:
Content = open(str(self.MetaFile), 'r').readlines()
except:
EdkLogger.error("Parser", FILE_READ_FAILURE, ExtraData=self.MetaFile)
OwnerId = {}
Content = self.ProcessMultipleLineCODEValue(Content)
for Index in range(0, len(Content)):
Line = CleanString(Content[Index])
# skip empty line
if Line == '':
continue
self._CurrentLine = Line
self._LineIndex = Index
if self._InSubsection and self._Owner[-1] == -1:
self._Owner.append(self._LastItem)
# section header
if Line[0] == TAB_SECTION_START and Line[-1] == TAB_SECTION_END:
self._SectionType = MODEL_META_DATA_SECTION_HEADER
# subsection ending
elif Line[0] == '}' and self._InSubsection:
self._InSubsection = False
self._SubsectionType = MODEL_UNKNOWN
self._SubsectionName = ''
self._Owner[-1] = -1
OwnerId.clear()
continue
# subsection header
elif Line[0] == TAB_OPTION_START and Line[-1] == TAB_OPTION_END:
self._SubsectionType = MODEL_META_DATA_SUBSECTION_HEADER
# directive line
elif Line[0] == '!':
TokenList = GetSplitValueList(Line, ' ', 1)
if TokenList[0] == TAB_INCLUDE:
for Arch, ModuleType, DefaultStore in self._Scope:
if self._SubsectionType != MODEL_UNKNOWN and Arch in OwnerId:
self._Owner[-1] = OwnerId[Arch]
self._DirectiveParser()
else:
self._DirectiveParser()
continue
if Line[0] == TAB_OPTION_START and not self._InSubsection:
EdkLogger.error("Parser", FILE_READ_FAILURE, "Missing the '{' before %s in Line %s" % (Line, Index+1), ExtraData=self.MetaFile)
if self._InSubsection:
SectionType = self._SubsectionType
else:
SectionType = self._SectionType
self._ItemType = SectionType
self._ValueList = ['', '', '']
self._SectionParser[SectionType](self)
if self._ValueList is None:
continue
#
# Model, Value1, Value2, Value3, Arch, ModuleType, BelongsToItem=-1, BelongsToFile=-1,
# LineBegin=-1, ColumnBegin=-1, LineEnd=-1, ColumnEnd=-1, Enabled=-1
#
for Arch, ModuleType, DefaultStore in self._Scope:
Owner = self._Owner[-1]
if self._SubsectionType != MODEL_UNKNOWN and Arch in OwnerId:
Owner = OwnerId[Arch]
self._LastItem = self._Store(
self._ItemType,
self._ValueList[0],
self._ValueList[1],
self._ValueList[2],
Arch,
ModuleType,
DefaultStore,
Owner,
self._From,
self._LineIndex + 1,
- 1,
self._LineIndex + 1,
- 1,
self._Enabled
)
if self._SubsectionType == MODEL_UNKNOWN and self._InSubsection:
OwnerId[Arch] = self._LastItem
if self._DirectiveStack:
Type, Line, Text = self._DirectiveStack[-1]
EdkLogger.error('Parser', FORMAT_INVALID, "No matching '!endif' found",
ExtraData=Text, File=self.MetaFile, Line=Line)
self._Done()
## <subsection_header> parser
def _SubsectionHeaderParser(self):
self._SubsectionName = self._CurrentLine[1:-1].upper()
if self._SubsectionName in self.DataType:
self._SubsectionType = self.DataType[self._SubsectionName]
else:
self._SubsectionType = MODEL_UNKNOWN
EdkLogger.warn("Parser", "Unrecognized sub-section", File=self.MetaFile,
Line=self._LineIndex + 1, ExtraData=self._CurrentLine)
self._ValueList[0] = self._SubsectionName
## Directive statement parser
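    #   e.g. !include Some/Platform.dsc, !if $(FOO) == TRUE, !ifdef $(BAR), !endif   (illustrative values)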
def _DirectiveParser(self):
self._ValueList = ['', '', '']
TokenList = GetSplitValueList(self._CurrentLine, ' ', 1)
self._ValueList[0:len(TokenList)] = TokenList
# Syntax check
DirectiveName = self._ValueList[0].upper()
if DirectiveName not in self.DataType:
EdkLogger.error("Parser", FORMAT_INVALID, "Unknown directive [%s]" % DirectiveName,
File=self.MetaFile, Line=self._LineIndex + 1)
if DirectiveName in ['!IF', '!IFDEF', '!IFNDEF']:
self._InDirective += 1
if DirectiveName in ['!ENDIF']:
self._InDirective -= 1
if DirectiveName in ['!IF', '!IFDEF', '!INCLUDE', '!IFNDEF', '!ELSEIF'] and self._ValueList[1] == '':
EdkLogger.error("Parser", FORMAT_INVALID, "Missing expression",
File=self.MetaFile, Line=self._LineIndex + 1,
ExtraData=self._CurrentLine)
ItemType = self.DataType[DirectiveName]
Scope = [[TAB_COMMON, TAB_COMMON, TAB_COMMON]]
if ItemType == MODEL_META_DATA_INCLUDE:
Scope = self._Scope
elif ItemType == MODEL_META_DATA_CONDITIONAL_STATEMENT_ERROR:
Scope = self._Scope
if ItemType == MODEL_META_DATA_CONDITIONAL_STATEMENT_ENDIF:
# Remove all directives between !if and !endif, including themselves
while self._DirectiveStack:
# Remove any !else or !elseif
DirectiveInfo = self._DirectiveStack.pop()
if DirectiveInfo[0] in [MODEL_META_DATA_CONDITIONAL_STATEMENT_IF,
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF,
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF]:
break
else:
EdkLogger.error("Parser", FORMAT_INVALID, "Redundant '!endif'",
File=self.MetaFile, Line=self._LineIndex + 1,
ExtraData=self._CurrentLine)
elif ItemType not in {MODEL_META_DATA_INCLUDE, MODEL_META_DATA_CONDITIONAL_STATEMENT_ERROR}:
            # Report an error if a '!elseif' follows a '!else'
if ItemType == MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSEIF and \
self._DirectiveStack and \
self._DirectiveStack[-1][0] == MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE:
EdkLogger.error("Parser", FORMAT_INVALID, "'!elseif' after '!else'",
File=self.MetaFile, Line=self._LineIndex + 1,
ExtraData=self._CurrentLine)
self._DirectiveStack.append((ItemType, self._LineIndex + 1, self._CurrentLine))
#
# Model, Value1, Value2, Value3, Arch, ModuleType, BelongsToItem=-1, BelongsToFile=-1,
# LineBegin=-1, ColumnBegin=-1, LineEnd=-1, ColumnEnd=-1, Enabled=-1
#
for Arch, ModuleType, DefaultStore in Scope:
self._LastItem = self._Store(
ItemType,
self._ValueList[0],
self._ValueList[1],
self._ValueList[2],
Arch,
ModuleType,
DefaultStore,
self._Owner[-1],
self._From,
self._LineIndex + 1,
- 1,
self._LineIndex + 1,
- 1,
0
)
## [defines] section parser
@ParseMacro
def _DefineParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_EQUAL_SPLIT, 1)
self._ValueList[1:len(TokenList)] = TokenList
# Syntax check
if not self._ValueList[1]:
EdkLogger.error('Parser', FORMAT_INVALID, "No name specified",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
if not self._ValueList[2]:
EdkLogger.error('Parser', FORMAT_INVALID, "No value specified",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
if (not self._ValueList[1] in self.DefineKeywords and
(self._InSubsection and self._ValueList[1] not in self.SubSectionDefineKeywords)):
EdkLogger.error('Parser', FORMAT_INVALID,
"Unknown keyword found: %s. "
"If this is a macro you must "
"add it as a DEFINE in the DSC" % self._ValueList[1],
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
if not self._InSubsection:
self._Defines[self._ValueList[1]] = self._ValueList[2]
self._ItemType = self.DataType[TAB_DSC_DEFINES.upper()]
@ParseMacro
def _SkuIdParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT)
if len(TokenList) not in (2, 3):
EdkLogger.error('Parser', FORMAT_INVALID, "Correct format is '<Number>|<UiName>[|<UiName>]'",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
self._ValueList[0:len(TokenList)] = TokenList
@ParseMacro
def _DefaultStoresParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT)
if len(TokenList) != 2:
EdkLogger.error('Parser', FORMAT_INVALID, "Correct format is '<Number>|<UiName>'",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
self._ValueList[0:len(TokenList)] = TokenList
## Parse Edk style of library modules
@ParseMacro
def _LibraryInstanceParser(self):
self._ValueList[0] = self._CurrentLine
def ProcessMultipleLineCODEValue(self,Content):
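        # Join a multi-line {CODE(...)} PCD value into a single logical line and append
        # blank lines for the consumed continuation lines so that line numbers stay stable.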
CODEBegin = False
CODELine = ""
continuelinecount = 0
newContent = []
for Index in range(0, len(Content)):
Line = Content[Index]
if CODEBegin:
CODELine = CODELine + Line
continuelinecount +=1
if ")}" in Line:
newContent.append(CODELine)
for _ in range(continuelinecount):
newContent.append("")
CODEBegin = False
CODELine = ""
continuelinecount = 0
else:
if not Line:
newContent.append(Line)
continue
if "{CODE(" not in Line:
newContent.append(Line)
continue
elif CODEPattern.findall(Line):
newContent.append(Line)
continue
else:
CODEBegin = True
CODELine = Line
return newContent
def _DecodeCODEData(self):
pass
## PCD sections parser
#
# [PcdsFixedAtBuild]
# [PcdsPatchableInModule]
# [PcdsFeatureFlag]
    #   [PcdsDynamicEx]
# [PcdsDynamicExDefault]
# [PcdsDynamicExVpd]
# [PcdsDynamicExHii]
# [PcdsDynamic]
# [PcdsDynamicDefault]
# [PcdsDynamicVpd]
# [PcdsDynamicHii]
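    #   Entry format: <TokenSpaceGuidCName>.<TokenCName>|<PcdValue>   (the value part is optional for some section types)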
#
@ParseMacro
def _PcdParser(self):
if self._PcdDataTypeCODE:
self._PcdCodeValue = self._PcdCodeValue + "\n " + self._CurrentLine
if self._CurrentLine.endswith(")}"):
self._CurrentLine = "|".join((self._CurrentPcdName, self._PcdCodeValue))
self._PcdDataTypeCODE = False
self._PcdCodeValue = ""
else:
self._ValueList = None
return
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT, 1)
self._CurrentPcdName = TokenList[0]
if len(TokenList) == 2 and TokenList[1].strip().startswith("{CODE"):
self._PcdDataTypeCODE = True
self._PcdCodeValue = TokenList[1].strip()
if self._PcdDataTypeCODE:
if self._CurrentLine.endswith(")}"):
self._PcdDataTypeCODE = False
self._PcdCodeValue = ""
else:
self._ValueList = None
return
self._ValueList[0:1] = GetSplitValueList(TokenList[0], TAB_SPLIT)
PcdNameTockens = GetSplitValueList(TokenList[0], TAB_SPLIT)
if len(PcdNameTockens) == 2:
self._ValueList[0], self._ValueList[1] = PcdNameTockens[0], PcdNameTockens[1]
elif len(PcdNameTockens) == 3:
self._ValueList[0], self._ValueList[1] = ".".join((PcdNameTockens[0], PcdNameTockens[1])), PcdNameTockens[2]
elif len(PcdNameTockens) > 3:
self._ValueList[0], self._ValueList[1] = ".".join((PcdNameTockens[0], PcdNameTockens[1])), ".".join(PcdNameTockens[2:])
if len(TokenList) == 2:
self._ValueList[2] = TokenList[1]
if self._ValueList[0] == '' or self._ValueList[1] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "No token space GUID or PCD name specified",
ExtraData=self._CurrentLine + " (<TokenSpaceGuidCName>.<TokenCName>|<PcdValue>)",
File=self.MetaFile, Line=self._LineIndex + 1)
if self._ValueList[2] == '':
#
# The PCD values are optional for FIXEDATBUILD, PATCHABLEINMODULE, Dynamic/DynamicEx default
#
if self._SectionType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
return
EdkLogger.error('Parser', FORMAT_INVALID, "No PCD value given",
ExtraData=self._CurrentLine + " (<TokenSpaceGuidCName>.<TokenCName>|<PcdValue>)",
File=self.MetaFile, Line=self._LineIndex + 1)
        # Validate the datum type of Dynamic Default PCD and DynamicEx Default PCD
ValueList = GetSplitValueList(self._ValueList[2])
if len(ValueList) > 1 and ValueList[1] in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64] \
and self._ItemType in [MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT]:
EdkLogger.error('Parser', FORMAT_INVALID, "The datum type '%s' of PCD is wrong" % ValueList[1],
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
        # Validate that the VariableName of a DynamicHii or DynamicExHii PCD entry is not an empty string
if self._ItemType in [MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII]:
DscPcdValueList = GetSplitValueList(TokenList[1], TAB_VALUE_SPLIT, 1)
if len(DscPcdValueList[0].replace('L', '').replace('"', '').strip()) == 0:
EdkLogger.error('Parser', FORMAT_INVALID, "The VariableName field in the HII format PCD entry must not be an empty string",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
        # If the value is 'True', 'true', 'TRUE' or 'False', 'false', 'FALSE', replace it with integer 1 or 0.
DscPcdValueList = GetSplitValueList(TokenList[1], TAB_VALUE_SPLIT, 1)
if DscPcdValueList[0] in ['True', 'true', 'TRUE']:
self._ValueList[2] = TokenList[1].replace(DscPcdValueList[0], '1', 1);
elif DscPcdValueList[0] in ['False', 'false', 'FALSE']:
self._ValueList[2] = TokenList[1].replace(DscPcdValueList[0], '0', 1);
## [components] section parser
@ParseMacro
def _ComponentParser(self):
if self._CurrentLine[-1] == '{':
self._ValueList[0] = self._CurrentLine[0:-1].strip()
self._InSubsection = True
self._SubsectionType = MODEL_UNKNOWN
else:
self._ValueList[0] = self._CurrentLine
## [LibraryClasses] section
@ParseMacro
def _LibraryClassParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT)
if len(TokenList) < 2:
EdkLogger.error('Parser', FORMAT_INVALID, "No library class or instance specified",
ExtraData=self._CurrentLine + " (<LibraryClassName>|<LibraryInstancePath>)",
File=self.MetaFile, Line=self._LineIndex + 1)
if TokenList[0] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "No library class specified",
ExtraData=self._CurrentLine + " (<LibraryClassName>|<LibraryInstancePath>)",
File=self.MetaFile, Line=self._LineIndex + 1)
if TokenList[1] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "No library instance specified",
ExtraData=self._CurrentLine + " (<LibraryClassName>|<LibraryInstancePath>)",
File=self.MetaFile, Line=self._LineIndex + 1)
self._ValueList[0:len(TokenList)] = TokenList
def _CompponentSourceOverridePathParser(self):
self._ValueList[0] = self._CurrentLine
## [BuildOptions] section parser
@ParseMacro
def _BuildOptionParser(self):
self._CurrentLine = CleanString(self._CurrentLine, BuildOption=True)
TokenList = GetSplitValueList(self._CurrentLine, TAB_EQUAL_SPLIT, 1)
TokenList2 = GetSplitValueList(TokenList[0], ':', 1)
if len(TokenList2) == 2:
self._ValueList[0] = TokenList2[0] # toolchain family
self._ValueList[1] = TokenList2[1] # keys
else:
self._ValueList[1] = TokenList[0]
if len(TokenList) == 2: # value
self._ValueList[2] = TokenList[1]
if self._ValueList[1].count('_') != 4:
EdkLogger.error(
'Parser',
FORMAT_INVALID,
"'%s' must be in format of <TARGET>_<TOOLCHAIN>_<ARCH>_<TOOL>_FLAGS" % self._ValueList[1],
ExtraData=self._CurrentLine,
File=self.MetaFile,
Line=self._LineIndex + 1
)
## Override parent's method since we'll do all macro replacements in parser
@property
def _Macros(self):
Macros = {}
Macros.update(self._FileLocalMacros)
Macros.update(self._GetApplicableSectionMacro())
Macros.update(GlobalData.gEdkGlobal)
Macros.update(GlobalData.gPlatformDefines)
Macros.update(GlobalData.gCommandLineDefines)
# PCD cannot be referenced in macro definition
if self._ItemType not in [MODEL_META_DATA_DEFINE, MODEL_META_DATA_GLOBAL_DEFINE]:
Macros.update(self._Symbols)
if GlobalData.BuildOptionPcd:
for Item in GlobalData.BuildOptionPcd:
if isinstance(Item, tuple):
continue
PcdName, TmpValue = Item.split("=")
TmpValue = BuildOptionValue(TmpValue, self._GuidDict)
Macros[PcdName.strip()] = TmpValue
return Macros
def _PostProcess(self):
Processer = {
MODEL_META_DATA_SECTION_HEADER : self.__ProcessSectionHeader,
MODEL_META_DATA_SUBSECTION_HEADER : self.__ProcessSubsectionHeader,
MODEL_META_DATA_HEADER : self.__ProcessDefine,
MODEL_META_DATA_DEFINE : self.__ProcessDefine,
MODEL_META_DATA_GLOBAL_DEFINE : self.__ProcessDefine,
MODEL_META_DATA_INCLUDE : self.__ProcessDirective,
MODEL_META_DATA_CONDITIONAL_STATEMENT_IF : self.__ProcessDirective,
MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE : self.__ProcessDirective,
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF : self.__ProcessDirective,
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF : self.__ProcessDirective,
MODEL_META_DATA_CONDITIONAL_STATEMENT_ENDIF : self.__ProcessDirective,
MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSEIF : self.__ProcessDirective,
MODEL_EFI_SKU_ID : self.__ProcessSkuId,
MODEL_EFI_DEFAULT_STORES : self.__ProcessDefaultStores,
MODEL_EFI_LIBRARY_INSTANCE : self.__ProcessLibraryInstance,
MODEL_EFI_LIBRARY_CLASS : self.__ProcessLibraryClass,
MODEL_PCD_FIXED_AT_BUILD : self.__ProcessPcd,
MODEL_PCD_PATCHABLE_IN_MODULE : self.__ProcessPcd,
MODEL_PCD_FEATURE_FLAG : self.__ProcessPcd,
MODEL_PCD_DYNAMIC_DEFAULT : self.__ProcessPcd,
MODEL_PCD_DYNAMIC_HII : self.__ProcessPcd,
MODEL_PCD_DYNAMIC_VPD : self.__ProcessPcd,
MODEL_PCD_DYNAMIC_EX_DEFAULT : self.__ProcessPcd,
MODEL_PCD_DYNAMIC_EX_HII : self.__ProcessPcd,
MODEL_PCD_DYNAMIC_EX_VPD : self.__ProcessPcd,
MODEL_META_DATA_COMPONENT : self.__ProcessComponent,
MODEL_META_DATA_COMPONENT_SOURCE_OVERRIDE_PATH : self.__ProcessSourceOverridePath,
MODEL_META_DATA_BUILD_OPTION : self.__ProcessBuildOption,
MODEL_UNKNOWN : self._Skip,
MODEL_META_DATA_USER_EXTENSION : self._SkipUserExtension,
MODEL_META_DATA_CONDITIONAL_STATEMENT_ERROR : self._ProcessError,
}
self._Table = MetaFileStorage(self._RawTable.DB, self.MetaFile, MODEL_FILE_DSC, True)
self._DirectiveStack = []
self._DirectiveEvalStack = []
self._FileWithError = self.MetaFile
self._FileLocalMacros = {}
self._SectionsMacroDict.clear()
GlobalData.gPlatformDefines = {}
        # Get all macros and PCDs which have straightforward values
self.__RetrievePcdValue()
self._Content = self._RawTable.GetAll()
self._ContentIndex = 0
self._InSubsection = False
while self._ContentIndex < len(self._Content) :
Id, self._ItemType, V1, V2, V3, S1, S2, S3, Owner, self._From, \
LineStart, ColStart, LineEnd, ColEnd, Enabled = self._Content[self._ContentIndex]
if self._From < 0:
self._FileWithError = self.MetaFile
self._ContentIndex += 1
self._Scope = [[S1, S2, S3]]
#
# For !include directive, handle it specially,
# merge arch and module type in case of duplicate items
#
while self._ItemType == MODEL_META_DATA_INCLUDE:
if self._ContentIndex >= len(self._Content):
break
Record = self._Content[self._ContentIndex]
if LineStart == Record[10] and LineEnd == Record[12]:
if [Record[5], Record[6], Record[7]] not in self._Scope:
self._Scope.append([Record[5], Record[6], Record[7]])
self._ContentIndex += 1
else:
break
self._LineIndex = LineStart - 1
self._ValueList = [V1, V2, V3]
if Owner > 0 and Owner in self._IdMapping:
self._InSubsection = True
else:
self._InSubsection = False
try:
Processer[self._ItemType]()
except EvaluationException as Excpt:
#
                # Only catch expression evaluation errors here. We need to report
                # the precise line number on which the error occurred.
#
if hasattr(Excpt, 'Pcd'):
if Excpt.Pcd in GlobalData.gPlatformOtherPcds:
Info = GlobalData.gPlatformOtherPcds[Excpt.Pcd]
EdkLogger.error('Parser', FORMAT_INVALID, "Cannot use this PCD (%s) in an expression as"
" it must be defined in a [PcdsFixedAtBuild] or [PcdsFeatureFlag] section"
" of the DSC file, and it is currently defined in this section:"
" %s, line #: %d." % (Excpt.Pcd, Info[0], Info[1]),
File=self._FileWithError, ExtraData=' '.join(self._ValueList),
Line=self._LineIndex + 1)
else:
EdkLogger.error('Parser', FORMAT_INVALID, "PCD (%s) is not defined in DSC file" % Excpt.Pcd,
File=self._FileWithError, ExtraData=' '.join(self._ValueList),
Line=self._LineIndex + 1)
else:
EdkLogger.error('Parser', FORMAT_INVALID, "Invalid expression: %s" % str(Excpt),
File=self._FileWithError, ExtraData=' '.join(self._ValueList),
Line=self._LineIndex + 1)
except MacroException as Excpt:
EdkLogger.error('Parser', FORMAT_INVALID, str(Excpt),
File=self._FileWithError, ExtraData=' '.join(self._ValueList),
Line=self._LineIndex + 1)
if self._ValueList is None:
continue
NewOwner = self._IdMapping.get(Owner, -1)
self._Enabled = int((not self._DirectiveEvalStack) or (False not in self._DirectiveEvalStack))
self._LastItem = self._Store(
self._ItemType,
self._ValueList[0],
self._ValueList[1],
self._ValueList[2],
S1,
S2,
S3,
NewOwner,
self._From,
self._LineIndex + 1,
- 1,
self._LineIndex + 1,
- 1,
self._Enabled
)
self._IdMapping[Id] = self._LastItem
GlobalData.gPlatformDefines.update(self._FileLocalMacros)
self._PostProcessed = True
self._Content = None
def _ProcessError(self):
if not self._Enabled:
return
EdkLogger.error('Parser', ERROR_STATEMENT, self._ValueList[1], File=self.MetaFile, Line=self._LineIndex + 1)
def __ProcessSectionHeader(self):
self._SectionName = self._ValueList[0]
if self._SectionName in self.DataType:
self._SectionType = self.DataType[self._SectionName]
else:
self._SectionType = MODEL_UNKNOWN
def __ProcessSubsectionHeader(self):
self._SubsectionName = self._ValueList[0]
if self._SubsectionName in self.DataType:
self._SubsectionType = self.DataType[self._SubsectionName]
else:
self._SubsectionType = MODEL_UNKNOWN
def __RetrievePcdValue(self):
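        # For each dynamic/patchable PCD, remember the section header and the line where it is
        # defined so that later error messages can point back to its definition.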
Content = open(str(self.MetaFile), 'r').readlines()
GlobalData.gPlatformOtherPcds['DSCFILE'] = str(self.MetaFile)
for PcdType in (MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_HII,
MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_DEFAULT, MODEL_PCD_DYNAMIC_EX_HII,
MODEL_PCD_DYNAMIC_EX_VPD):
Records = self._RawTable.Query(PcdType, BelongsToItem= -1.0)
for TokenSpaceGuid, PcdName, Value, Dummy2, Dummy3, Dummy4, ID, Line in Records:
Name = TokenSpaceGuid + '.' + PcdName
if Name not in GlobalData.gPlatformOtherPcds:
PcdLine = Line
while not Content[Line - 1].lstrip().startswith(TAB_SECTION_START):
Line -= 1
GlobalData.gPlatformOtherPcds[Name] = (CleanString(Content[Line - 1]), PcdLine, PcdType)
def __ProcessDefine(self):
if not self._Enabled:
return
Type, Name, Value = self._ValueList
Value = ReplaceMacro(Value, self._Macros, False)
#
# If it is <Defines>, return
#
if self._InSubsection:
self._ValueList = [Type, Name, Value]
return
if self._ItemType == MODEL_META_DATA_DEFINE:
if self._SectionType == MODEL_META_DATA_HEADER:
self._FileLocalMacros[Name] = Value
else:
self._ConstructSectionMacroDict(Name, Value)
elif self._ItemType == MODEL_META_DATA_GLOBAL_DEFINE:
GlobalData.gEdkGlobal[Name] = Value
#
# Keyword in [Defines] section can be used as Macros
#
if (self._ItemType == MODEL_META_DATA_HEADER) and (self._SectionType == MODEL_META_DATA_HEADER):
self._FileLocalMacros[Name] = Value
self._ValueList = [Type, Name, Value]
def __ProcessDirective(self):
Result = None
if self._ItemType in [MODEL_META_DATA_CONDITIONAL_STATEMENT_IF,
MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSEIF]:
Macros = self._Macros
Macros.update(GlobalData.gGlobalDefines)
try:
Result = ValueExpression(self._ValueList[1], Macros)()
except SymbolNotFound as Exc:
EdkLogger.debug(EdkLogger.DEBUG_5, str(Exc), self._ValueList[1])
Result = False
except WrnExpression as Excpt:
#
# Catch expression evaluation warning here. We need to report
                # the precise line number and return the evaluation result
#
EdkLogger.warn('Parser', "Suspicious expression: %s" % str(Excpt),
File=self._FileWithError, ExtraData=' '.join(self._ValueList),
Line=self._LineIndex + 1)
Result = Excpt.result
if self._ItemType in [MODEL_META_DATA_CONDITIONAL_STATEMENT_IF,
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF,
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF]:
self._DirectiveStack.append(self._ItemType)
if self._ItemType == MODEL_META_DATA_CONDITIONAL_STATEMENT_IF:
Result = bool(Result)
else:
Macro = self._ValueList[1]
Macro = Macro[2:-1] if (Macro.startswith("$(") and Macro.endswith(")")) else Macro
Result = Macro in self._Macros
if self._ItemType == MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF:
Result = not Result
self._DirectiveEvalStack.append(Result)
elif self._ItemType == MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSEIF:
self._DirectiveStack.append(self._ItemType)
self._DirectiveEvalStack[-1] = not self._DirectiveEvalStack[-1]
self._DirectiveEvalStack.append(bool(Result))
elif self._ItemType == MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE:
self._DirectiveStack.append(self._ItemType)
self._DirectiveEvalStack[-1] = not self._DirectiveEvalStack[-1]
self._DirectiveEvalStack.append(True)
elif self._ItemType == MODEL_META_DATA_CONDITIONAL_STATEMENT_ENDIF:
# Back to the nearest !if/!ifdef/!ifndef
while self._DirectiveStack:
self._DirectiveEvalStack.pop()
Directive = self._DirectiveStack.pop()
if Directive in [MODEL_META_DATA_CONDITIONAL_STATEMENT_IF,
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF,
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF]:
break
elif self._ItemType == MODEL_META_DATA_INCLUDE:
# The included file must be relative to workspace or same directory as DSC file
__IncludeMacros = {}
#
# Allow using system environment variables in path after !include
#
__IncludeMacros['WORKSPACE'] = GlobalData.gGlobalDefines['WORKSPACE']
if "ECP_SOURCE" in GlobalData.gGlobalDefines:
__IncludeMacros['ECP_SOURCE'] = GlobalData.gGlobalDefines['ECP_SOURCE']
#
# During GenFds phase call DSC parser, will go into this branch.
#
elif "ECP_SOURCE" in GlobalData.gCommandLineDefines:
__IncludeMacros['ECP_SOURCE'] = GlobalData.gCommandLineDefines['ECP_SOURCE']
__IncludeMacros['EFI_SOURCE'] = GlobalData.gGlobalDefines['EFI_SOURCE']
__IncludeMacros['EDK_SOURCE'] = GlobalData.gGlobalDefines['EDK_SOURCE']
#
            # Allow using MACROs that come from the [Defines] section, to keep compatibility.
#
__IncludeMacros.update(self._Macros)
IncludedFile = NormPath(ReplaceMacro(self._ValueList[1], __IncludeMacros, RaiseError=True))
#
# First search the include file under the same directory as DSC file
#
IncludedFile1 = PathClass(IncludedFile, self.MetaFile.Dir)
ErrorCode, ErrorInfo1 = IncludedFile1.Validate()
if ErrorCode != 0:
#
# Also search file under the WORKSPACE directory
#
IncludedFile1 = PathClass(IncludedFile, GlobalData.gWorkspace)
ErrorCode, ErrorInfo2 = IncludedFile1.Validate()
if ErrorCode != 0:
EdkLogger.error('parser', ErrorCode, File=self._FileWithError,
Line=self._LineIndex + 1, ExtraData=ErrorInfo1 + "\n" + ErrorInfo2)
self._FileWithError = IncludedFile1
FromItem = self._Content[self._ContentIndex - 1][0]
if self._InSubsection:
Owner = self._Content[self._ContentIndex - 1][8]
else:
Owner = self._Content[self._ContentIndex - 1][0]
IncludedFileTable = MetaFileStorage(self._RawTable.DB, IncludedFile1, MODEL_FILE_DSC, False, FromItem=FromItem)
Parser = DscParser(IncludedFile1, self._FileType, self._Arch, IncludedFileTable,
Owner=Owner, From=FromItem)
self.IncludedFiles.add (IncludedFile1)
# set the parser status with current status
Parser._SectionName = self._SectionName
Parser._SubsectionType = self._SubsectionType
Parser._InSubsection = self._InSubsection
Parser._SectionType = self._SectionType
Parser._Scope = self._Scope
Parser._Enabled = self._Enabled
# Parse the included file
Parser.StartParse()
# Insert all records in the table for the included file into dsc file table
Records = IncludedFileTable.GetAll()
if Records:
self._Content[self._ContentIndex:self._ContentIndex] = Records
self._Content.pop(self._ContentIndex - 1)
self._ValueList = None
self._ContentIndex -= 1
def __ProcessSkuId(self):
self._ValueList = [ReplaceMacro(Value, self._Macros, RaiseError=True)
for Value in self._ValueList]
def __ProcessDefaultStores(self):
self._ValueList = [ReplaceMacro(Value, self._Macros, RaiseError=True)
for Value in self._ValueList]
def __ProcessLibraryInstance(self):
self._ValueList = [ReplaceMacro(Value, self._Macros) for Value in self._ValueList]
def __ProcessLibraryClass(self):
self._ValueList[1] = ReplaceMacro(self._ValueList[1], self._Macros, RaiseError=True)
def __ProcessPcd(self):
if self._ItemType not in [MODEL_PCD_FEATURE_FLAG, MODEL_PCD_FIXED_AT_BUILD]:
self._ValueList[2] = ReplaceMacro(self._ValueList[2], self._Macros, RaiseError=True)
return
ValList, Valid, Index = AnalyzeDscPcd(self._ValueList[2], self._ItemType)
if not Valid:
if self._ItemType in (MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT, MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE):
if ValList[1] != TAB_VOID and StructPattern.match(ValList[1]) is None and ValList[2]:
EdkLogger.error('build', FORMAT_INVALID, "Pcd format incorrect. The datum type info should be VOID* or a valid struct name.", File=self._FileWithError,
Line=self._LineIndex + 1, ExtraData="%s.%s|%s" % (self._ValueList[0], self._ValueList[1], self._ValueList[2]))
EdkLogger.error('build', FORMAT_INVALID, "Pcd format incorrect.", File=self._FileWithError, Line=self._LineIndex + 1,
ExtraData="%s.%s|%s" % (self._ValueList[0], self._ValueList[1], self._ValueList[2]))
PcdValue = ValList[Index]
if PcdValue and "." not in self._ValueList[0]:
try:
ValList[Index] = ValueExpression(PcdValue, self._Macros)(True)
except WrnExpression as Value:
ValList[Index] = Value.result
except:
pass
if ValList[Index] == 'True':
ValList[Index] = '1'
if ValList[Index] == 'False':
ValList[Index] = '0'
if (not self._DirectiveEvalStack) or (False not in self._DirectiveEvalStack):
GlobalData.gPlatformPcds[TAB_SPLIT.join(self._ValueList[0:2])] = PcdValue
self._Symbols[TAB_SPLIT.join(self._ValueList[0:2])] = PcdValue
try:
self._ValueList[2] = '|'.join(ValList)
except Exception:
print(ValList)
def __ProcessComponent(self):
self._ValueList[0] = ReplaceMacro(self._ValueList[0], self._Macros)
def __ProcessSourceOverridePath(self):
self._ValueList[0] = ReplaceMacro(self._ValueList[0], self._Macros)
def __ProcessBuildOption(self):
self._ValueList = [ReplaceMacro(Value, self._Macros, RaiseError=False)
for Value in self._ValueList]
_SectionParser = {
MODEL_META_DATA_HEADER : _DefineParser,
MODEL_EFI_SKU_ID : _SkuIdParser,
MODEL_EFI_DEFAULT_STORES : _DefaultStoresParser,
MODEL_EFI_LIBRARY_INSTANCE : _LibraryInstanceParser,
MODEL_EFI_LIBRARY_CLASS : _LibraryClassParser,
MODEL_PCD_FIXED_AT_BUILD : _PcdParser,
MODEL_PCD_PATCHABLE_IN_MODULE : _PcdParser,
MODEL_PCD_FEATURE_FLAG : _PcdParser,
MODEL_PCD_DYNAMIC_DEFAULT : _PcdParser,
MODEL_PCD_DYNAMIC_HII : _PcdParser,
MODEL_PCD_DYNAMIC_VPD : _PcdParser,
MODEL_PCD_DYNAMIC_EX_DEFAULT : _PcdParser,
MODEL_PCD_DYNAMIC_EX_HII : _PcdParser,
MODEL_PCD_DYNAMIC_EX_VPD : _PcdParser,
MODEL_META_DATA_COMPONENT : _ComponentParser,
MODEL_META_DATA_COMPONENT_SOURCE_OVERRIDE_PATH : _CompponentSourceOverridePathParser,
MODEL_META_DATA_BUILD_OPTION : _BuildOptionParser,
MODEL_UNKNOWN : MetaFileParser._Skip,
MODEL_META_DATA_USER_EXTENSION : MetaFileParser._SkipUserExtension,
MODEL_META_DATA_SECTION_HEADER : MetaFileParser._SectionHeaderParser,
MODEL_META_DATA_SUBSECTION_HEADER : _SubsectionHeaderParser,
}
## DEC file parser class
#
# @param FilePath The path of platform description file
# @param FileType The raw data of DSC file
# @param Table Database used to retrieve module/package information
# @param Macros Macros used for replacement in file
#
class DecParser(MetaFileParser):
# DEC file supported data types (one type per section)
DataType = {
TAB_DEC_DEFINES.upper() : MODEL_META_DATA_HEADER,
TAB_DSC_DEFINES_DEFINE : MODEL_META_DATA_DEFINE,
TAB_INCLUDES.upper() : MODEL_EFI_INCLUDE,
TAB_LIBRARY_CLASSES.upper() : MODEL_EFI_LIBRARY_CLASS,
TAB_GUIDS.upper() : MODEL_EFI_GUID,
TAB_PPIS.upper() : MODEL_EFI_PPI,
TAB_PROTOCOLS.upper() : MODEL_EFI_PROTOCOL,
TAB_PCDS_FIXED_AT_BUILD_NULL.upper() : MODEL_PCD_FIXED_AT_BUILD,
TAB_PCDS_PATCHABLE_IN_MODULE_NULL.upper() : MODEL_PCD_PATCHABLE_IN_MODULE,
TAB_PCDS_FEATURE_FLAG_NULL.upper() : MODEL_PCD_FEATURE_FLAG,
TAB_PCDS_DYNAMIC_NULL.upper() : MODEL_PCD_DYNAMIC,
TAB_PCDS_DYNAMIC_EX_NULL.upper() : MODEL_PCD_DYNAMIC_EX,
TAB_USER_EXTENSIONS.upper() : MODEL_META_DATA_USER_EXTENSION,
}
## Constructor of DecParser
#
# Initialize object of DecParser
#
# @param FilePath The path of platform description file
# @param FileType The raw data of DSC file
# @param Arch Default Arch value for filtering sections
# @param Table Database used to retrieve module/package information
#
def __init__(self, FilePath, FileType, Arch, Table):
# prevent re-initialization
if hasattr(self, "_Table"):
return
MetaFileParser.__init__(self, FilePath, FileType, Arch, Table, -1)
self._Comments = []
self._Version = 0x00010005 # Only EDK2 dec file is supported
self._AllPCDs = [] # Only for check duplicate PCD
self._AllPcdDict = {}
self._CurrentStructurePcdName = ""
self._include_flag = False
self._package_flag = False
## Parser starter
def Start(self):
Content = ''
try:
Content = open(str(self.MetaFile), 'r').readlines()
except:
EdkLogger.error("Parser", FILE_READ_FAILURE, ExtraData=self.MetaFile)
self._DefinesCount = 0
for Index in range(0, len(Content)):
Line, Comment = CleanString2(Content[Index])
self._CurrentLine = Line
self._LineIndex = Index
# save comment for later use
if Comment:
self._Comments.append((Comment, self._LineIndex + 1))
# skip empty line
if Line == '':
continue
# section header
if Line[0] == TAB_SECTION_START and Line[-1] == TAB_SECTION_END:
self._SectionHeaderParser()
if self._SectionName == TAB_DEC_DEFINES.upper():
self._DefinesCount += 1
self._Comments = []
continue
if self._SectionType == MODEL_UNKNOWN:
EdkLogger.error("Parser", FORMAT_INVALID,
""
"Not able to determine \"%s\" in which section."%self._CurrentLine,
self.MetaFile, self._LineIndex + 1)
elif len(self._SectionType) == 0:
self._Comments = []
continue
# section content
self._ValueList = ['', '', '']
self._SectionParser[self._SectionType[0]](self)
if self._ValueList is None or self._ItemType == MODEL_META_DATA_DEFINE:
self._ItemType = -1
self._Comments = []
continue
#
# Model, Value1, Value2, Value3, Arch, BelongsToItem=-1, LineBegin=-1,
# ColumnBegin=-1, LineEnd=-1, ColumnEnd=-1, FeatureFlag='', Enabled=-1
#
for Arch, ModuleType, Type in self._Scope:
self._LastItem = self._Store(
Type,
self._ValueList[0],
self._ValueList[1],
self._ValueList[2],
Arch,
ModuleType,
self._Owner[-1],
self._LineIndex + 1,
- 1,
self._LineIndex + 1,
- 1,
0
)
for Comment, LineNo in self._Comments:
self._Store(
MODEL_META_DATA_COMMENT,
Comment,
self._ValueList[0],
self._ValueList[1],
Arch,
ModuleType,
self._LastItem,
LineNo,
- 1,
LineNo,
- 1,
0
)
self._Comments = []
if self._DefinesCount > 1:
            EdkLogger.error('Parser', FORMAT_INVALID, 'Multiple [Defines] sections exist.', self.MetaFile)
if self._DefinesCount == 0:
            EdkLogger.error('Parser', FORMAT_INVALID, 'No [Defines] section exists.', self.MetaFile)
self._Done()
## Section header parser
#
    #   The section header is always in the following format:
#
# [section_name.arch<.platform|module_type>]
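    #
    #   For example (illustrative headers only, not taken from any particular package):
    #
    #       [Guids.common]
    #       [Includes.IA32, Includes.X64]
    #       [Ppis.common.Private]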
#
def _SectionHeaderParser(self):
self._Scope = []
self._SectionName = ''
self._SectionType = []
ArchList = set()
PrivateList = set()
Line = re.sub(',[\s]*', TAB_COMMA_SPLIT, self._CurrentLine)
for Item in Line[1:-1].split(TAB_COMMA_SPLIT):
if Item == '':
EdkLogger.error("Parser", FORMAT_UNKNOWN_ERROR,
"section name can NOT be empty or incorrectly use separator comma",
self.MetaFile, self._LineIndex + 1, self._CurrentLine)
ItemList = Item.split(TAB_SPLIT)
# different types of PCD are permissible in one section
self._SectionName = ItemList[0].upper()
if self._SectionName == TAB_DEC_DEFINES.upper() and (len(ItemList) > 1 or len(Line.split(TAB_COMMA_SPLIT)) > 1):
EdkLogger.error("Parser", FORMAT_INVALID, "Defines section format is invalid",
self.MetaFile, self._LineIndex + 1, self._CurrentLine)
if self._SectionName in self.DataType:
if self.DataType[self._SectionName] not in self._SectionType:
self._SectionType.append(self.DataType[self._SectionName])
else:
EdkLogger.error("Parser", FORMAT_UNKNOWN_ERROR, "%s is not a valid section name" % Item,
self.MetaFile, self._LineIndex + 1, self._CurrentLine)
if MODEL_PCD_FEATURE_FLAG in self._SectionType and len(self._SectionType) > 1:
EdkLogger.error(
'Parser',
FORMAT_INVALID,
"%s must not be in the same section of other types of PCD" % TAB_PCDS_FEATURE_FLAG_NULL,
File=self.MetaFile,
Line=self._LineIndex + 1,
ExtraData=self._CurrentLine
)
# S1 is always Arch
if len(ItemList) > 1:
S1 = ItemList[1].upper()
else:
S1 = TAB_ARCH_COMMON
ArchList.add(S1)
# S2 may be Platform or ModuleType
if len(ItemList) > 2:
S2 = ItemList[2].upper()
# only Includes, GUIDs, PPIs, Protocols section have Private tag
if self._SectionName in [TAB_INCLUDES.upper(), TAB_GUIDS.upper(), TAB_PROTOCOLS.upper(), TAB_PPIS.upper()]:
if S2 != 'PRIVATE':
EdkLogger.error("Parser", FORMAT_INVALID, 'Please use keyword "Private" as section tag modifier.',
File=self.MetaFile, Line=self._LineIndex + 1, ExtraData=self._CurrentLine)
else:
S2 = TAB_COMMON
PrivateList.add(S2)
if [S1, S2, self.DataType[self._SectionName]] not in self._Scope:
self._Scope.append([S1, S2, self.DataType[self._SectionName]])
# 'COMMON' must not be used with specific ARCHs at the same section
if TAB_ARCH_COMMON in ArchList and len(ArchList) > 1:
EdkLogger.error('Parser', FORMAT_INVALID, "'common' ARCH must not be used with specific ARCHs",
File=self.MetaFile, Line=self._LineIndex + 1, ExtraData=self._CurrentLine)
# It is not permissible to mix section tags without the Private attribute with section tags with the Private attribute
if TAB_COMMON in PrivateList and len(PrivateList) > 1:
EdkLogger.error('Parser', FORMAT_INVALID, "Can't mix section tags without the Private attribute with section tags with the Private attribute",
File=self.MetaFile, Line=self._LineIndex + 1, ExtraData=self._CurrentLine)
## [guids], [ppis] and [protocols] section parser
@ParseMacro
def _GuidParser(self):
TokenList = GetSplitValueList(self._CurrentLine, TAB_EQUAL_SPLIT, 1)
if len(TokenList) < 2:
EdkLogger.error('Parser', FORMAT_INVALID, "No GUID name or value specified",
ExtraData=self._CurrentLine + " (<CName> = <GuidValueInCFormat>)",
File=self.MetaFile, Line=self._LineIndex + 1)
if TokenList[0] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "No GUID name specified",
ExtraData=self._CurrentLine + " (<CName> = <GuidValueInCFormat>)",
File=self.MetaFile, Line=self._LineIndex + 1)
if TokenList[1] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "No GUID value specified",
ExtraData=self._CurrentLine + " (<CName> = <GuidValueInCFormat>)",
File=self.MetaFile, Line=self._LineIndex + 1)
if TokenList[1][0] != '{' or TokenList[1][-1] != '}' or GuidStructureStringToGuidString(TokenList[1]) == '':
EdkLogger.error('Parser', FORMAT_INVALID, "Invalid GUID value format",
ExtraData=self._CurrentLine + \
" (<CName> = <GuidValueInCFormat:{8,4,4,{2,2,2,2,2,2,2,2}}>)",
File=self.MetaFile, Line=self._LineIndex + 1)
self._ValueList[0] = TokenList[0]
self._ValueList[1] = TokenList[1]
if self._ValueList[0] not in self._GuidDict:
self._GuidDict[self._ValueList[0]] = self._ValueList[1]
def ParsePcdName(self,namelist):
if "[" in namelist[1]:
pcdname = namelist[1][:namelist[1].index("[")]
arrayindex = namelist[1][namelist[1].index("["):]
namelist[1] = pcdname
if len(namelist) == 2:
namelist.append(arrayindex)
else:
namelist[2] = ".".join((arrayindex,namelist[2]))
return namelist
## PCD sections parser
#
# [PcdsFixedAtBuild]
# [PcdsPatchableInModule]
# [PcdsFeatureFlag]
    #   [PcdsDynamicEx]
# [PcdsDynamic]
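    #
    #   A typical PCD entry has the form (names below are illustrative only):
    #
    #       gExampleTokenSpaceGuid.PcdExampleValue|0x0|UINT32|0x00000001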
#
@ParseMacro
def _PcdParser(self):
if self._CurrentStructurePcdName:
self._ValueList[0] = self._CurrentStructurePcdName
if "|" not in self._CurrentLine:
if "<HeaderFiles>" == self._CurrentLine:
self._include_flag = True
self._package_flag = False
self._ValueList = None
return
if "<Packages>" == self._CurrentLine:
self._package_flag = True
self._ValueList = None
self._include_flag = False
return
if self._include_flag:
self._ValueList[1] = "<HeaderFiles>_" + md5(self._CurrentLine).hexdigest()
self._ValueList[2] = self._CurrentLine
if self._package_flag and "}" != self._CurrentLine:
self._ValueList[1] = "<Packages>_" + md5(self._CurrentLine).hexdigest()
self._ValueList[2] = self._CurrentLine
if self._CurrentLine == "}":
self._package_flag = False
self._include_flag = False
self._ValueList = None
return
else:
PcdTockens = self._CurrentLine.split(TAB_VALUE_SPLIT)
PcdNames = self.ParsePcdName(PcdTockens[0].split(TAB_SPLIT))
if len(PcdNames) == 2:
if PcdNames[1].strip().endswith("]"):
PcdName = PcdNames[1][:PcdNames[1].index('[')]
Index = PcdNames[1][PcdNames[1].index('['):]
self._ValueList[0] = TAB_SPLIT.join((PcdNames[0],PcdName))
self._ValueList[1] = Index
self._ValueList[2] = PcdTockens[1]
else:
self._CurrentStructurePcdName = ""
else:
if self._CurrentStructurePcdName != TAB_SPLIT.join(PcdNames[:2]):
EdkLogger.error('Parser', FORMAT_INVALID, "Pcd Name does not match: %s and %s " % (self._CurrentStructurePcdName, TAB_SPLIT.join(PcdNames[:2])),
File=self.MetaFile, Line=self._LineIndex + 1)
self._ValueList[1] = TAB_SPLIT.join(PcdNames[2:])
self._ValueList[2] = PcdTockens[1]
if not self._CurrentStructurePcdName:
TokenList = GetSplitValueList(self._CurrentLine, TAB_VALUE_SPLIT, 1)
self._ValueList[0:1] = GetSplitValueList(TokenList[0], TAB_SPLIT)
ValueRe = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*')
# check PCD information
if self._ValueList[0] == '' or self._ValueList[1] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "No token space GUID or PCD name specified",
ExtraData=self._CurrentLine + \
" (<TokenSpaceGuidCName>.<PcdCName>|<DefaultValue>|<DatumType>|<Token>)",
File=self.MetaFile, Line=self._LineIndex + 1)
# check format of token space GUID CName
if not ValueRe.match(self._ValueList[0]):
EdkLogger.error('Parser', FORMAT_INVALID, "The format of the token space GUID CName is invalid. The correct format is '(a-zA-Z_)[a-zA-Z0-9_]*'",
ExtraData=self._CurrentLine + \
" (<TokenSpaceGuidCName>.<PcdCName>|<DefaultValue>|<DatumType>|<Token>)",
File=self.MetaFile, Line=self._LineIndex + 1)
# check format of PCD CName
if not ValueRe.match(self._ValueList[1]):
EdkLogger.error('Parser', FORMAT_INVALID, "The format of the PCD CName is invalid. The correct format is '(a-zA-Z_)[a-zA-Z0-9_]*'",
ExtraData=self._CurrentLine + \
" (<TokenSpaceGuidCName>.<PcdCName>|<DefaultValue>|<DatumType>|<Token>)",
File=self.MetaFile, Line=self._LineIndex + 1)
# check PCD datum information
if len(TokenList) < 2 or TokenList[1] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "No PCD Datum information given",
ExtraData=self._CurrentLine + \
" (<TokenSpaceGuidCName>.<PcdCName>|<DefaultValue>|<DatumType>|<Token>)",
File=self.MetaFile, Line=self._LineIndex + 1)
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(TokenList[1])
# Has VOID* type string, may contain "|" character in the string.
if len(PtrValue) != 0:
ptrValueList = re.sub(ValueRe, '', TokenList[1])
ValueList = AnalyzePcdExpression(ptrValueList)
ValueList[0] = PtrValue[0]
else:
ValueList = AnalyzePcdExpression(TokenList[1])
# check if there's enough datum information given
if len(ValueList) != 3:
EdkLogger.error('Parser', FORMAT_INVALID, "Invalid PCD Datum information given",
ExtraData=self._CurrentLine + \
" (<TokenSpaceGuidCName>.<PcdCName>|<DefaultValue>|<DatumType>|<Token>)",
File=self.MetaFile, Line=self._LineIndex + 1)
# check default value
if ValueList[0] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "Missing DefaultValue in PCD Datum information",
ExtraData=self._CurrentLine + \
" (<TokenSpaceGuidCName>.<PcdCName>|<DefaultValue>|<DatumType>|<Token>)",
File=self.MetaFile, Line=self._LineIndex + 1)
# check datum type
if ValueList[1] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "Missing DatumType in PCD Datum information",
ExtraData=self._CurrentLine + \
" (<TokenSpaceGuidCName>.<PcdCName>|<DefaultValue>|<DatumType>|<Token>)",
File=self.MetaFile, Line=self._LineIndex + 1)
# check token of the PCD
if ValueList[2] == '':
EdkLogger.error('Parser', FORMAT_INVALID, "Missing Token in PCD Datum information",
ExtraData=self._CurrentLine + \
" (<TokenSpaceGuidCName>.<PcdCName>|<DefaultValue>|<DatumType>|<Token>)",
File=self.MetaFile, Line=self._LineIndex + 1)
PcdValue = ValueList[0]
if PcdValue:
try:
self._GuidDict.update(self._AllPcdDict)
ValueList[0] = ValueExpressionEx(ValueList[0], ValueList[1], self._GuidDict)(True)
except BadExpression as Value:
EdkLogger.error('Parser', FORMAT_INVALID, Value, ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
# check format of default value against the datum type
IsValid, Cause = CheckPcdDatum(ValueList[1], ValueList[0])
if not IsValid:
EdkLogger.error('Parser', FORMAT_INVALID, Cause, ExtraData=self._CurrentLine,
File=self.MetaFile, Line=self._LineIndex + 1)
if Cause == "StructurePcd":
self._CurrentStructurePcdName = TAB_SPLIT.join(self._ValueList[0:2])
self._ValueList[0] = self._CurrentStructurePcdName
self._ValueList[1] = ValueList[1].strip()
if ValueList[0] in ['True', 'true', 'TRUE']:
ValueList[0] = '1'
elif ValueList[0] in ['False', 'false', 'FALSE']:
ValueList[0] = '0'
# check for duplicate PCD definition
if (self._Scope[0], self._ValueList[0], self._ValueList[1]) in self._AllPCDs:
EdkLogger.error('Parser', FORMAT_INVALID,
"The same PCD name and GUID have been already defined",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex + 1)
else:
self._AllPCDs.append((self._Scope[0], self._ValueList[0], self._ValueList[1]))
self._AllPcdDict[TAB_SPLIT.join(self._ValueList[0:2])] = ValueList[0]
self._ValueList[2] = ValueList[0].strip() + '|' + ValueList[1].strip() + '|' + ValueList[2].strip()
_SectionParser = {
MODEL_META_DATA_HEADER : MetaFileParser._DefineParser,
MODEL_EFI_INCLUDE : MetaFileParser._PathParser,
MODEL_EFI_LIBRARY_CLASS : MetaFileParser._PathParser,
MODEL_EFI_GUID : _GuidParser,
MODEL_EFI_PPI : _GuidParser,
MODEL_EFI_PROTOCOL : _GuidParser,
MODEL_PCD_FIXED_AT_BUILD : _PcdParser,
MODEL_PCD_PATCHABLE_IN_MODULE : _PcdParser,
MODEL_PCD_FEATURE_FLAG : _PcdParser,
MODEL_PCD_DYNAMIC : _PcdParser,
MODEL_PCD_DYNAMIC_EX : _PcdParser,
MODEL_UNKNOWN : MetaFileParser._Skip,
MODEL_META_DATA_USER_EXTENSION : MetaFileParser._SkipUserExtension,
}
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
pass
| MattDevo/edk2 | BaseTools/Source/Python/Workspace/MetaFileParser.py | Python | bsd-2-clause | 107,232 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 Vertel AB (<http://vertel.se>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| vertelab/odoo-smart | sale_sequence/__init__.py | Python | agpl-3.0 | 1,050 |
from __future__ import print_function
import argparse
import fnmatch
import functools
import logging
import os
import pprint
import subprocess
from pkg_resources import iter_entry_points
logging.basicConfig()
logger = logging.getLogger('mfind')
class Primary(object):
"""This will be extended by all primaries
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def path_match(context, case_match=True):
"""Compares a file path against a pattern
similar to `find -path arg1`
"""
dirname = context['dirname']
pattern = context['args']
if case_match and fnmatch.fnmatchcase(dirname, pattern):
return context
if not case_match and fnmatch.fnmatch(dirname.lower(), pattern.lower()):
return context
def name_match(context, case_match=True):
"""Compares a file name against a pattern
similar to `find -name arg1`
"""
filename = context['filename']
pattern = context['args']
if case_match and fnmatch.fnmatchcase(filename, pattern):
return context
if not case_match and fnmatch.fnmatch(filename.lower(), pattern.lower()):
return context
def print_path(context, null=False):
"""Prints out the filename
similar to `find . -print`
"""
path = context['path']
suffix = context['args']
context['buffer'].append(path)
if suffix:
context['buffer'].append(suffix)
if null:
context['buffer'].append('\0')
return context
def print_linefeed(context):
"""Prints out a new line
Useful to separate outputs into several lines
"""
context['buffer'].append('\n')
return context
def print_context(context):
"""Prints out the conext
Useful for debugging
"""
context['buffer'].append(pprint.pformat(context))
return context
def exec_command(context):
"""Calls an external command, inspired by find -exec
{} will be converted to file path anywhere it appears in the args.
    TODO: add context params inside {}
"""
path = context['path']
command = context['args']
command = [path if t == '{}' else t for t in command]
    # Drop the last argument, which is expected to be the find-style ';' terminator.
    subprocess.call(command[:-1])
return context
tests_map = {
'name': name_match,
    'iname': functools.partial(name_match, case_match=False),
'path': path_match,
    'ipath': functools.partial(path_match, case_match=False),
'true': lambda context: context,
'false': lambda _: False,
}
actions_map = {
'print': print_path,
'println': print_linefeed,
'print0': functools.partial(print_path, null=True),
'print_context': print_context,
'exec': exec_command,
}
def evaluate(dirname, filename, tests, actions, verbosity):
"""Evaluates a user test and return True or False, like GNU find tests
"""
context = {
'dirname': dirname,
'filename': filename,
'path': os.path.relpath(os.path.join(dirname, filename)),
'verbosity': verbosity,
'buffer': [],
}
for test, args in tests:
context.update({'args': args})
test = tests_map[test]
context = test(context)
if not context:
return False
for action, args in actions:
context.update({'args': args})
action = actions_map[action]
context = action(context)
if not context:
return False
line = ''.join(context['buffer'])
if line.strip():
print(line)
return True
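# Illustrative sketch (not part of the original module): how a parsed command line
# maps onto evaluate(). This is roughly what `mfind . -iname '*.rst' -print` would
# do for a single file; the path and pattern are made-up examples.
def _example_evaluate_usage():
    """Hypothetical helper demonstrating evaluate(); never called by mfind itself."""
    return evaluate(os.getcwd(), 'README.rst',
                    tests=[('iname', '*.rst')],
                    actions=[('print', None)],
                    verbosity=0)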
class TreeWalker(object):
"""provides a functionality similar to os.walk but can do
pre defined depth when needed.
"""
def __init__(self, *args, **kwargs):
self.top = kwargs.get('top', os.getcwd())
if not os.path.exists(self.top):
raise IOError('{}: No such file or directory'.format(self.top))
self.max_depth = kwargs.get('max_depth')
if isinstance(self.max_depth, list):
self.max_depth = self.max_depth[0]
self.depth_first = kwargs.get('depth_first', False)
self._depth = 0
self.recursive = self.max_depth is None or self.max_depth > 0
self.follow_links = kwargs.get('follow_links', False)
def __repr__(self):
return 'TreeWalker(top=%(top)s, max_depth=%(max_depth)r)' % self.__dict__
def walk(self, top=None, depth=0):
if not top:
top = self.top
if self.max_depth is not None:
if depth > self.max_depth:
return
if os.path.isdir(top):
            for f in sorted(os.listdir(top), key=lambda name: os.path.isdir(os.path.join(top, name)),
                            reverse=self.depth_first):
file_path = os.path.join(top, f)
if os.path.isdir(file_path) and self.recursive:
islink = os.path.islink(file_path)
if islink and not self.follow_links:
continue
for d, f in self.walk(file_path, depth + 1):
yield d, f
elif os.path.isfile(file_path):
yield top, f
else:
yield os.path.split(top)
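# Illustrative sketch (not part of the original module): walking the current
# directory one level deep with TreeWalker. Defined only as an example; nothing
# in mfind calls it.
def _example_treewalker_usage():
    """Hypothetical helper demonstrating TreeWalker; never called by mfind itself."""
    tw = TreeWalker(top=os.getcwd(), max_depth=1)
    for dirname, filename in tw.walk():
        print(os.path.join(dirname, filename))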
class ArgTest(argparse.Action):
"""An Action that collects arguments in the order they appear at the shell
"""
def __call__(self, parser, namespace, values, option_string=None):
if 'tests' not in namespace:
setattr(namespace, 'tests', [])
namespace.tests.append((self.dest, values))
class ArgAction(argparse.Action):
"""An Action that collects arguments in the order they appear at the shell
"""
def __call__(self, parser, namespace, values, option_string=None):
if 'actions' not in namespace:
setattr(namespace, 'actions', [])
namespace.actions.append((self.dest, values))
def cli_args():
"""
"""
parser = argparse.ArgumentParser(
description="extensible pure python gnu file like tool."
)
parser.add_argument('-follow',
dest='follow_links',
action='store_true',
help="Follow symbolic links, the default is not to follow.",
default=False)
parser.add_argument('-depth',
'-d',
dest='depth_first',
action='store_true',
default=False,
help=("Process the subdirectory before processing the "
"sibling files available under that directory.")
)
parser.add_argument('-maxdepth',
dest='max_depth',
action='store',
default=None,
type=int,
nargs=1,
help=("Limit the recursion to a maximum depth."
" The default is unlimited depth. ")
)
parser.add_argument('path', action='store', nargs='?', default=os.getcwd(),
help="""The root of the processing tree, this defaults to the
current working directory `pwd`.""")
parser.add_argument('-v', action='count', dest='verbose', default=0, help="""
    The level of verbosity. The more v you add, the more stuff you will
see.
""")
parser.add_argument('-name', dest='name', action=ArgTest,
help="""Match by filename, accepts UNIX globbing patterns.
e.g. `-name *.rst`
""")
parser.add_argument('-iname', dest='iname', action=ArgTest,
help="""Match by filename, similar to `-name` but this is case
insensitive match.
""")
parser.add_argument('-path', dest='path', action=ArgTest,
help="""Match by path, accepts UNIX globbing patterns.
e.g. `-path *.rst`
""")
parser.add_argument('-ipath', dest='ipath', action=ArgTest,
help="""Match by filename, similar to `-ipath` but this is case
insensitive match.
""")
parser.add_argument('-true', dest='true', action=ArgTest, nargs=0,
help="""Always evaluates to True""")
parser.add_argument('-false', dest='false', action=ArgTest, nargs=0,
help="""Always evaluates to False""")
parser.add_argument('-print', dest='print', action=ArgAction, nargs='?',
help="""Prints the file path. It accepts an optional argument as
    a string which is used as a separator, e.g. `-print ','` would
print the file path followed by a comma, thus any further print
    from this file context would be printed on the same line after the
comma. Each file is printed in a new line so this should not be
confused as a separator between matching files.""")
parser.add_argument('-print0', dest='print0', action=ArgAction, nargs=0,
help="""Print the file path follows by a null character rather than
space. Helpful to be used with `xargs -0`.""")
parser.add_argument('-println', dest='println', action=ArgAction,
nargs=0, help="""Print the file path followed by a new line.
""")
parser.add_argument('-print-context', dest='print_context',
        action=ArgAction, nargs=0, help="""
Prints the context for the match, the context is implemented as a
mapping object where primaries can add/remove/modify any of the
key/value pairs.""")
parser.add_argument('-exec', dest='exec', action=ArgAction, nargs='+',
help="""Execute a shell command when a match happens, any `{}` will
be replaced by the match path.""")
# add plugins
for plugin in iter_entry_points(group='mfind.plugin', name='cli_args'):
parser = plugin.load()(parser)
return parser
def main():
parser = cli_args()
ns = parser.parse_args()
verbose = logging.DEBUG if ns.verbose >= 2 else logging.INFO
logger.setLevel(verbose)
tw = TreeWalker(top=ns.path, **ns.__dict__)
tests = getattr(ns, 'tests', [])
actions = getattr(ns, 'actions', [])
if not actions:
logger.debug('defaulting action to print')
actions = [('print', None)]
# add plugins
for plugin in iter_entry_points(group='mfind.plugin', name='tests'):
tests_map.update(plugin.load())
for plugin in iter_entry_points(group='mfind.plugin', name='actions'):
actions_map.update(plugin.load())
for dirname, filename in tw.walk():
evaluate(dirname, filename, tests, actions, ns.verbose)
| meitham/mfind | mfinder.py | Python | bsd-3-clause | 11,002 |
import tkinter as tk
from tkinter import ttk
from collections import namedtuple
import sqlite3
from enum import Enum
import re
import datetime
import logging
import os
import time
# Regular expressions we rely upon
win_loss_pattern = r'PowerTaskList\.DebugPrintPower\(\) - TAG_CHANGE Entity=(.+) tag=PLAYSTATE value=(WON|LOST|TIED)'
full_entity_pattern = r'PowerTaskList\.DebugPrintPower\(\) - FULL_ENTITY - Updating'
show_entity_pattern = r'PowerTaskList\.DebugPrintPower\(\) - SHOW_ENTITY - Updating'
show_entity_sub_pattern = "Entity=(\[.+?\]) CardID=(.+)"
hero_pattern = r'HERO_0(\d)'
create_game_pattern = r'PowerTaskList\.DebugPrintPower\(\) - CREATE_GAME'
tag_change_pattern = r"PowerTaskList\.DebugPrintPower\(\) - TAG_CHANGE"
action_begin_pattern = "PowerTaskList\.DebugPrintPower\(\) - ACTION_START"
action_end_pattern = "PowerTaskList\.DebugPrintPower\(\) - ACTION_END"
action_param_pattern = "Entity=(.+) BlockType=(.+) Index=(.+) Target=(.+)"
entity_pattern = "\[id=(\d+?) cardId= type=(.+?) zone=(.+?) zonePos=(\d+?) player=(\d)\]"
entity_pattern2 = "\[name=(.+?) id=(.+?) zone=(.+?) zonePos=(\d+) cardId=(.+?) player=(\d)\]"
tag_param_pattern = "Entity=(.+) tag=(.+) value=(.+)"
player_pattern = "PowerTaskList\.DebugPrintPower\(\) - Player"
player_acc_pattern = "EntityID=(\d) PlayerID=(\d) GameAccountId=\[hi=(\d+?) lo=(\d+?)\]"
# Hero to int mappings
hero_dict = {9: 'Priest', 3: 'Rogue', 8: 'Mage', 4: 'Paladin', 1: 'Warrior',
7: 'Warlock', 5: 'Hunter', 2: 'Shaman', 6: 'Druid'}
hero_dict_names = {v: k for k, v in hero_dict.items()}
class AutocompleteCardEntry(ttk.Entry):
""" Requires a working database cursor to work """
def __init__(self, parent, cursor, **kwargs):
ttk.Entry.__init__(self, parent, **kwargs)
self.var = self['textvariable']
self.parent = parent
self.cursor = cursor
if self.var == '':
self.var = self['textvariable'] = tk.StringVar()
self.var.trace('w', self.changed)
self.bind("<Right>", self.selection)
self.bind("<Return>", self.selection)
self.bind("<Up>", self.up)
self.bind("<Down>", self.down)
self.cb = []
self.lb_up = False
def bind_card_cb(self, func):
self.cb.append(func)
def changed(self, name, index, mode):
if self.var.get() == '':
self.lb.destroy()
self.lb_up = False
else:
words = self.comparison()
if words:
if not self.lb_up:
self.lb = tk.Listbox(self.parent)
self.lb.bind("<Double-Button-1>", self.selection)
self.lb.bind("<Right>", self.selection)
self.lb.place(x=self.winfo_x(), y=self.winfo_y()+self.winfo_height())
self.lb_up = True
self.lb.delete(0, tk.END)
for w in words:
self.lb.insert(tk.END,w)
else:
if self.lb_up:
self.lb.destroy()
self.lb_up = False
def selection(self, event):
if self.lb_up:
self.var.set(self.lb.get(tk.ACTIVE))
self.lb.destroy()
self.lb_up = False
self.icursor(tk.END)
for f in self.cb:
f(self.var.get())
else:
for f in self.cb:
f(self.var.get())
def up(self, event):
if self.lb_up:
if self.lb.curselection() == ():
index = '0'
else:
index = self.lb.curselection()[0]
if index != '0':
self.lb.selection_clear(first=index)
index = str(int(index)-1)
self.lb.selection_set(first=index)
self.lb.activate(index)
def down(self, event):
if self.lb_up:
if self.lb.curselection() == ():
index = '0'
else:
index = self.lb.curselection()[0]
if index != tk.END:
self.lb.selection_clear(first=index)
index = str(int(index)+1)
self.lb.selection_set(first=index)
self.lb.activate(index)
def comparison(self):
search = '%'+self.var.get()+'%'
results = self.cursor.execute(r"SELECT name FROM cards WHERE name LIKE ?", (search,))
rows = results.fetchall()
l = []
for row in rows:
l.append(row[0])
return l
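# Illustrative sketch (not part of the original module): wiring the autocomplete
# entry to a sqlite3 cursor. The 'cards.db' filename and the print callback are
# hypothetical; the only real requirement is a `cards` table with a `name` column,
# which comparison() queries.
def _example_autocomplete_entry(parent):
    """Hypothetical helper demonstrating AutocompleteCardEntry; not called anywhere."""
    conn = sqlite3.connect('cards.db')
    entry = AutocompleteCardEntry(parent, conn.cursor())
    entry.bind_card_cb(lambda card_name: print(card_name))
    entry.pack()
    return entry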
# This function contains all of the logic required to parse the Hearthstone log
# and generate events from it. There are still some serious issues with reopening
# the log file; this is one of the major problems I need to research and address.
def thread_func(*args):
path = args[0]
exit_flag = args[1]
state_queue = args[2]
parser = LogParser(state_queue)
file = open(path, 'r')
counter = 0
old_size = os.stat(path).st_size
file.seek(0,2)
while 1:
if exit_flag.is_set():
file.close()
return
where = file.tell()
line = file.readline()
# No new line
if not line:
time.sleep(1)
file.seek(where)
counter += 1
if counter > 5:
file.close()
size = os.stat(path).st_size
file = open(path, 'r')
if size == old_size or size > old_size:
file.seek(where)
old_size = size
counter = 0
else:
counter -= 1
parser.parse_line(line)
# We need a well defined interface to pass events to the GUI
# Events we need:
# Game started and playerid's'
# Who the foreign player is
# when a card is played, what turn it was played, and who played it
# when the game ends and the outcome (including relevant statistics)
Player = namedtuple('Player', ['name', 'id', 'high', 'low', 'hero', 'hero_name'])
GameStart = namedtuple('GameStart', ['players',])
GameOutcome = namedtuple('GameOutcome', ['won', 'first', 'duration', 'turns'])
CardPlayed = namedtuple('CardPlayed', ['cardId', 'turn', 'player'])
CardDrawn = namedtuple('CardDrawn', ['cardId', 'turn'])
CardShuffled = namedtuple('CardShuffled', ['cardId', 'player'])
GameEvent = namedtuple('GameEvent', ['type', 'data']) # This will get passed back to the GUI
# Enum for GameEvent types
class EventType(Enum):
#Contains a dictionary with information about the local and foreign player data['foreign'] = Player(...)
GameStart = 1
#Contains a dictionary with information about the outcome of the game
GameEnd = 2
#Contains a dictionary with the information about who, when, and what
CardPlayed = 3
CardDrawn = 4
CardShuffled = 5
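# Illustrative sketch (not part of the original module): how a GUI thread might
# drain the queue that thread_func/LogParser fill with GameEvent tuples. The two
# handler arguments are hypothetical callbacks supplied by the caller.
def _example_drain_events(state_queue, handle_card_played, handle_game_end):
    """Hypothetical helper demonstrating GameEvent consumption; not called anywhere."""
    while not state_queue.empty():
        event = state_queue.get_nowait()
        if event.type is EventType.CardPlayed:
            handle_card_played(event.data)   # CardPlayed(cardId, turn, player)
        elif event.type is EventType.GameEnd:
            handle_game_end(event.data)      # GameOutcome(won, first, duration, turns)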
class LogParser():
def __init__(self, state_queue):
self.q = state_queue
self._compile_regex()
self._reset()
def _reset(self):
self.players = {}
self.in_game = False
self.turn_num = 1
self.game_start_time = None
self.entities = {}
self.local_player_found = False
self.foreign_player_found = False
self.first = True
def _game_start(self):
self.in_game = True
self.game_start_time = datetime.datetime.now()
return
def _player_acc(self, entityid, playerid, high, low):
self.players[playerid] = Player(None, playerid, high, low, None, None)
return
def _full_entity(self, entity):
name = entity.get('name', None)
if self.local_player_found is False:
if name is not None:
pinfo = self.players[entity['player']]
self.players['local'] = Player(None, pinfo.id, pinfo.high, pinfo.low,
None, None)
print('The local player is ID: {0}'.format(pinfo.id))
self.local_player_found = True
if self.foreign_player_found is False:
if self.local_player_found is True:
                if entity['player'] != self.players['local'].id:
pinfo = self.players[entity['player']]
self.players['foreign'] = Player(None, pinfo.id, pinfo.high, pinfo.low,
None, None)
print('The foreign player is ID: {0}'.format(pinfo.id))
self.foreign_player_found = True
cardId = entity.get('cardId', None)
if cardId is not None:
e = CardDrawn(cardId, self.turn_num)
self.q.put(GameEvent(EventType.CardDrawn, e))
if cardId == 'GAME_005':
self.first = False
m = self.re_hero.match(cardId)
if m is not None:
if entity['player'] == self.players['local'].id:
tmp_p = self.players['local']
p = Player(tmp_p.name, tmp_p.id, tmp_p.high,
tmp_p.low, int(m.group(1)), entity.get('name', None))
self.players['local'] = p
print("The local player is playing {0}".format(entity['name']))
return
else:
tmp_p = self.players['foreign']
p = Player(tmp_p.name, tmp_p.id, tmp_p.high,
tmp_p.low, int(m.group(1)), entity.get('name', None))
self.players['foreign'] = p
print("The foreign player is playing {0}".format(entity['name']))
return
def _show_entity(self, entity, cardId):
        if entity['player'] == self.players['foreign'].id:
if entity['zone'] in ('DECK', 'HAND'):
e = CardPlayed(cardId, self.turn_num, entity['player'])
self.q.put(GameEvent(EventType.CardPlayed, e))
        if entity['player'] == self.players['local'].id:
if entity['zone'] == 'DECK':
e = CardDrawn(cardId, self.turn_num)
self.q.put(GameEvent(EventType.CardDrawn, e))
def _tag_change(self, tag, value, entity):
if tag == 'PLAYSTATE':
if value in ('WON', 'LOST', 'TIED'):
if self.in_game:
if entity != self.players['local'].name:
return
else:
deltaT = datetime.datetime.now() - self.game_start_time
duration = deltaT.total_seconds()
outcome = None
if value == 'WON':
outcome = GameOutcome(True, self.first, duration, self.turn_num)
else:
outcome = GameOutcome(False, self.first, duration, self.turn_num)
self.q.put(GameEvent(EventType.GameEnd, outcome))
self._reset()
return
elif tag == 'PLAYER_ID':
if value == self.players['local'].id:
tmp_p = self.players['local']
p = Player(entity, tmp_p.id, tmp_p.high,
tmp_p.low, tmp_p.hero, tmp_p.hero_name)
self.players['local'] = p
else:
tmp_p = self.players['foreign']
p = Player(entity, tmp_p.id, tmp_p.high,
tmp_p.low, tmp_p.hero, tmp_p.hero_name)
self.players['foreign'] = p
if self.players['foreign'].name is not None:
if self.players['local'].name is not None:
#Submit the event to the GUI
self.q.put(GameEvent(EventType.GameStart, GameStart(self.players)))
return
return
elif tag == 'ZONE':
if value == 'PLAY':
if isinstance(entity, dict):
#Local player played a card
cardid = entity.get('cardId', None)
if cardid is not None:
e = CardPlayed(entity['cardId'], self.turn_num, entity['player'])
self.q.put(GameEvent(EventType.CardPlayed, e))
if value == 'DECK':
if isinstance(entity, dict):
#Local player played a card
cardid = entity.get('cardId', None)
if cardid is not None:
e = CardShuffled(entity['cardId'], entity['player'])
self.q.put(GameEvent(EventType.CardShuffled, e))
elif tag == 'TURN':
if entity == 'GameEntity':
self.turn_num = int(value)
def parse_entity(self, subline):
# try the two more specific regular expressions
match = self.re_ent_id.match(subline)
if match:
#entity_pattern = "\[id=(\d+?) cardId= type=(.+?) zone=(.+?) zonePos=(\d+?) player=(\d)\]"
id = match.group(1)
cardId = None
t = match.group(2)
zone = match.group(3)
zonePos = match.group(4)
player = match.group(5)
return {'id': id, 'type': t, 'zone': zone, 'zonePos': zonePos, 'player': player}
match = self.re_ent_name.match(subline)
if match:
#entity_pattern2 = "\[name=(.+?) id=(.+?) zone=(.+?) zonePos=(\d+) cardId=(.+?) player=(\d)\]"
name = match.group(1)
id = match.group(2)
zone = match.group(3)
zonePos = match.group(4)
cardId = match.group(5)
player = match.group(6)
return {'name': name, 'id': id, 'zone': zone, 'zonePos': zonePos, 'cardId': cardId, 'player': player}
return subline
def parse_line(self, line):
magic = line[0]
log_timestamp = line[2:17]
try:
            log_time = datetime.datetime.strptime(log_timestamp, '%H:%M:%S.%f')
except:
pass
# program_time = datetime.now()
# self.timestamp = program_time.strftime('[%H:%M:%S.%f] ')
# Take only the data we need
data = line[19:]
self.handle_line(data)
def handle_line(self, line):
# TAG CHANGE
m = self.re_tag_change.match(line)
if m is not None:
subline = line[m.end(0) + 1:]
mm = self.re_tag_param.match(subline)
if mm is not None:
ent_str = mm.group(1)
tag = mm.group(2)
value = mm.group(3)
entity = self.parse_entity(ent_str)
self._tag_change(tag, value, entity)
return
# SHOW ENTITY
m = self.re_show_ent.match(line)
if m is not None:
subline = line[m.end(0) + 1:]
mm = self.re_sub_ent.match(subline)
if mm is not None:
ent_str = mm.group(1)
entity = self.parse_entity(ent_str)
self._show_entity(entity, mm.group(2))
return
# FULL ENTITY
m = self.re_full_ent.match(line)
if m is not None:
subline = line[m.end(0) + 1:]
entity = self.parse_entity(subline)
self._full_entity(entity)
return
# PLAYER
m = self.re_player.match(line)
if m is not None:
subline = line[m.end(0) + 1:]
mm = self.re_player_acc.match(subline)
if mm is not None:
entityid = mm.group(1)
pid = mm.group(2)
high = int(mm.group(3))
low = int(mm.group(4))
self._player_acc(entityid, pid, high, low)
return
# CREATE GAME
m = self.re_game_start.match(line)
if m is not None:
self._game_start()
def _compile_regex(self):
self.re_game_start = re.compile(create_game_pattern)
self.re_player = re.compile(player_pattern)
self.re_player_acc = re.compile(player_acc_pattern)
self.re_game_end = re.compile(win_loss_pattern)
self.re_hero = re.compile(hero_pattern)
self.re_tag_change = re.compile(tag_change_pattern)
self.re_tag_param = re.compile(tag_param_pattern)
self.re_ent_id = re.compile(entity_pattern)
self.re_ent_name = re.compile(entity_pattern2)
self.re_full_ent = re.compile(full_entity_pattern)
self.re_show_ent = re.compile(show_entity_pattern)
        self.re_sub_ent = re.compile(show_entity_sub_pattern)
| Seek/ValueTracker | hs.py | Python | gpl-3.0 | 16,651 |
from raven.contrib.flask import Sentry
from flask import Flask
from flask.ext.babel import Babel
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.markdown import Markdown
from flask.ext.login import LoginManager
from flask.ext.gravatar import Gravatar
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.script import Manager
from flask.ext.mail import Mail
import pytz
app = Flask(__name__)
app.config.from_pyfile("../config.py.example", silent=True)
app.config.from_pyfile("../config.py", silent=True)
app.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension')
manager = Manager(app)
db = SQLAlchemy(app)
markdown = Markdown(app, safe_mode="escape")
login_manager = LoginManager(app)
sentry = Sentry(app)
gravatar = Gravatar(app, size=48, rating='g', default='identicon', force_default=False, use_ssl=True, base_url=None)
babel = Babel(app)
supported_languages = ['en', 'de']
migrate = Migrate(app, db)
manager.add_command("db", MigrateCommand)
mail = Mail(app)
default_timezone = pytz.timezone(app.config["DEFAULT_TIMEZONE"])
from dudel.util import load_icons
ICONS = load_icons("dudel/icons.txt")
import dudel.assets
import dudel.models
import dudel.forms
import dudel.filters
import dudel.views
import dudel.admin
import dudel.plugins.ldapauth
login_manager.login_view = "login"
| RalfJung/dudel | dudel/__init__.py | Python | gpl-3.0 | 1,320 |
# This file is part of the Simulation Manager project for VecNet.
# For copyright and licensing information about this project, see the
# NOTICE.txt and LICENSE.md files in its top-level directory; they are
# available at https://github.com/vecnet/simulation-manager
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
class SimulationModel(object):
"""
Base class for simulation models. An instance represents a particular version of a simulation model available
on the compute engine.
"""
def __init__(self, id_, version, output_filenames=None):
"""
:param string id_: The model's identifier (see vecnet's simulation_models.model_id module)
:param string version: The identifier for the model version, e.g., '30', '1.2a4', '4.10 (build 7)'.
:param sequence output_filenames: Names of the model's output files.
"""
self.id = id_
self.version = version
if output_filenames is None:
output_filenames = list()
        self.output_filenames = output_filenames
| vecnet/simulation-manager | sim_manager/scripts/models/base.py | Python | mpl-2.0 | 1,216 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaebusiness.business import CommandExecutionException
from gaecookie.decorator import no_csrf
from gaepermission import facade
from livro_app import fachada
from tekton import router
from tekton.gae.middleware.redirect import RedirectResponse
# Handlers
@no_csrf
def index(_logged_user):
buscar_livros_cmd = fachada.listar_livros_de_autor_cmd(_logged_user)
livro_lista = buscar_livros_cmd()
book_form = fachada.livro_tabela_form()
livro_lista = [book_form.fill_with_model(livro) for livro in livro_lista]
editar_form_path = router.to_path(editar_form)
delete_path = router.to_path(delete)
for livro in livro_lista:
livro['edit_path'] = '%s/%s' % (editar_form_path, livro['id'])
livro['delete_path'] = '%s/%s' % (delete_path, livro['id'])
contexto = {'livro_lista': livro_lista,
'form_path': router.to_path(form)}
return TemplateResponse(contexto)
@no_csrf
def editar_form(book_id):
busca_cmd = fachada.get_livro(book_id)
book = busca_cmd()
book_form = fachada.livro_form()
book_form.fill_with_model(book)
contexto = {'salvar_path': router.to_path(editar, book_id),
'book': book_form}
return TemplateResponse(contexto, 'books/form.html')
def editar(book_id, **propriedades):
editar_cmd = fachada.editar_livro(book_id, **propriedades)
try:
editar_cmd()
return RedirectResponse(router.to_path(index))
except CommandExecutionException:
contexto = {'salvar_path': router.to_path(salvar),
'erros': editar_cmd.errors,
'book': propriedades}
return TemplateResponse(contexto, 'books/form.html')
@no_csrf
def form():
contexto = {'salvar_path': router.to_path(salvar)}
return TemplateResponse(contexto, 'books/form_novo.html')
def delete(book_id):
apagar_livro_cmd = fachada.apagar_livro_cmd(book_id)
apagar_livro_cmd()
return RedirectResponse(router.to_path(index))
def salvar(email, **propriedades):
get_user_by_email_cmd = facade.get_user_by_email(email)
salvar_livro_com_autor_cmd = fachada.salvar_livro(get_user_by_email_cmd, **propriedades)
try:
salvar_livro_com_autor_cmd()
return RedirectResponse(router.to_path(index))
except CommandExecutionException:
contexto = {'salvar_path': router.to_path(salvar),
'erros': salvar_livro_com_autor_cmd.errors,
'email': email,
'book': propriedades}
return TemplateResponse(contexto, 'books/form_novo.html')
| renzon/appengine-video | backend/appengine/routes/books.py | Python | mit | 2,726 |
from __future__ import absolute_import
import scanomatic.generics.model as model
class SegmentationModel(model.Model):
def __init__(self, dydt=None, dydt_ranks=None, dydt_signs=None, d2yd2t=None,
d2yd2t_signs=None, phases=None, offset=0, log2_curve=None, times=None,
plate=None, pos=None):
self.log2_curve = log2_curve
""":type : numpy.ndarray"""
self.times = times
""":type : numpy.ndarray"""
self.plate = plate
""":type : int"""
self.pos = pos
""":type : (int, int)"""
self.dydt = dydt
""":type : numpy.ndarray"""
self.dydt_ranks = dydt_ranks
""":type : numpy.ndarray"""
self.dydt_signs = dydt_signs
""":type : numpy.ndarray"""
self.d2yd2t = d2yd2t
""":type : numpy.ndarray"""
self.d2yd2t_signs = d2yd2t_signs
""":type : numpy.ndarray"""
self.offset = offset
""":type : int"""
self.phases = phases
""":type : numpy.ndarray"""
super(SegmentationModel, self).__init__()
| Scan-o-Matic/scanomatic | scanomatic/models/phases_models.py | Python | gpl-3.0 | 1,102 |
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Computes metrics for Google Landmarks Recognition dataset predictions.
Metrics are written to stdout.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.python.platform import app
from delf.python.google_landmarks_dataset import dataset_file_io
from delf.python.google_landmarks_dataset import metrics
cmd_args = None
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read solution.
print('Reading solution...')
public_solution, private_solution, ignored_ids = dataset_file_io.ReadSolution(
cmd_args.solution_path, dataset_file_io.RECOGNITION_TASK_ID)
print('done!')
# Read predictions.
print('Reading predictions...')
public_predictions, private_predictions = dataset_file_io.ReadPredictions(
cmd_args.predictions_path, set(public_solution.keys()),
set(private_solution.keys()), set(ignored_ids),
dataset_file_io.RECOGNITION_TASK_ID)
print('done!')
# Global Average Precision.
print('**********************************************')
print('(Public) Global Average Precision: %f' %
metrics.GlobalAveragePrecision(public_predictions, public_solution))
print('(Private) Global Average Precision: %f' %
metrics.GlobalAveragePrecision(private_predictions, private_solution))
# Global Average Precision ignoring non-landmark queries.
print('**********************************************')
print(
'(Public) Global Average Precision ignoring non-landmark queries: %f' %
metrics.GlobalAveragePrecision(
public_predictions, public_solution, ignore_non_gt_test_images=True))
print(
'(Private) Global Average Precision ignoring non-landmark queries: %f' %
metrics.GlobalAveragePrecision(
private_predictions, private_solution,
ignore_non_gt_test_images=True))
# Top-1 accuracy.
print('**********************************************')
print('(Public) Top-1 accuracy: %.2f' %
(100.0 * metrics.Top1Accuracy(public_predictions, public_solution)))
print('(Private) Top-1 accuracy: %.2f' %
(100.0 * metrics.Top1Accuracy(private_predictions, private_solution)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--predictions_path',
type=str,
default='/tmp/predictions.csv',
help="""
Path to CSV predictions file, formatted with columns 'id,landmarks' (the
file should include a header).
""")
parser.add_argument(
'--solution_path',
type=str,
default='/tmp/solution.csv',
help="""
Path to CSV solution file, formatted with columns 'id,landmarks,Usage'
(the file should include a header).
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| alexgorban/models | research/delf/delf/python/google_landmarks_dataset/compute_recognition_metrics.py | Python | apache-2.0 | 3,676 |
import urllib
import re
__all__ = ["safe_quote_tuple", "etree_to_dict", "return_true"]
camelcase_to_underscore = lambda s: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', s).lower().strip('_')
def safe_quote_tuple(tuple_):
"""Convert a 2-tuple to a string for use with AWS"""
key = urllib.quote(str(tuple_[0]), '-_.~')
value = urllib.quote(str(tuple_[1]), '-_.~')
return "%s=%s" % (key, value)
def etree_to_dict(etree, namespace=None, tag_list=True, convert_camelcase=False):
"""
Convert an etree to a dict.
**Keyword arguments:**
     * *namespace* -- XML Namespace to be removed from tag names (Default None)
     * *tag_list* -- Always wrap child values in lists, even for a single child (Default True)
     * *convert_camelcase* -- Convert CamelCase tag names to lowercase_underscore (Default False)
"""
children = etree.getchildren()
if len(children) == 0:
return etree.text
children_dict = {}
for element in children:
tag = element.tag
if namespace is not None:
tag = tag.replace(namespace, "")
if convert_camelcase:
tag = camelcase_to_underscore(tag)
element_dict = etree_to_dict(element, namespace=namespace, tag_list=tag_list, convert_camelcase=convert_camelcase)
if tag in children_dict:
if not isinstance(children_dict[tag], list):
children_dict[tag] = [children_dict[tag]]
children_dict[tag].append(element_dict)
else:
if tag_list:
children_dict[tag] = [element_dict]
else:
children_dict[tag] = element_dict
return children_dict
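# Illustrative sketch (not part of the original module): a minimal round trip
# through etree_to_dict() using the standard-library ElementTree parser. The XML
# snippet is made up purely for demonstration.
def _example_etree_to_dict():
    """Hypothetical helper demonstrating etree_to_dict(); not called anywhere."""
    from xml.etree import ElementTree
    root = ElementTree.fromstring("<item><name>spam</name><qty>2</qty></item>")
    # With the default tag_list=True every child tag maps to a list of values,
    # e.g. {'name': ['spam'], 'qty': ['2']}
    return etree_to_dict(root)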
def return_true(data):
return True
| wehriam/awspider | awspider/aws/lib.py | Python | mit | 1,539 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateContact
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-essential-contacts
# [START essentialcontacts_v1_generated_EssentialContactsService_CreateContact_async]
from google.cloud import essential_contacts_v1
async def sample_create_contact():
# Create a client
client = essential_contacts_v1.EssentialContactsServiceAsyncClient()
# Initialize request argument(s)
request = essential_contacts_v1.CreateContactRequest(
parent="parent_value",
)
# Make the request
response = await client.create_contact(request=request)
# Handle the response
print(response)
# [END essentialcontacts_v1_generated_EssentialContactsService_CreateContact_async]
| googleapis/python-essential-contacts | samples/generated_samples/essentialcontacts_v1_generated_essential_contacts_service_create_contact_async.py | Python | apache-2.0 | 1,560 |
from __future__ import unicode_literals
import json
import re
import os
import jinja2
from nipype.utils.filemanip import loadcrash
from pkg_resources import resource_filename as pkgrf
class Element(object):
def __init__(self, name, file_pattern, title, description):
self.name = name
self.file_pattern = re.compile(file_pattern)
self.title = title
self.description = description
self.files_contents = []
class SubReport(object):
def __init__(self, name, elements, title=''):
self.name = name
self.title = title
self.elements = []
self.run_reports = []
for e in elements:
element = Element(**e)
self.elements.append(element)
def order_by_run(self):
run_reps = {}
for elem_index in range(len(self.elements) - 1, -1, -1):
element = self.elements[elem_index]
for index in range(len(element.files_contents) - 1, -1, -1):
filename = element.files_contents[index][0]
file_contents = element.files_contents[index][1]
name, title = self.generate_name_title(filename)
if not name:
continue
new_elem = {'name': element.name, 'file_pattern': element.file_pattern,
'title': element.title, 'description': element.description}
try:
new_element = Element(**new_elem)
run_reps[name].elements.append(new_element)
run_reps[name].elements[-1].files_contents.append((filename, file_contents))
except KeyError:
run_reps[name] = SubReport(name, [new_elem], title=title)
run_reps[name].elements[0].files_contents.append((filename, file_contents))
keys = list(run_reps.keys())
keys.sort()
for key in keys:
self.run_reports.append(run_reps[key])
def generate_name_title(self, filename):
fname = os.path.basename(filename)
expr = re.compile('^sub-(?P<subject_id>[a-zA-Z0-9]+)(_ses-(?P<session_id>[a-zA-Z0-9]+))?'
'(_task-(?P<task_id>[a-zA-Z0-9]+))?(_acq-(?P<acq_id>[a-zA-Z0-9]+))?'
'(_rec-(?P<rec_id>[a-zA-Z0-9]+))?(_run-(?P<run_id>[a-zA-Z0-9]+))?')
outputs = expr.search(fname)
if outputs:
outputs = outputs.groupdict()
else:
return None, None
name = '{session}{task}{acq}{rec}{run}'.format(
session="_ses-" + outputs['session_id'] if outputs['session_id'] else '',
task="_task-" + outputs['task_id'] if outputs['task_id'] else '',
acq="_acq-" + outputs['acq_id'] if outputs['acq_id'] else '',
rec="_rec-" + outputs['rec_id'] if outputs['rec_id'] else '',
run="_run-" + outputs['run_id'] if outputs['run_id'] else ''
)
title = '{session}{task}{acq}{rec}{run}'.format(
session=" Session: " + outputs['session_id'] if outputs['session_id'] else '',
task=" Task: " + outputs['task_id'] if outputs['task_id'] else '',
acq=" Acquisition: " + outputs['acq_id'] if outputs['acq_id'] else '',
rec=" Reconstruction: " + outputs['rec_id'] if outputs['rec_id'] else '',
run=" Run: " + outputs['run_id'] if outputs['run_id'] else ''
)
return name, title
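# Illustrative example (not part of the original code): for a reportlet named
# 'sub-01_ses-retest_task-rest_run-1_bold.svg', generate_name_title returns roughly
# ('_ses-retest_task-rest_run-1', ' Session: retest Task: rest Run: 1').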
class Report(object):
def __init__(self, path, config, out_dir, out_filename='report.html'):
self.root = path
self.sub_reports = []
self.errors = []
self._load_config(config)
self.out_dir = out_dir
self.out_filename = out_filename
def _load_config(self, config):
try:
config = json.load(open(config, 'r'))
except Exception as e:
print(e)
return
for e in config['sub_reports']:
sub_report = SubReport(**e)
self.sub_reports.append(sub_report)
self.index()
def index(self):
for root, directories, filenames in os.walk(self.root):
for f in filenames:
f = os.path.join(root, f)
for sub_report in self.sub_reports:
for element in sub_report.elements:
ext = f.split('.')[-1]
if element.file_pattern.search(f) and (ext == 'svg' or ext == 'html'):
with open(f) as fp:
content = fp.read()
content = '\n'.join(content.split('\n')[1:])
element.files_contents.append((f, content))
for sub_report in self.sub_reports:
sub_report.order_by_run()
subject_dir = self.root.split('/')[-1]
subject = re.search('^(?P<subject_id>sub-[a-zA-Z0-9]+)$', subject_dir).group()
error_dir = os.path.join(self.root, '../../log', subject[4:])
if os.path.isdir(error_dir):
self.index_error_dir(error_dir)
def index_error_dir(self, error_dir):
        ''' Crawl the subject's most recent crash directory and record error details
        for each .pklz crash file found. '''
# Crash directories for subject are named by a timestamp. Sort
# listdir output to order it alphabetically, which for our timestamped
# directories is also a chronological listing. Assumes no other
# directories will be created in subject crash dir.
try:
newest_dir = [x for x in os.listdir(error_dir)
if os.path.isdir(os.path.join(error_dir, x))]
newest_dir.sort()
newest_dir = newest_dir[-1]
newest_dir = os.path.join(error_dir, newest_dir)
except IndexError:
newest_dir = error_dir
for root, directories, filenames in os.walk(newest_dir):
for f in filenames:
# Only deal with files that start with crash and end in pklz
if not (f[:5] == 'crash' and f[-4:] == 'pklz'):
continue
crash_data = loadcrash(os.path.join(root, f))
error = {}
node = None
if 'node' in crash_data:
node = crash_data['node']
error['traceback'] = []
for elem in crash_data['traceback']:
error['traceback'].append("<br>".join(elem.split("\n")))
error['file'] = f
if node:
error['node'] = node
if node.base_dir:
error['node_dir'] = node.output_dir()
else:
error['node_dir'] = "Node crashed before execution"
error['inputs'] = sorted(node.inputs.trait_get().items())
self.errors.append(error)
def generate_report(self):
searchpath = pkgrf('fmriprep', '/')
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath=searchpath),
trim_blocks=True, lstrip_blocks=True
)
report_tpl = env.get_template('viz/report.tpl')
report_render = report_tpl.render(sub_reports=self.sub_reports, errors=self.errors)
with open(os.path.join(self.out_dir, self.out_filename), 'w') as fp:
fp.write(report_render)
return report_render
def run_reports(out_dir):
reportlet_path = os.path.join(out_dir, 'reports/')
config = pkgrf('fmriprep', 'viz/config.json')
for root, _, _ in os.walk(reportlet_path):
# relies on the fact that os.walk does not return a trailing /
dir = root.split('/')[-1]
try:
subject = re.search('^(?P<subject_id>sub-[a-zA-Z0-9]+)$', dir).group()
out_filename = '{}{}'.format(subject, '.html')
report = Report(root, config, out_dir, out_filename)
report.generate_report()
except AttributeError:
continue
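# Illustrative call (hypothetical output directory, not part of the original module):
# assuming reportlets live under <out_dir>/reports/sub-XX/, this writes one
# sub-XX.html report per subject found there:
#
#   run_reports('/data/derivatives/fmriprep')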
| chrisfilo/fmriprep | fmriprep/viz/reports.py | Python | bsd-3-clause | 8,063 |
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser, UserManager
class Server (models.Model):
name = models.CharField(max_length=30)
url = models.URLField()
def __str__ (self):
return self.name
class Meta:
db_table = 'servers'
class Account (AbstractUser):
server = models.ForeignKey(Server, related_name='accounts', null=True)
def __str__ (self):
return self.username
class Meta:
db_table = 'accounts'
class Tag (models.Model):
name = models.CharField(max_length=50, unique=True)
def __str__ (self):
return self.name
class Meta:
db_table = 'tags'
class Post (models.Model):
account = models.ForeignKey(Account, null=True)
tags = models.ManyToManyField('Tag', blank=True)
message = models.CharField(max_length=settings.POST_LENGTH)
ts_created = models.DateTimeField(auto_now_add=True)
verified = models.BooleanField(default=False)
class Meta:
db_table = 'posts'
class Comment (models.Model):
account = models.ForeignKey(Account, null=True)
post = models.ForeignKey(Post, related_name='comments')
comment = models.ForeignKey('Comment', related_name='comments', null=True)
message = models.CharField(max_length=settings.COMMENT_LENGTH)
ts_created = models.DateTimeField(auto_now_add=True)
verified = models.BooleanField(default=False)
class Meta:
db_table = 'comments'
| AHAPX/dimib | dimib/models.py | Python | gpl-3.0 | 1,512 |
from __future__ import unicode_literals
from cms.models import CMSPlugin
from cms.models.fields import PageField
from django.db import models
from django.utils.six import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from filer.fields.folder import FilerFolderField
from filer.models import Image
@python_2_unicode_compatible
class Schedule(CMSPlugin):
headline = models.CharField(_('Headline'), max_length=4000, blank=True)
def __str__(self):
return '{0}'.format(self.headline[:23])
@python_2_unicode_compatible
class Event(CMSPlugin):
headline = models.CharField(_('Headline'), max_length=4000, blank=True)
text = models.TextField(_('Text'), blank=True)
date = models.DateField(_('Date'), blank=True, null=True)
recurring = models.BooleanField(_('Recurring'), default=False)
link = models.CharField(_('Link'), blank=True, max_length=4000)
page = PageField(
verbose_name=('CMS Page'), blank=True, null=True,
help_text=_('If both link and cms page is defined, the link is preferred.'))
link_text = models.CharField(_('Link Text'), blank=True, max_length=4000)
gallery = FilerFolderField(
verbose_name=('Image Gallery'), blank=True, null=True)
def __str__(self):
return '{0}'.format(self.headline[:23])
@property
def gallery_images(self):
if not self.gallery:
return []
return Image.objects.filter(folder=self.gallery)
| django-de/django-de-v4 | djangode/schedule/models.py | Python | mit | 1,484 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class RoutesOperations(object):
"""RoutesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2015-06-15".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-06-15"
self.config = config
def _delete_initial(
self, resource_group_name, route_table_name, route_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, route_table_name, route_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, route_table_name, route_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Route or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2015_06_15.models.Route or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Route', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _create_or_update_initial(
self, resource_group_name, route_table_name, route_name, route_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(route_parameters, 'Route')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Route', response)
if response.status_code == 201:
deserialized = self._deserialize('Route', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, route_table_name, route_name, route_parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update
route operation.
:type route_parameters: ~azure.mgmt.network.v2015_06_15.models.Route
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns Route or
ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2015_06_15.models.Route]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('Route', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, **operation_config):
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Route
:rtype:
~azure.mgmt.network.v2015_06_15.models.RoutePaged[~azure.mgmt.network.v2015_06_15.models.Route]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoutePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoutePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
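# A hedged usage sketch (illustrative, not part of the generated client). The resource
# names are hypothetical, and the operations group is normally reached through
# NetworkManagementClient rather than instantiated directly.
#
#   from azure.common.credentials import ServicePrincipalCredentials
#   from azure.mgmt.network import NetworkManagementClient
#
#   credentials = ServicePrincipalCredentials(client_id='...', secret='...', tenant='...')
#   network_client = NetworkManagementClient(credentials, subscription_id='...')
#
#   poller = network_client.routes.create_or_update(
#       'my-resource-group', 'my-route-table', 'my-route',
#       {'address_prefix': '10.1.0.0/16', 'next_hop_type': 'VnetLocal'})
#   route = poller.result()  # AzureOperationPoller blocks until the LRO finishes
#
#   for r in network_client.routes.list('my-resource-group', 'my-route-table'):
#       print(r.name)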
| AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2015_06_15/operations/routes_operations.py | Python | mit | 17,924 |
""" Module for abstract base class Capability
    Essentially an adaptor to give nengo objects interfaces
    with the functionality we require (related to the observable
    data they support).
Capability represents observable data that a given object
in a nengo model has to offer. The capability defines how
to connect an observer node to the object to be observed
in order to collect the data. It also maintains some
metadata regarding the type of data offered, including
determining the dimensions of the given data from the given
object.
"""
from abc import ABCMeta, abstractmethod
class Capability(object):
""" Capability class
"""
__metaclass__ = ABCMeta
@property
def name(self):
""" Name of the cap. Should be unique, like an id,
but we don't enforce this at the moment.
"""
return "Capability"
def supports_obj(self, obj):
""" Determines if the given object offers this cap.
Args:
obj (object): The object to check.
Returns:
bool. True if given object offers this capability.
"""
return False
@abstractmethod
def get_out_dimensions(self, obj):
""" Get the output dimensions of this cap for the given object.
Args:
obj (object): The object offering the cap.
Returns:
int. The number of dimensions of the data offered by
this cap for this obj.
Raises:
ValueError - This is not a cap for the given object.
The output dimensions depend on the object and the capability.
Also note here that dimensions are *not* like numpy ndarray dimensions,
they are simply the length of the vector that the signal will be.
I know. Threw me for a bit of a loop as well.
"""
pass
@abstractmethod
def connect_node(self, node, obj):
""" Connects an observer node to the given object.
Args:
node (nengo.Node): The observer node to connect to the object.
obj (object): The object to observe.
Raises:
ValueError - This is not a cap for the given object.
"""
pass
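# A minimal subclass sketch (illustrative only; the nengo attributes used here are
# assumptions and not part of this module). It shows the three pieces a concrete
# capability provides: the support check, the output dimensions, and the node hookup.
#
#   import nengo
#
#   class DecodedOutputCap(Capability):
#       @property
#       def name(self):
#           return "decoded output"
#
#       def supports_obj(self, obj):
#           return isinstance(obj, nengo.Ensemble)
#
#       def get_out_dimensions(self, obj):
#           if not self.supports_obj(obj):
#               raise ValueError("decoded output cap does not support %s" % obj)
#           return obj.dimensions  # length of the observed signal vector
#
#       def connect_node(self, node, obj):
#           # Route the decoded ensemble output into the observer node.
#           nengo.Connection(obj, node, synapse=None)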
| chairmanmeow50/Brainspawn | brainspawn/simulator/capabilities/capability.py | Python | bsd-3-clause | 2,188 |
from django.contrib import admin
from responseTemplates.models import ResponseTemplate, Paragraph
class ParagraphInline(admin.StackedInline):
model = Paragraph
extra = 1
class ResponseTemplateAdmin(admin.ModelAdmin):
inlines = [ParagraphInline]
admin.site.register(ResponseTemplate, ResponseTemplateAdmin)
| wispwisp/supportNotebook | responseTemplates/admin.py | Python | mit | 325 |
#!/bin/python3
# Integers Come In All Sizes
# https://www.hackerrank.com/challenges/python-integers-come-in-all-sizes/problem
if __name__ == '__main__':
a, b, c, d = int(input()), int(input()), int(input()), int(input())
print((a ** b) + (c ** d))
| neiesc/Problem-solving | HackerRank/Python/Math/python-integers-come-in-all-sizes.py | Python | mit | 257 |
"""
Run some experiments using SyntheticDataset1
"""
import os
import sys
import errno
import logging
import numpy
import argparse
from apgl.graph import *
from exp.recommendexp.RecommendExpHelper import RecommendExpHelper
from exp.recommendexp.SyntheticDataset1 import SyntheticDataset1
#if __debug__:
# raise RuntimeError("Must run python with -O flag")
numpy.random.seed(21)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(suppress=True, linewidth=150)
numpy.seterr("raise", under="ignore")
# Arguments related to the dataset
dataArgs = argparse.Namespace()
# Arguments related to the algorithm
defaultAlgoArgs = argparse.Namespace()
defaultAlgoArgs.ks = numpy.array(2**numpy.arange(4, 7, 0.5), numpy.int)
defaultAlgoArgs.rhos = numpy.linspace(0.5, 0.0, 10)
defaultAlgoArgs.folds = 3
# init (reading/writting command line arguments)
# data args parser #
dataParser = argparse.ArgumentParser(description="", add_help=False)
dataParser.add_argument("-h", "--help", action="store_true", help="show this help message and exit")
devNull, remainingArgs = dataParser.parse_known_args(namespace=dataArgs)
if dataArgs.help:
helpParser = argparse.ArgumentParser(description="", add_help=False, parents=[dataParser, RecommendExpHelper.newAlgoParser(defaultAlgoArgs)])
helpParser.print_help()
exit()
dataArgs.extendedDirName = ""
dataArgs.extendedDirName += "SyntheticDataset1"
dataArgs.nonUniform = False
# print args #
logging.info("Running on SyntheticDataset1")
logging.info("Data params:")
keys = list(vars(dataArgs).keys())
keys.sort()
for key in keys:
logging.info(" " + str(key) + ": " + str(dataArgs.__getattribute__(key)))
# data
generator = SyntheticDataset1(startM=5000, endM=10000, startN=1000, endN=1500, pnz=0.10, noise=0.01, nonUniform=dataArgs.nonUniform)
# run
logging.info("Creating the exp-runner")
recommendExpHelper = RecommendExpHelper(generator.getTrainIteratorFunc(), generator.getTestIteratorFunc(), remainingArgs, defaultAlgoArgs, dataArgs.extendedDirName)
recommendExpHelper.printAlgoArgs()
# os.makedirs(resultsDir, exist_ok=True) # for python 3.2
try:
os.makedirs(recommendExpHelper.resultsDir)
except OSError as err:
if err.errno != errno.EEXIST:
raise
recommendExpHelper.runExperiment()
| charanpald/wallhack | wallhack/recommendexp/SyntheticExp.py | Python | gpl-3.0 | 2,304 |
import StringIO
from fabric.api import run, sudo, cd, put, env, settings, hide
from fabric.context_managers import shell_env
from fabric.contrib.files import append, exists
from shuttle.services.postgis import *
from shuttle.services.service import Service
from shuttle.hooks import hook
from shuttle.shared import (
apt_get_install,
pip_install,
find_service,
chown,
get_django_setting,
SiteType,
)
POSTGRES_USER = 'postgres'
POSTGRES_GROUP = 'postgres'
_SHOW_FILE_COMMAND = '$(sudo -u %s psql -t -P format=unaligned -c "SHOW %%s;")' % POSTGRES_USER
_CONFIG_FILE_PATH = _SHOW_FILE_COMMAND % 'config_file'
_HBA_FILE_PATH = _SHOW_FILE_COMMAND % 'hba_file'
_IDENT_FILE_PATH = _SHOW_FILE_COMMAND % 'ident_file'
_MAIN_DIR = '$(dirname %s)' % _CONFIG_FILE_PATH
_CONF_DIR = '%s/conf.d' % _MAIN_DIR
_EXCLUDE_SETTINGS = ['postgis', 'hba', 'ident']
# NOTE: If hba is set to True instead of a list, then client authentication for the current local host is added
def _pg_quote_config(key, value):
if (isinstance(value, (str, unicode)) and value not in ('on', 'off') and not value[0].isdigit()) or key == 'listen_addresses':
return "'%s'" % value
return value
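# Illustrative quoting examples (not part of the original module):
#   _pg_quote_config('max_connections', 100)       -> 100
#   _pg_quote_config('synchronous_commit', 'off')  -> off    (on/off kept bare)
#   _pg_quote_config('log_timezone', 'UTC')        -> 'UTC'  (strings get quoted)
#   _pg_quote_config('listen_addresses', '*')      -> '*'    (always quoted)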
def _get_pg_env(site):
database = get_django_setting(site, 'DATABASES')['default']
return {
'PGHOST': database['HOST'],
'PGPORT': str(database.get('PORT') or '5432'),
'PGUSER': database['USER'],
'PGPASSWORD': database['PASSWORD'],
'PGDATABASE': database['NAME']
}
class Postgres(Service):
name = 'postgres'
script = 'postgresql'
def execute_sql(self, raw_sql, site=None):
sql = []
for line in raw_sql.split('\n'):
line = line.strip()
if not line or line.startswith('--'):
continue
sql.append(line.replace('\t', '').replace('\n', '').replace("'", "\\'"))
sql = ' '.join(sql)
if site:
with shell_env(**_get_pg_env(site)), settings(warn_only=True):
sudo("psql --echo-queries -c $'%s'" % sql)
else:
with settings(warn_only=True):
sudo("psql --echo-queries -c $'%s'" % sql, user=POSTGRES_USER)
def execute_command(self, pg_command, site=None):
if site:
with shell_env(**_get_pg_env(site)), settings(warn_only=True):
sudo(pg_command)
else:
with settings(warn_only=True):
sudo(pg_command, user=POSTGRES_USER)
def install(self):
with hook('install %s' % self.name, self):
apt_get_install('postgresql')
if self.settings.get('postgis'):
install_postgis()
def config(self):
with hook('config %s' % self.name, self):
if not exists(_CONF_DIR):
sudo('mkdir %s' % _CONF_DIR)
chown(_CONF_DIR, POSTGRES_USER, POSTGRES_GROUP)
append(_CONFIG_FILE_PATH, "include_dir 'conf.d'", use_sudo=True)
if self.settings:
# Apply any given settings and place into a new conf.d directory
config = ''
if env.get('vagrant'):
self.settings['listen_addresses'] = '*'
for setting in self.settings:
if setting not in _EXCLUDE_SETTINGS:
config += '%s = %s\n' % (setting, _pg_quote_config(setting, self.settings[setting]))
if config:
chown(put(StringIO.StringIO(config), _CONF_DIR + '/fabric.conf', use_sudo=True, mode=0644), POSTGRES_USER, POSTGRES_GROUP)
# Apply any given Client Authentications given under hba
hba = list(self.settings.get('hba', []))
if env.get('vagrant') or hba == True:
if hba == True:
hba = []
with hide('everything'):
host_ip = run('echo $SSH_CLIENT').split(' ')[0]
hba.append(('host', 'all', 'all', host_ip + '/32', 'md5'))
if hba:
append(_HBA_FILE_PATH, '# Fabric client connections:', use_sudo=True)
for client in hba:
client = '%s%s%s%s%s' % (client[0].ljust(8), client[1].ljust(16), client[2].ljust(16), client[3].ljust(24), client[4])
append(_HBA_FILE_PATH, client, use_sudo=True)
# Apply any given identity mappings
ident = self.settings.get('ident', [])
if ident:
append(_IDENT_FILE_PATH, '# Fabric username maps:', use_sudo=True)
for mapping in ident:
mapping = '%s%s%s' % (mapping[0].ljust(16), mapping[1].ljust(24), mapping[2])
append(_IDENT_FILE_PATH, mapping, use_sudo=True)
self.restart()
def site_install(self, site):
with hook('site install %s' % self.name, self, site):
if self.settings.get('postgis'):
# Install PostGIS also on the site if separate from the server
if find_service(self.name) is None:
install_postgis()
# Install python postgresql support
pip_install(site, 'psycopg2')
def site_config(self, site):
with hook('site config %s' % self.name, self, site):
if site['type'] == SiteType.DJANGO:
database = get_django_setting(site, 'DATABASES')['default']
# Create the user for django to access the database with
if find_service(self.name) is not None:
with settings(warn_only=True):
self.execute_command('createuser --echo --createdb --no-superuser --no-createrole ' + database['USER'])
self.execute_sql("ALTER USER %s WITH PASSWORD '%s';" % (database['USER'], database['PASSWORD']))
# Create the database
self.execute_command('createdb --echo ' + database['NAME'], site)
# Setup postgis
if self.settings.get('postgis'):
site_config_postgis(self, site)
| mvx24/fabric-shuttle | shuttle/services/postgres.py | Python | mit | 6,149 |
from __future__ import print_function
import sys
import time
import argparse
from time import sleep
from distutils.version import LooseVersion
from typing import Any
from Tests.scripts.utils import logging_wrapper as logging
import urllib3
import demisto_client.demisto_api
from Tests.scripts.utils.log_util import install_logging
from Tests.test_integration import check_integration
from demisto_sdk.commands.common.constants import PB_Status
from demisto_sdk.commands.common.tools import str2bool
from Tests.test_content import SettingsTester, DataKeeperTester, \
print_test_summary, update_test_msg, turn_off_telemetry, \
create_result_files, get_all_tests, get_instances_ips_and_names, get_server_numeric_version, \
initialize_queue_and_executed_tests_set, get_test_records_of_given_test_names, \
extract_filtered_tests, load_conf_files, set_integration_params, collect_integrations, \
SERVER_URL
# Disable insecure warnings
urllib3.disable_warnings()
def options_handler():
parser = argparse.ArgumentParser(description='Utility for batch action on incidents')
parser.add_argument('-k', '--apiKey', help='The Demisto API key for the server', required=True)
parser.add_argument('-s', '--server', help='The server URL to connect to')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-e', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--nightly', type=str2bool, help='Run nightly tests')
parser.add_argument('-t', '--slack', help='The token for slack', required=True)
parser.add_argument('-a', '--circleci', help='The token for circleci', required=True)
parser.add_argument('-b', '--buildNumber', help='The build number', required=True)
parser.add_argument('-g', '--buildName', help='The build name', required=True)
parser.add_argument('-sa', '--service_account', help="Path to GCS service account.", required=False)
parser.add_argument('-i', '--isAMI', type=str2bool, help='is AMI build or not', default=False)
parser.add_argument('-m', '--memCheck', type=str2bool,
help='Should trigger memory checks or not. The slack channel to check the data is: '
'dmst_content_nightly_memory_data', default=False)
parser.add_argument('-d', '--serverVersion', help='Which server version to run the '
'tests on(Valid only when using AMI)', default="NonAMI")
parser.add_argument('-l', '--testsList', help='List of specific, comma separated'
'tests to run')
options = parser.parse_args()
tests_settings = SettingsTester(options)
return tests_settings
def run_test_logic(tests_settings: Any, c: Any, failed_playbooks: list,
integrations: list, playbook_id: str, succeed_playbooks: list, test_message: str,
test_options: dict, slack: Any, circle_ci: str, build_number: str, server_url: str,
demisto_user: str, demisto_pass: str, build_name: str) -> bool:
"""
    run_test_logic handles the testing of the integration by triggering check_integration. Afterwards
    it will check the status of the test and report success or add the failed test to the list of
    failed playbooks.
:param tests_settings: SettingsTester object which contains the test variables
:param c: Client for connecting to XSOAR via demisto-py
:param failed_playbooks: List of failed playbooks, additional failed playbooks will be added if
they failed.
:param integrations: List of integrations being tested.
:param playbook_id: ID of the test playbook being tested.
:param succeed_playbooks: List of playbooks which have passed tests.
:param test_message: Name of the playbook/integration being tested. This is reported back in the
build and used to print in the console the test being ran.
:param test_options: Options being passed to the test. PID, Docker Threshold, Timeout, etc.
:param slack: Slack client used for notifications.
:param circle_ci: CircleCI token. Used to get name of dev who triggered the build.
:param build_number: The build number of the CI run. Used in slack message.
:param server_url: The FQDN of the server tests are being ran on.
:param demisto_user: Username of the demisto user running the tests.
:param demisto_pass: Password of the demisto user running the tests.
:param build_name: Name of the build. (Nightly, etc.)
:return: Boolean indicating if the test was successful.
"""
status, inc_id = check_integration(c, server_url, demisto_user, demisto_pass, integrations, playbook_id,
options=test_options)
if status == PB_Status.COMPLETED:
logging.success(f'PASS: {test_message} succeed')
succeed_playbooks.append(playbook_id)
elif status == PB_Status.NOT_SUPPORTED_VERSION:
logging.info(f'PASS: {test_message} skipped - not supported version')
succeed_playbooks.append(playbook_id)
else:
logging.error(f'Failed: {test_message} failed')
playbook_id_with_mock = playbook_id
playbook_id_with_mock += " (Mock Disabled)"
failed_playbooks.append(playbook_id_with_mock)
succeed = status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
return succeed
def run_test(tests_settings: SettingsTester, demisto_user: str, demisto_pass: str,
failed_playbooks: list, integrations: list, playbook_id: str, succeed_playbooks: list,
test_message: str, test_options: dict, slack: str, circle_ci: str, build_number: str,
server_url: str, build_name: str) -> None:
"""
Wrapper for the run_test_logic function. Helps by indicating when the test is starting and ending.
:param tests_settings: SettingsTester object which contains the test variables
:param demisto_user: Username of the demisto user running the tests.
:param demisto_pass: Password of the demisto user running the tests.
:param failed_playbooks: List of failed playbooks, additional failed playbooks will be added if
they failed.
:param integrations: List of integrations being tested.
:param playbook_id: ID of the test playbook being tested.
:param succeed_playbooks: List of playbooks which have passed tests.
:param test_message: Name of the playbook/integration being tested. This is reported back in the
build and used to print in the console the test being ran.
:param test_options: Options being passed to the test. PID, Docker Threshold, Timeout, etc.
:param slack: Slack client used for notifications.
:param circle_ci: CircleCI token. Used to get name of dev who triggered the build.
:param build_number: The build number of the CI run. Used in slack message.
:param server_url: The FQDN of the server tests are being ran on.
:param build_name: Name of the build. (Nightly, etc.)
:return: No object is returned.
"""
start_message = f'------ Test {test_message} start ------'
client = demisto_client.configure(base_url=server_url, username=demisto_user, password=demisto_pass,
verify_ssl=False)
logging.info(f'{start_message} (Private Build Test)')
run_test_logic(tests_settings, client, failed_playbooks, integrations, playbook_id,
succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, demisto_user, demisto_pass, build_name)
logging.info(f'------ Test {test_message} end ------\n')
def run_private_test_scenario(tests_settings: SettingsTester, t: dict, default_test_timeout: int,
skipped_tests_conf: dict, nightly_integrations: list, skipped_integrations_conf: set,
skipped_integration: set, filtered_tests: list, skipped_tests: set, secret_params: dict,
failed_playbooks: list, playbook_skipped_integration: set, succeed_playbooks: list,
slack: str, circle_ci: str, build_number: str, server: str, build_name: str,
server_numeric_version: str, demisto_user: str, demisto_pass: str, demisto_api_key: str):
"""
Checks to see if test should run given the scenario. If the test should run, it will collect the
integrations which are required to run the test.
:param tests_settings: SettingsTester object which contains the test variables
:param t: Options being passed to the test. PID, Docker Threshold, Timeout, etc.
:param default_test_timeout: Time in seconds indicating when the test should timeout if no
status is reported.
:param skipped_tests_conf: Collection of the tests which are skipped.
:param nightly_integrations: List of integrations which should only be tested on a nightly build.
    :param skipped_integrations_conf: Collection of integrations which are skipped.
:param skipped_integration: Set of skipped integrations. Currently not used in private.
    :param filtered_tests: List of tests to run; when non-empty, tests outside this list are skipped.
:param skipped_tests: List of skipped tests.
:param secret_params: Parameters found in the content-test-conf. Used to configure the instance.
:param failed_playbooks: List of failed playbooks, additional failed playbooks will be added if
they failed.
:param playbook_skipped_integration: Not used.
:param succeed_playbooks: List of playbooks which have passed tests.
:param slack: Slack client used for notifications.
:param circle_ci: CircleCI token. Used to get name of dev who triggered the build.
:param build_number: The build number of the CI run. Used in slack message.
:param server: The FQDN of the server tests are being ran on.
:param build_name: Name of the build. (Nightly, etc.)
:param server_numeric_version: Version of XSOAR currently installed on the server.
:param demisto_user: Username of the demisto user running the tests.
:param demisto_pass: Password of the demisto user running the tests.
:param demisto_api_key: API key for the demisto instance.
:return:
"""
playbook_id = t['playbookID']
integrations_conf = t.get('integrations', [])
instance_names_conf = t.get('instance_names', [])
test_message = 'playbook: ' + playbook_id
test_options = {
'timeout': t.get('timeout', default_test_timeout)
}
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf, ]
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf, ]
test_skipped_integration, integrations, is_nightly_integration = collect_integrations(
integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations)
if playbook_id in filtered_tests:
playbook_skipped_integration.update(test_skipped_integration)
# Skip tests that are missing from filtered list
if filtered_tests and playbook_id not in filtered_tests:
return
# Skip bad test
if playbook_id in skipped_tests_conf:
skipped_tests.add(f'{playbook_id} - reason: {skipped_tests_conf[playbook_id]}')
return
# Skip integration
if test_skipped_integration:
return
# Skip version mismatch test
test_from_version = t.get('fromversion', '0.0.0')
test_to_version = t.get('toversion', '99.99.99')
if not LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version):
warning_message = f'Test {test_message} ignored due to version mismatch ' \
f'(test versions: {test_from_version}-{test_to_version})'
logging.warning(warning_message)
return
placeholders_map = {'%%SERVER_HOST%%': server}
are_params_set = set_integration_params(demisto_api_key, integrations, secret_params, instance_names_conf,
playbook_id, placeholders_map)
if not are_params_set:
failed_playbooks.append(playbook_id)
return
test_message = update_test_msg(integrations, test_message)
run_test(tests_settings, demisto_user, demisto_pass, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
build_number, server, build_name)
def execute_testing(tests_settings: SettingsTester, server_ip: str, all_tests: set,
tests_data_keeper: DataKeeperTester):
"""
Main function used to handle the testing process. Starts by turning off telemetry and disabling
any left over tests. Afterwards it will create a test queue object which then is used to run the
specific test scenario.
:param tests_settings: SettingsTester object which contains the test variables
:param server_ip: IP address of the server. Will be formatted before use.
:param all_tests: All tests currently in the test conf.
:param tests_data_keeper: Object containing all the test results. Used by report tests function.
    :return: No object is returned, just updates the tests_data_keeper object.
"""
server = SERVER_URL.format(server_ip)
server_numeric_version = tests_settings.serverNumericVersion or ''
logging.info(f"Executing tests with the server {server} - and the server ip {server_ip}")
slack = tests_settings.slack
circle_ci = tests_settings.circleci
build_number = tests_settings.buildNumber
build_name = tests_settings.buildName
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
demisto_api_key = tests_settings.api_key
demisto_user = secret_conf['username']
demisto_pass = secret_conf['userPassword']
default_test_timeout = conf.get('testTimeout', 30)
tests = conf['tests']
skipped_tests_conf = conf['skipped_tests']
nightly_integrations = conf['nightly_integrations']
skipped_integrations_conf = conf['skipped_integrations']
unmockable_integrations = conf['unmockable_integrations']
secret_params = secret_conf['integrations'] if secret_conf else []
filtered_tests = extract_filtered_tests()
if not tests or len(tests) == 0:
logging.info('no integrations are configured for test')
return
xsoar_client = demisto_client.configure(base_url=server, username=demisto_user,
password=demisto_pass, verify_ssl=False)
# turn off telemetry
turn_off_telemetry(xsoar_client)
failed_playbooks: list = []
succeed_playbooks: list = []
skipped_tests: set = set([])
skipped_integration: set = set([])
playbook_skipped_integration: set = set([])
# Private builds do not use mocking. Here we copy the mocked test list to the unmockable list.
private_tests = get_test_records_of_given_test_names(tests_settings, all_tests)
try:
# first run the mock tests to avoid mockless side effects in container
logging.info("\nRunning private tests")
executed_in_current_round, private_tests_queue = initialize_queue_and_executed_tests_set(private_tests)
while not private_tests_queue.empty():
t = private_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(
executed_in_current_round, t)
run_private_test_scenario(tests_settings, t, default_test_timeout, skipped_tests_conf,
nightly_integrations, skipped_integrations_conf,
skipped_integration,
filtered_tests, skipped_tests, secret_params,
failed_playbooks, playbook_skipped_integration,
succeed_playbooks, slack, circle_ci, build_number, server,
build_name, server_numeric_version, demisto_user,
demisto_pass, demisto_api_key)
except Exception:
logging.exception('~~ Thread Failed ~~')
raise
finally:
tests_data_keeper.add_tests_data(succeed_playbooks, failed_playbooks, skipped_tests,
skipped_integration, unmockable_integrations)
def update_round_set_and_sleep_if_round_completed(executed_in_current_round: set,
t: dict) -> set:
"""
Checks if the string representation of the current test configuration is already in
the executed_in_current_round set.
    If it is, it means we have already executed this test, i.e. a round has completed and
    there are tests that could not be locked by this execution.
    In that case we want to start monitoring a new round by emptying the
    'executed_in_current_round' set and sleeping in order to let the tests be unlocked.
Args:
executed_in_current_round: A set containing the string representation of all tests
configuration as they appear
in conf.json file that were already executed in the current round
t: test configuration as it appears in conf.json file
Returns:
A new executed_in_current_round set which contains only the current tests configuration if a
round was completed else it just adds the new test to the set.
"""
if str(t) in executed_in_current_round:
logging.info(
'all tests in the queue were executed, sleeping for 30 seconds to let locked tests get unlocked.')
executed_in_current_round = set()
time.sleep(30)
executed_in_current_round.add(str(t))
return executed_in_current_round
def manage_tests(tests_settings: SettingsTester):
"""
This function manages the execution of Demisto's tests.
Args:
tests_settings (SettingsTester): An object containing all the relevant data regarding how the
tests should be ran.
"""
tests_settings.serverNumericVersion = get_server_numeric_version(tests_settings.serverVersion,
tests_settings.is_local_run)
instances_ips = get_instances_ips_and_names(tests_settings)
tests_data_keeper = DataKeeperTester()
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion:
logging.info(f"Starting private testing for {ami_instance_name}")
logging.info(f"Starts tests with server url - https://{ami_instance_ip}")
all_tests = get_all_tests(tests_settings)
execute_testing(tests_settings, ami_instance_ip, all_tests, tests_data_keeper)
sleep(8)
print_test_summary(tests_data_keeper, tests_settings.isAMI, logging_module=logging)
create_result_files(tests_data_keeper)
if tests_data_keeper.failed_playbooks:
tests_failed_msg = "Some tests have failed. Not destroying instances."
print(tests_failed_msg)
sys.exit(1)
def main():
install_logging('Run_Tests.log', logger=logging)
tests_settings = options_handler()
logging.info(f"Build Name: {tests_settings.buildName}")
logging.info(f" Build Number: {tests_settings.buildNumber}")
manage_tests(tests_settings)
if __name__ == '__main__':
main()
| VirusTotal/content | Tests/private_build/run_content_tests_private.py | Python | mit | 19,711 |
import sys
import hashlib
from pyftpdlib.authorizers import AuthenticationFailed
class Authorizer:
def __init__(self, username, password, directory, permission=None):
self.username = username
self.password = password
self.directory = directory
self.permission = permission or "elradfmw"
def has_user(self, username):
return self.username == username
def validate_password(self, password):
return self.password == make_password_hash(password)
def validate_authentication(self, username, password, handler):
if self.username == username and self.validate_password(password):
return
raise AuthenticationFailed("Authentication failed.")
def get_home_dir(self, username):
return self.directory
def has_perm(self, username, perm, path=None):
return perm in self.permission
def get_perms(self, username):
return self.permission
def get_msg_login(self, username):
return "Login successful."
def get_msg_quit(self, username):
return "Goodbye."
def impersonate_user(self, username, password):
pass
def terminate_impersonation(self, username):
pass
def make_password_hash(password):
return hashlib.sha1(password.encode('utf8')).hexdigest()
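# A hedged wiring sketch (not part of this module): how the Authorizer above could be
# plugged into a pyftpdlib server. The address, credentials and directory are
# hypothetical; the stored password is the sha1 hex digest from make_password_hash.
#
#   from pyftpdlib.handlers import FTPHandler
#   from pyftpdlib.servers import FTPServer
#
#   handler = FTPHandler
#   handler.authorizer = Authorizer(
#       'user', make_password_hash('secret'), '/srv/ftp', permission='elr')
#   FTPServer(('0.0.0.0', 2121), handler).serve_forever()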
def main():
if len(sys.argv) < 2:
sys.exit()
print("password hash:")
print(make_password_hash(sys.argv[1]))
if __name__ == '__main__':
main()
| tokibito/soloftpd | soloftpd/authorizers.py | Python | mit | 1,493 |
"""
WSGI config for bookmarklets project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bookmarklets.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| assertnotnull/bookmarklets | bookmarklets/bookmarklets/wsgi.py | Python | mit | 1,572 |
#!/usr/bin/python
from base import Persistence
class ErrorI18n(Persistence):
pass
| tLDP/lampadas | pylib/persistence/error_i18n.py | Python | gpl-2.0 | 89 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from datetime import datetime
from scrapy import log
from scrapy.exceptions import DropItem
from scrapy.item import Item, Field
from webbot.utils import utils, dateparser
import pprint
import re
import traceback
# Field mapping
def item2post(item):
post = {}
for k,v in item.fields.iteritems():
if 'name' in v:
post[v['name']] = item[k]
return post
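# Illustrative example (not part of the original module): with item fields declared as
#   title = Field(name='post_title')
#   image_urls = Field()            # no 'name' -> not exported by item2post
# an item where item['title'] = u'hello' is mapped to {'post_title': u'hello'}.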
# Basic processing
class BasicPipeline(object):
def open_spider(self, spider):
self.img = 'image_urls' in Item.fields
def process_item(self, item, spider):
try:
for k,v in item.fields.iteritems():
if v.get('multi'):
if k not in item:
item[k] = []
continue
if k in item:
if isinstance(item[k], list):
item[k] = item[k][0]
elif v.get('opt'):
item[k] = None
else:
                    raise Exception('field [{}] is empty'.format(k))
return item
except Exception as ex:
raise DropItem('item error: {}'.format(ex))
# Debug printing
class DebugPipeline(object):
def open_spider(self, spider):
self.printer = utils.UnicodePrinter(verbose=getattr(spider, 'verbose', 0))
self.idx = 0
def process_item(self, item, spider):
if not (hasattr(spider, 'debug') and spider.debug):
return item
self.idx += 1
print utils.B('{:=^30}').format(self.idx)
for k,v in item.iteritems():
if type(v) in [str, unicode]:
v = re.sub(r'\s{2,}', ' ', v.replace('\n', ' ').replace('\r', ''))
if spider.verbose<3:
v = self.printer.squeeze(v)
elif type(v)==datetime:
now = datetime.utcnow()
if v>now:
colored = utils.RR
elif (now-v).total_seconds()>24*3600:
colored = utils.R
else:
colored = lambda x:x
offset = dateparser.tz_offset(spider.tz)
v = colored(v + offset)
else:
v = re.sub(r'(?m)^', '{: ^13}'.format(''), self.printer.pformat(v)).decode('utf-8').strip()
f = ' ' if 'name' in item.fields[k] else '*'
print u'{:>10.10}{}: {}'.format(k, f, v).encode('utf-8')
return item
# Data storage (mongo)
class MongoPipeline(object):
def open_spider(self, spider):
if hasattr(spider, 'mongo'):
try:
self.upsert_keys = self.get_upsert_keys()
uri = spider.mongo
log.msg('connect <{}>'.format(uri))
self.cnn, self.db, self.tbl = utils.connect_uri(uri)
return
except Exception as ex:
log.err('cannot connect to mongodb: {}'.format(ex))
self.cnn = self.db = None
def get_upsert_keys(self):
keys = []
for k,v in Item.fields.iteritems():
if 'name' in v and v.get('upsert'):
keys.append(v['name'])
return keys
def process_item(self, item, spider):
if self.cnn:
try:
post = item2post(item)
if self.upsert_keys:
criteria = {k:post[k] for k in self.upsert_keys}
self.tbl.update(criteria, post, upsert=True)
else:
self.tbl.insert(post)
except Exception as ex:
traceback.print_exc()
return item
def close_spider(self, spider):
if self.cnn:
log.msg('disconnect mongodb')
self.cnn.close()
self.cnn = None
# Data storage (mysql)
class MysqlPipeline(object):
def open_spider(self, spider):
if hasattr(spider, 'mysql'):
try:
uri = spider.mysql
log.msg('connect <{}>'.format(uri))
self.cnn, _, self.tbl = utils.connect_uri(uri)
self.cur = self.cnn.cursor()
return
except Exception as ex:
traceback.print_exc()
log.err('cannot connect to mysql: {}'.format(ex))
self.cnn = self.cur = None
def process_item(self, item, spider):
if self.cnn:
try:
post = item2post(item)
fields = []
values = []
for k,v in post.iteritems():
fields.append(k)
values.append(v)
self.cur.execute(
"""INSERT INTO {}({}) VALUES({});""".format(
self.tbl,
','.join(fields),
','.join(['%s']*len(fields))
),
values
)
self.cnn.commit()
except Exception as ex:
traceback.print_exc()
return item
def close_spider(self, spider):
if self.cnn:
log.msg('disconnect mysql')
self.cur.close()
self.cnn.close()
self.cnn = self.cur = None
# Message queue (zmq)
class ZmqPipeline(object):
def open_spider(self, spider):
if hasattr(spider, 'zmq'):
try:
from utils.api import MessageSender
uri = spider.zmq
log.msg('connect <{}>'.format(uri))
self.sender = MessageSender(uri)
return
except Exception as ex:
log.err('cannot connect to zmq: {}'.format(ex))
self.sender = None
def process_item(self, item, spider):
if self.sender:
try:
self.sender.send(item2post(item))
except Exception as ex:
traceback.print_exc()
return item
def close_spider(self, spider):
if self.sender:
log.msg('disconnect zmq')
self.sender.term()
# Image download (img)
try:
from scrapy.contrib.pipeline.images import ImagesPipeline
class ImgPipeline(ImagesPipeline):
def open_spider(self, spider):
self.img = 'image_urls' in Item.fields
self.spiderinfo = self.SpiderInfo(spider)
if hasattr(spider, 'img'):
self.store = self._get_store(spider.img)
def process_item(self, item, spider):
if self.img:
return ImagesPipeline.process_item(self, item, spider)
else:
return item
def get_media_requests(self, item, info):
for r in ImagesPipeline.get_media_requests(self, item, info):
r.headers['Referer'] = item.get('url', 'http://www.google.com')
yield r
except:
pass
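# To enable these pipelines in a Scrapy project, list them in the project
# settings. A sketch (the priority numbers are arbitrary placeholders):
# ITEM_PIPELINES = {
#     'webbot.pipelines.BasicPipeline': 100,
#     'webbot.pipelines.DebugPipeline': 200,
#     'webbot.pipelines.MongoPipeline': 300,
# }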
| vimagick/webbot | webbot/pipelines.py | Python | gpl-3.0 | 6,947 |
# pylint: disable=missing-docstring
from pathlib import Path
from textwrap import dedent
from beancount.loader import load_file
from beancount.loader import load_string
from fava.plugins.link_documents import DocumentError
def test_plugins(tmp_path: Path) -> None:
# Create sample files
expenses_foo = tmp_path / "documents" / "Expenses" / "Foo"
expenses_foo.mkdir(parents=True)
(expenses_foo / "2016-11-02 Test 1.pdf").touch()
(expenses_foo / "2016-11-03 Test 2.pdf").touch()
(expenses_foo / "2016-11-04 Test 3 discovered.pdf").touch()
assets_cash = tmp_path / "documents" / "Assets" / "Cash"
assets_cash.mkdir(parents=True)
(assets_cash / "2016-11-05 Test 4.pdf").touch()
(assets_cash / "Test 5.pdf").touch()
expenses_foo_rel = Path("documents") / "Expenses" / "Foo"
assets_cash_rel = Path("documents") / "Assets" / "Cash"
beancount_file = tmp_path / "example.beancount"
beancount_file.write_text(
dedent(
f"""
option "title" "Test"
option "operating_currency" "EUR"
option "documents" "{tmp_path / "documents"}"
plugin "fava.plugins.link_documents"
2016-10-30 open Expenses:Foo
2016-10-31 open Assets:Cash
2016-11-01 * "Foo" "Bar"
document: "{expenses_foo / "2016-11-03 Test 2.pdf"}"
document-2: "{assets_cash_rel / "2016-11-05 Test 4.pdf"}"
Expenses:Foo 100 EUR
Assets:Cash
2016-11-07 * "Foo" "Bar"
document: "{expenses_foo_rel / "2016-11-02 Test 1.pdf"}"
document-2: "{assets_cash_rel / "2016-11-05 Test 4.pdf"}"
Expenses:Foo 100 EUR
Assets:Cash
2016-11-06 document Assets:Cash "{assets_cash_rel / "Test 5.pdf"}"
2017-11-06 balance Assets:Cash -200 EUR
document: "{assets_cash_rel / "Test 5.pdf"}"
""".replace(
"\\", "\\\\"
)
)
)
entries, errors, _ = load_file(str(beancount_file))
assert not errors
assert len(entries) == 10
assert "linked" in entries[3].tags
assert "linked" in entries[4].tags
# Document can be linked twice
assert len(entries[6].links) == 2
assert entries[2].links == entries[4].links
assert entries[8].links == entries[3].links
def test_link_documents_error(load_doc) -> None:
"""
plugin "fava.plugins.link_documents"
2016-10-31 open Expenses:Foo
2016-10-31 open Assets:Cash
2016-11-01 * "Foo" "Bar"
document: "asdf"
Expenses:Foo 100 EUR
Assets:Cash
"""
entries, errors, _ = load_doc
assert len(errors) == 1
assert len(entries) == 3
def test_link_documents_missing(tmp_path: Path) -> None:
bfile = dedent(
f"""
option "documents" "{tmp_path}"
plugin "fava.plugins.link_documents"
2016-10-31 open Expenses:Foo
2016-10-31 open Assets:Cash
2016-11-01 * "Foo" "Bar"
document: "{Path("test") / "Foobar.pdf"}"
Expenses:Foo 100 EUR
Assets:Cash
""".replace(
"\\", "\\\\"
)
)
entries, errors, _ = load_string(bfile)
assert len(errors) == 1
assert isinstance(errors[0], DocumentError)
assert len(entries) == 3
| aumayr/beancount-web | tests/test_plugins_link_documents.py | Python | mit | 3,350 |
#!/usr/bin/env python
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import sys
if sys.version_info < (2, 6, 0):
sys.stderr.write("Supybot requires Python 2.6 or newer.")
sys.stderr.write(os.linesep)
sys.exit(-1)
import os.path
import textwrap
from src.version import version
from setuptools import setup
def normalizeWhitespace(s):
return ' '.join(s.split())
plugins = [s for s in os.listdir('plugins') if
os.path.exists(os.path.join('plugins', s, 'plugin.py'))]
packages = ['supybot',
'supybot.utils',
'supybot.drivers',
'supybot.plugins',] + \
['supybot.plugins.'+s for s in plugins] + \
[
'supybot.plugins.Dict.local',
'supybot.plugins.Math.local',
]
package_dir = {'supybot': 'src',
'supybot.utils': 'src/utils',
'supybot.plugins': 'plugins',
'supybot.drivers': 'src/drivers',
'supybot.plugins.Dict.local': 'plugins/Dict/local',
'supybot.plugins.Math.local': 'plugins/Math/local',
}
for plugin in plugins:
package_dir['supybot.plugins.' + plugin] = 'plugins/' + plugin
setup(
# Metadata
name='supybot',
version=version,
author='Jeremy Fincher',
author_email='jemfinch@supybot.com',
maintainer='James McCoy',
maintainer_email='jamessan@users.sourceforge.net',
url='https://sourceforge.net/projects/supybot/',
download_url='https://sourceforge.net/projects/supybot/files/',
platforms=['linux', 'linux2', 'win32', 'cygwin', 'darwin'],
description='A flexible and extensible Python IRC bot and framework.',
long_description=normalizeWhitespace("""A robust, full-featured Python IRC
bot with a clean and flexible plugin API. Equipped with a complete ACL
system for specifying user permissions with as much as per-command
granularity. Batteries are included in the form of numerous plugins
already written."""),
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Communications :: Chat :: Internet Relay Chat',
'Natural Language :: English',
'Operating System :: OS Independent',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
# Installation data
packages=packages,
package_dir=package_dir,
scripts=['scripts/supybot',
'scripts/supybot-test',
'scripts/supybot-botchk',
'scripts/supybot-wizard',
'scripts/supybot-adduser',
'scripts/supybot-plugin-doc',
'scripts/supybot-plugin-create',
],
install_requires=[
# Time plugin
'python-dateutil <2.0,>=1.3',
'feedparser',
],
tests_require=[
'mock',
]
)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| jeffmahoney/supybot | setup.py | Python | bsd-3-clause | 4,830 |
#!/bin/env python
# Copyright 2019 Arm Limited.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import shutil
SOURCE_CONTENT = """
int bob_test_implicit_out() {
return 88;
}
"""
HEADER_CONTENT = """
#define A 88
"""
def main():
parser = argparse.ArgumentParser(description="Test generator.")
parser.add_argument("input", help="Input files")
parser.add_argument("-o", "--output", help="Output file")
parser.add_argument("--header", action="store_true",
help="Generate implicit header")
parser.add_argument("--source", action="store_true",
help="Generate implicit source")
implicit = "lib"
args = parser.parse_args()
inp = args.input
out = args.output
if not os.path.exists(inp):
print("Input file doesn't exist: " + inp)
exit(-1)
shutil.copy(inp, out)
path = os.path.join(os.path.dirname(out), implicit)
if args.header:
# create lib.h
with open(path + ".h", "w") as hf:
hf.write(HEADER_CONTENT)
if args.source:
# create lib.c
with open(path + ".c", "w") as cf:
cf.write(SOURCE_CONTENT)
if __name__ == "__main__":
main()
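# Example invocation (a sketch; the file names are hypothetical):
# $ python generate.py src/input.c -o out/copy.c --header --source
# copies src/input.c to out/copy.c and, because of the two flags, also writes
# the implicit outputs out/lib.h and out/lib.c next to the output file.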
| ARM-software/bob-build | tests/implicit_outs/generate.py | Python | apache-2.0 | 1,819 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
"""
This module implements a friendly (well, friendlier) interface between the raw JSON
responses from JIRA and the Resource/dict abstractions provided by this library. Users
will construct a JIRA object as described below. Full API documentation can be found
at: https://jira-python.readthedocs.org/en/latest/
"""
from functools import wraps
import imghdr
import mimetypes
import copy
import os
import re
import string
import tempfile
import logging
import json
import warnings
import pprint
import sys
import datetime
import calendar
import hashlib
from six.moves.urllib.parse import urlparse, urlencode
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from six import string_types, integer_types
# six.moves does not play well with pyinstaller, see https://github.com/pycontribs/jira/issues/38
# from six.moves import html_parser
if sys.version_info < (3, 0, 0):
import HTMLParser as html_parser
else:
import html.parser as html_parser
import requests
try:
from requests_toolbelt import MultipartEncoder
except:
pass
try:
from requests_jwt import JWTAuth
except ImportError:
pass
# JIRA specific resources
from .resources import Resource, Issue, Comment, Project, Attachment, Component, Dashboard, Filter, Votes, Watchers, \
Worklog, IssueLink, IssueLinkType, IssueType, Priority, Version, Role, Resolution, SecurityLevel, Status, User, \
CustomFieldOption, RemoteLink
# GreenHopper specific resources
from .resources import GreenHopperResource, Board, Sprint
from .resilientsession import ResilientSession
from .version import __version__
from .utils import threaded_requests, json_loads, JIRAError, CaseInsensitiveDict
try:
from random import SystemRandom
random = SystemRandom()
except ImportError:
import random
# warnings.simplefilter('default')
# encoding = sys.getdefaultencoding()
# if encoding != 'UTF8':
# warnings.warn("Python default encoding is '%s' instead of 'UTF8' which means that there is a big chance of having problems. Possible workaround http://stackoverflow.com/a/17628350/99834" % encoding)
def translate_resource_args(func):
"""
Decorator that converts Issue and Project resources to their keys when used as arguments.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_list = []
for arg in args:
if isinstance(arg, (Issue, Project)):
arg_list.append(arg.key)
else:
arg_list.append(arg)
result = func(*arg_list, **kwargs)
return result
return wrapper
class ResultList(list):
def __init__(self, iterable=None, _total=None):
if iterable is not None:
list.__init__(self, iterable)
else:
list.__init__(self)
self.total = _total if _total is not None else len(self)
class QshGenerator:
def __init__(self, context_path):
self.context_path = context_path
def __call__(self, req):
parse_result = urlparse(req.url)
path = parse_result.path[len(self.context_path):] if len(self.context_path) > 1 else parse_result.path
query = '&'.join(sorted(parse_result.query.split("&")))
qsh = '%(method)s&%(path)s&%(query)s' % {'method': req.method.upper(), 'path': path, 'query': query}
return hashlib.sha256(qsh).hexdigest()
class JIRA(object):
"""
User interface to JIRA.
Clients interact with JIRA by constructing an instance of this object and calling its methods. For addressable
resources in JIRA -- those with "self" links -- an appropriate subclass of :py:class:`Resource` will be returned
with customized ``update()`` and ``delete()`` methods, along with attribute access to fields. This means that calls
of the form ``issue.fields.summary`` will be resolved into the proper lookups to return the JSON value at that
mapping. Methods that do not return resources will return a dict constructed from the JSON response or a scalar
value; see each method's documentation for details on what that method returns.
"""
DEFAULT_OPTIONS = {
"server": "http://localhost:2990/jira",
"context_path": "/",
"rest_path": "api",
"rest_api_version": "2",
"verify": True,
"resilient": True,
"async": False,
"client_cert": None,
"check_update": True,
"headers": {
'X-Atlassian-Token': 'no-check',
'Cache-Control': 'no-cache',
# 'Accept': 'application/json;charset=UTF-8', # default for REST
'Content-Type': 'application/json', # ;charset=UTF-8',
# 'Accept': 'application/json', # default for REST
#'Pragma': 'no-cache',
#'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT'
}
}
checked_version = False
JIRA_BASE_URL = '{server}/rest/api/{rest_api_version}/{path}'
AGILE_BASE_URL = '{server}/rest/greenhopper/1.0/{path}'
def __init__(self, server=None, options=None, basic_auth=None, oauth=None, jwt=None,
validate=False, get_server_info=True, async=False, logging=True, max_retries=3):
"""
Construct a JIRA client instance.
Without any arguments, this client will connect anonymously to the JIRA instance
started by the Atlassian Plugin SDK from one of the 'atlas-run', ``atlas-debug``,
or ``atlas-run-standalone`` commands. By default, this instance runs at
``http://localhost:2990/jira``. The ``options`` argument can be used to set the JIRA instance to use.
Authentication is handled with the ``basic_auth`` argument. If authentication is supplied (and is
accepted by JIRA), the client will remember it for subsequent requests.
For quick command line access to a server, see the ``jirashell`` script included with this distribution.
The easiest way to instantiate is using j = JIRA("https://jira.atlasian.com")
:param options: Specify the server and properties this client will use. Use a dict with any
of the following properties:
* server -- the server address and context path to use. Defaults to ``http://localhost:2990/jira``.
* rest_path -- the root REST path to use. Defaults to ``api``, where the JIRA REST resources live.
* rest_api_version -- the version of the REST resources under rest_path to use. Defaults to ``2``.
* verify -- Verify SSL certs. Defaults to ``True``.
* client_cert -- a tuple of (cert,key) for the requests library for client side SSL
:param basic_auth: A tuple of username and password to use when establishing a session via HTTP BASIC
authentication.
:param oauth: A dict of properties for OAuth authentication. The following properties are required:
* access_token -- OAuth access token for the user
* access_token_secret -- OAuth access token secret to sign with the key
* consumer_key -- key of the OAuth application link defined in JIRA
* key_cert -- private key file to sign requests with (should be the pair of the public key supplied to
JIRA in the OAuth application link)
:param jwt: A dict of properties for JWT authentication supported by Atlassian Connect. The following
properties are required:
* secret -- shared secret as delivered during 'installed' lifecycle event
(see https://developer.atlassian.com/static/connect/docs/latest/modules/lifecycle.html for details)
* payload -- dict of fields to be inserted in the JWT payload, e.g. 'iss'
Example jwt structure: ``{'secret': SHARED_SECRET, 'payload': {'iss': PLUGIN_KEY}}``
        :param validate: If true it will validate your credentials first. Remember that if you are accessing JIRA
            as anonymous it will fail to instantiate.
:param get_server_info: If true it will fetch server version info first to determine if some API calls
are available.
:param async: To enable async requests for those actions where we implemented it, like issue update() or delete().
Obviously this means that you cannot rely on the return code when this is enabled.
"""
if options is None:
options = {}
if server and hasattr(server, 'keys'):
warnings.warn(
"Old API usage, use JIRA(url) or JIRA(options={'server': url}, when using dictionary always use named parameters.",
DeprecationWarning)
options = server
server = None
if server:
options['server'] = server
if async:
options['async'] = async
self.logging = logging
self._options = copy.copy(JIRA.DEFAULT_OPTIONS)
self._options.update(options)
# Rip off trailing slash since all urls depend on that
if self._options['server'].endswith('/'):
self._options['server'] = self._options['server'][:-1]
context_path = urlparse(self._options['server']).path
if len(context_path) > 0:
self._options['context_path'] = context_path
self._try_magic()
if oauth:
self._create_oauth_session(oauth)
elif basic_auth:
self._create_http_basic_session(*basic_auth)
self._session.headers.update(self._options['headers'])
elif jwt:
self._create_jwt_session(jwt)
else:
verify = self._options['verify']
self._session = ResilientSession()
self._session.verify = verify
self._session.headers.update(self._options['headers'])
self._session.max_retries = max_retries
if validate:
# This will raise an Exception if you are not allowed to login.
# It's better to fail faster than later.
self.session()
if get_server_info:
# We need version in order to know what API calls are available or not
si = self.server_info()
try:
self._version = tuple(si['versionNumbers'])
except Exception as e:
globals()['logging'].error("invalid server_info: %s", si)
raise e
else:
self._version = (0, 0, 0)
if self._options['check_update'] and not JIRA.checked_version:
self._check_update_()
JIRA.checked_version = True
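    # Example construction (a sketch; the server URL and credentials below are
    # placeholders, not a real instance):
    # jira = JIRA(server='https://jira.example.com',
    #             basic_auth=('username', 'password'))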
def _check_update_(self):
# check if the current version of the library is outdated
try:
data = requests.get("http://pypi.python.org/pypi/jira/json", timeout=2.001).json()
released_version = data['info']['version']
if released_version > __version__:
warnings.warn("You are running an outdated version of JIRA Python %s. Current version is %s. Do not file any bugs against older versions." % (
__version__, released_version))
except requests.RequestException:
pass
except Exception as e:
logging.warning(e)
def __del__(self):
session = getattr(self, "_session", None)
if session is not None:
if sys.version_info < (3, 4, 0): # workaround for https://github.com/kennethreitz/requests/issues/2303
session.close()
def _check_for_html_error(self, content):
# TODO: Make it return errors when content is a webpage with errors
        # JIRA has the bad habit of returning errors in pages with 200 and
# embedding the error in a huge webpage.
if '<!-- SecurityTokenMissing -->' in content:
logging.warning("Got SecurityTokenMissing")
raise JIRAError("SecurityTokenMissing: %s" % content)
return False
return True
# Information about this client
def client_info(self):
"""Get the server this client is connected to."""
return self._options['server']
# Universal resource loading
def find(self, resource_format, ids=None):
"""
Get a Resource object for any addressable resource on the server.
This method is a universal resource locator for any RESTful resource in JIRA. The
argument ``resource_format`` is a string of the form ``resource``, ``resource/{0}``,
``resource/{0}/sub``, ``resource/{0}/sub/{1}``, etc. The format placeholders will be
populated from the ``ids`` argument if present. The existing authentication session
will be used.
The return value is an untyped Resource object, which will not support specialized
:py:meth:`.Resource.update` or :py:meth:`.Resource.delete` behavior. Moreover, it will
not know to return an issue Resource if the client uses the resource issue path. For this
reason, it is intended to support resources that are not included in the standard
Atlassian REST API.
:param resource_format: the subpath to the resource string
:param ids: values to substitute in the ``resource_format`` string
:type ids: tuple or None
"""
resource = Resource(resource_format, self._options, self._session)
resource.find(ids)
return resource
def async_do(self, size=10):
"""
This will execute all async jobs and wait for them to finish. By default it will run on 10 threads.
size: number of threads to run on.
:return:
"""
if hasattr(self._session, '_async_jobs'):
logging.info("Executing async %s jobs found in queue by using %s threads..." % (
len(self._session._async_jobs), size))
threaded_requests.map(self._session._async_jobs, size=size)
# Application properties
# non-resource
def application_properties(self, key=None):
"""
Return the mutable server application properties.
:param key: the single property to return a value for
"""
params = {}
if key is not None:
params['key'] = key
return self._get_json('application-properties', params=params)
def set_application_property(self, key, value):
"""
Set the application property.
:param key: key of the property to set
:param value: value to assign to the property
"""
url = self._options['server'] + \
'/rest/api/2/application-properties/' + key
payload = {
'id': key,
'value': value
}
r = self._session.put(
url, data=json.dumps(payload))
def applicationlinks(self, cached=True):
"""
List of application links
:return: json
"""
# if cached, return the last result
if cached and hasattr(self, '_applicationlinks'):
return self._applicationlinks
# url = self._options['server'] + '/rest/applinks/latest/applicationlink'
url = self._options['server'] + \
'/rest/applinks/latest/listApplicationlinks'
r = self._session.get(url)
o = json_loads(r)
if 'list' in o:
self._applicationlinks = o['list']
else:
self._applicationlinks = []
return self._applicationlinks
# Attachments
def attachment(self, id):
"""Get an attachment Resource from the server for the specified ID."""
return self._find_for_resource(Attachment, id)
# non-resource
def attachment_meta(self):
"""Get the attachment metadata."""
return self._get_json('attachment/meta')
@translate_resource_args
def add_attachment(self, issue, attachment, filename=None):
"""
Attach an attachment to an issue and returns a Resource for it.
The client will *not* attempt to open or validate the attachment; it expects a file-like object to be ready
for its use. The user is still responsible for tidying up (e.g., closing the file, killing the socket, etc.)
:param issue: the issue to attach the attachment to
:param attachment: file-like object to attach to the issue, also works if it is a string with the filename.
:param filename: optional name for the attached file. If omitted, the file object's ``name`` attribute
            is used. If you acquired the file-like object by any other method than ``open()``, make sure
that a name is specified in one way or the other.
:rtype: an Attachment Resource
"""
if isinstance(attachment, string_types):
attachment = open(attachment, "rb")
if hasattr(attachment, 'read') and hasattr(attachment, 'mode') and attachment.mode != 'rb':
logging.warning(
"%s was not opened in 'rb' mode, attaching file may fail." % attachment.name)
# TODO: Support attaching multiple files at once?
url = self._get_url('issue/' + str(issue) + '/attachments')
fname = filename
if not fname:
fname = os.path.basename(attachment.name)
if 'MultipartEncoder' not in globals():
method = 'old'
r = self._session.post(
url,
files={
'file': (fname, attachment, 'application/octet-stream')},
headers=CaseInsensitiveDict({'content-type': None, 'X-Atlassian-Token': 'nocheck'}))
else:
method = 'MultipartEncoder'
def file_stream():
return MultipartEncoder(
fields={
'file': (fname, attachment, 'application/octet-stream')}
)
m = file_stream()
r = self._session.post(
url, data=m, headers=CaseInsensitiveDict({'content-type': m.content_type, 'X-Atlassian-Token': 'nocheck'}), retry_data=file_stream)
attachment = Attachment(self._options, self._session, json_loads(r)[0])
if attachment.size == 0:
raise JIRAError("Added empty attachment via %s method?!: r: %s\nattachment: %s" % (method, r, attachment))
return attachment
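    # Example (a sketch; the issue key and file path are placeholders):
    # attachment = jira.add_attachment(issue='PROJ-1',
    #                                  attachment='/tmp/report.pdf',
    #                                  filename='report.pdf')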
# Components
def component(self, id):
"""
Get a component Resource from the server.
:param id: ID of the component to get
"""
return self._find_for_resource(Component, id)
@translate_resource_args
def create_component(self, name, project, description=None, leadUserName=None, assigneeType=None,
isAssigneeTypeValid=False):
"""
Create a component inside a project and return a Resource for it.
:param name: name of the component
:param project: key of the project to create the component in
:param description: a description of the component
:param leadUserName: the username of the user responsible for this component
:param assigneeType: see the ComponentBean.AssigneeType class for valid values
:param isAssigneeTypeValid: boolean specifying whether the assignee type is acceptable
"""
data = {
'name': name,
'project': project,
'isAssigneeTypeValid': isAssigneeTypeValid
}
if description is not None:
data['description'] = description
if leadUserName is not None:
data['leadUserName'] = leadUserName
if assigneeType is not None:
data['assigneeType'] = assigneeType
url = self._get_url('component')
r = self._session.post(
url, data=json.dumps(data))
component = Component(self._options, self._session, raw=json_loads(r))
return component
def component_count_related_issues(self, id):
"""
Get the count of related issues for a component.
:type id: integer
:param id: ID of the component to use
"""
return self._get_json('component/' + id + '/relatedIssueCounts')['issueCount']
# Custom field options
def custom_field_option(self, id):
"""
Get a custom field option Resource from the server.
:param id: ID of the custom field to use
"""
return self._find_for_resource(CustomFieldOption, id)
# Dashboards
def dashboards(self, filter=None, startAt=0, maxResults=20):
"""
Return a ResultList of Dashboard resources and a ``total`` count.
:param filter: either "favourite" or "my", the type of dashboards to return
:param startAt: index of the first dashboard to return
:param maxResults: maximum number of dashboards to return. The total number of
results is always available in the ``total`` attribute of the returned ResultList.
"""
params = {}
if filter is not None:
params['filter'] = filter
params['startAt'] = startAt
params['maxResults'] = maxResults
r_json = self._get_json('dashboard', params=params)
dashboards = [Dashboard(self._options, self._session, raw_dash_json)
for raw_dash_json in r_json['dashboards']]
return ResultList(dashboards, r_json['total'])
def dashboard(self, id):
"""
Get a dashboard Resource from the server.
:param id: ID of the dashboard to get.
"""
return self._find_for_resource(Dashboard, id)
# Fields
# non-resource
def fields(self):
"""Return a list of all issue fields."""
return self._get_json('field')
# Filters
def filter(self, id):
"""
Get a filter Resource from the server.
:param id: ID of the filter to get.
"""
return self._find_for_resource(Filter, id)
def favourite_filters(self):
"""Get a list of filter Resources which are the favourites of the currently authenticated user."""
r_json = self._get_json('filter/favourite')
filters = [Filter(self._options, self._session, raw_filter_json)
for raw_filter_json in r_json]
return filters
def create_filter(self, name=None, description=None,
jql=None, favourite=None):
"""
Create a new filter and return a filter Resource for it.
Keyword arguments:
name -- name of the new filter
description -- useful human readable description of the new filter
jql -- query string that defines the filter
favourite -- whether to add this filter to the current user's favorites
"""
data = {}
if name is not None:
data['name'] = name
if description is not None:
data['description'] = description
if jql is not None:
data['jql'] = jql
if favourite is not None:
data['favourite'] = favourite
url = self._get_url('filter')
r = self._session.post(
url, data=json.dumps(data))
raw_filter_json = json_loads(r)
return Filter(self._options, self._session, raw=raw_filter_json)
def update_filter(self, filter_id,
name=None, description=None,
jql=None, favourite=None):
"""
Updates a filter and return a filter Resource for it.
Keyword arguments:
name -- name of the new filter
description -- useful human readable description of the new filter
jql -- query string that defines the filter
favourite -- whether to add this filter to the current user's favorites
"""
filter = self.filter(filter_id)
data = {}
data['name'] = name or filter.name
data['description'] = description or filter.description
data['jql'] = jql or filter.jql
data['favourite'] = favourite or filter.favourite
url = self._get_url('filter/%s' % filter_id)
r = self._session.put(url, headers={'content-type': 'application/json'},
data=json.dumps(data))
raw_filter_json = json.loads(r.text)
return Filter(self._options, self._session, raw=raw_filter_json)
# Groups
# non-resource
def groups(self, query=None, exclude=None, maxResults=9999):
"""
Return a list of groups matching the specified criteria.
Keyword arguments:
query -- filter groups by name with this string
exclude -- filter out groups by name with this string
maxResults -- maximum results to return. defaults to 9999
"""
params = {}
groups = []
if query is not None:
params['query'] = query
if exclude is not None:
params['exclude'] = exclude
if maxResults is not None:
params['maxResults'] = maxResults
for group in self._get_json('groups/picker', params=params)['groups']:
groups.append(group['name'])
return sorted(groups)
def group_members(self, group):
"""
        Return a hash of users with their information. Requires JIRA 6.0 or will raise NotImplemented.
"""
if self._version < (6, 0, 0):
raise NotImplementedError(
"Group members is not implemented in JIRA before version 6.0, upgrade the instance, if possible.")
params = {'groupname': group, 'expand': "users"}
r = self._get_json('group', params=params)
size = r['users']['size']
end_index = r['users']['end-index']
while end_index < size - 1:
params = {'groupname': group, 'expand': "users[%s:%s]" % (
end_index + 1, end_index + 50)}
r2 = self._get_json('group', params=params)
for user in r2['users']['items']:
r['users']['items'].append(user)
end_index = r2['users']['end-index']
size = r['users']['size']
result = {}
for user in r['users']['items']:
result[user['name']] = {'fullname': user['displayName'], 'email': user['emailAddress'],
'active': user['active']}
return result
def add_group(self, groupname):
'''
Creates a new group in JIRA.
:param groupname: The name of the group you wish to create.
        :return: Boolean - True if successful.
'''
url = self._options['server'] + '/rest/api/latest/group'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['name'] = groupname
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def remove_group(self, groupname):
'''
Deletes a group from the JIRA instance.
:param groupname: The group to be deleted from the JIRA instance.
:return: Boolean. Returns True on success.
'''
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
url = self._options['server'] + '/rest/api/latest/group'
x = {'groupname': groupname}
self._session.delete(url, params=x)
return True
# Issues
def issue(self, id, fields=None, expand=None):
"""
Get an issue Resource from the server.
:param id: ID or key of the issue to get
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
"""
# this allows us to pass Issue objects to issue()
if type(id) == Issue:
return id
issue = Issue(self._options, self._session)
params = {}
if fields is not None:
params['fields'] = fields
if expand is not None:
params['expand'] = expand
issue.find(id, params=params)
return issue
def create_issue(self, fields=None, prefetch=True, **fieldargs):
"""
Create a new issue and return an issue Resource for it.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored.
By default, the client will immediately reload the issue Resource created by this method in order to return
a complete Issue object to the caller; this behavior can be controlled through the 'prefetch' argument.
JIRA projects may contain many different issue types. Some issue screens have different requirements for
fields in a new issue. This information is available through the 'createmeta' method. Further examples are
available here: https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Create+Issue
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
:param prefetch: whether to reload the created issue Resource so that all of its data is present in the value\
returned from this method
"""
data = {}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
p = data['fields']['project']
if isinstance(p, string_types) or isinstance(p, integer_types):
data['fields']['project'] = {'id': self.project(p).id}
url = self._get_url('issue')
r = self._session.post(url, data=json.dumps(data))
raw_issue_json = json_loads(r)
if 'key' not in raw_issue_json:
raise JIRAError(r.status_code, request=r)
if prefetch:
return self.issue(raw_issue_json['key'])
else:
return Issue(self._options, self._session, raw=raw_issue_json)
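    # Example (a sketch; the project key, summary and issue type are placeholders):
    # new_issue = jira.create_issue(fields={
    #     'project': {'key': 'PROJ'},
    #     'summary': 'Something is broken',
    #     'description': 'Steps to reproduce go here.',
    #     'issuetype': {'name': 'Bug'},
    # })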
def createmeta(self, projectKeys=None, projectIds=[], issuetypeIds=None, issuetypeNames=None, expand=None):
"""
Gets the metadata required to create issues, optionally filtered by projects and issue types.
:param projectKeys: keys of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectIds.
:param projectIds: IDs of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectKeys.
:param issuetypeIds: IDs of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeNames.
:param issuetypeNames: Names of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeIds.
:param expand: extra information to fetch inside each resource.
"""
params = {}
if projectKeys is not None:
params['projectKeys'] = projectKeys
if projectIds is not None:
if isinstance(projectIds, string_types):
projectIds = projectIds.split(',')
params['projectIds'] = projectIds
if issuetypeIds is not None:
params['issuetypeIds'] = issuetypeIds
if issuetypeNames is not None:
params['issuetypeNames'] = issuetypeNames
if expand is not None:
params['expand'] = expand
return self._get_json('issue/createmeta', params)
# non-resource
@translate_resource_args
def assign_issue(self, issue, assignee):
"""
Assign an issue to a user. None will set it to unassigned. -1 will set it to Automatic.
:param issue: the issue to assign
:param assignee: the user to assign the issue to
"""
url = self._options['server'] + \
'/rest/api/2/issue/' + str(issue) + '/assignee'
payload = {'name': assignee}
r = self._session.put(
url, data=json.dumps(payload))
@translate_resource_args
def comments(self, issue):
"""
Get a list of comment Resources.
:param issue: the issue to get comments from
"""
r_json = self._get_json('issue/' + str(issue) + '/comment')
comments = [Comment(self._options, self._session, raw_comment_json)
for raw_comment_json in r_json['comments']]
return comments
@translate_resource_args
def comment(self, issue, comment):
"""
Get a comment Resource from the server for the specified ID.
:param issue: ID or key of the issue to get the comment from
:param comment: ID of the comment to get
"""
return self._find_for_resource(Comment, (issue, comment))
@translate_resource_args
def add_comment(self, issue, body, visibility=None):
"""
Add a comment from the current authenticated user on the specified issue and return a Resource for it.
The issue identifier and comment body are required.
:param issue: ID or key of the issue to add the comment to
:param body: Text of the comment to add
:param visibility: a dict containing two entries: "type" and "value". "type" is 'role' (or 'group' if the JIRA\
server has configured comment visibility for groups) and 'value' is the name of the role (or group) to which\
viewing of this comment will be restricted.
"""
data = {
'body': body
}
if visibility is not None:
data['visibility'] = visibility
url = self._get_url('issue/' + str(issue) + '/comment')
r = self._session.post(
url, data=json.dumps(data))
comment = Comment(self._options, self._session, raw=json_loads(r))
return comment
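    # Example (a sketch; the issue key and role name are placeholders):
    # comment = jira.add_comment('PROJ-1', 'Looks good to me',
    #                            visibility={'type': 'role',
    #                                        'value': 'Administrators'})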
# non-resource
@translate_resource_args
def editmeta(self, issue):
"""
Get the edit metadata for an issue.
:param issue: the issue to get metadata for
"""
return self._get_json('issue/' + str(issue) + '/editmeta')
@translate_resource_args
def remote_links(self, issue):
"""
Get a list of remote link Resources from an issue.
:param issue: the issue to get remote links from
"""
r_json = self._get_json('issue/' + str(issue) + '/remotelink')
remote_links = [RemoteLink(
self._options, self._session, raw_remotelink_json) for raw_remotelink_json in r_json]
return remote_links
@translate_resource_args
def remote_link(self, issue, id):
"""
Get a remote link Resource from the server.
:param issue: the issue holding the remote link
:param id: ID of the remote link
"""
return self._find_for_resource(RemoteLink, (issue, id))
# removed the @translate_resource_args because it prevents us from finding
# information for building a proper link
def add_remote_link(self, issue, destination, globalId=None, application=None, relationship=None):
"""
Add a remote link from an issue to an external application and returns a remote link Resource
for it. ``object`` should be a dict containing at least ``url`` to the linked external URL and
``title`` to display for the link inside JIRA.
For definitions of the allowable fields for ``object`` and the keyword arguments ``globalId``, ``application``
and ``relationship``, see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param issue: the issue to add the remote link to
:param destination: the link details to add (see the above link for details)
:param globalId: unique ID for the link (see the above link for details)
:param application: application information for the link (see the above link for details)
:param relationship: relationship description for the link (see the above link for details)
"""
warnings.warn(
"broken: see https://bitbucket.org/bspeakmon/jira-python/issue/46 and https://jira.atlassian.com/browse/JRA-38551",
Warning)
try:
applicationlinks = self.applicationlinks()
except JIRAError as e:
applicationlinks = []
# In many (if not most) configurations, non-admin users are
# not allowed to list applicationlinks; if we aren't allowed,
# let's let people try to add remote links anyway, we just
# won't be able to be quite as helpful.
warnings.warn(
"Unable to gather applicationlinks; you will not be able "
"to add links to remote issues: (%s) %s" % (
e.status_code,
e.text
),
Warning
)
data = {}
if type(destination) == Issue:
data['object'] = {
'title': str(destination),
'url': destination.permalink()
}
for x in applicationlinks:
if x['application']['displayUrl'] == destination._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
if 'globalId' not in data:
raise NotImplementedError(
"Unable to identify the issue to link to.")
else:
if globalId is not None:
data['globalId'] = globalId
if application is not None:
data['application'] = application
data['object'] = destination
if relationship is not None:
data['relationship'] = relationship
# check if the link comes from one of the configured application links
for x in applicationlinks:
if x['application']['displayUrl'] == self._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
url = self._get_url('issue/' + str(issue) + '/remotelink')
r = self._session.post(
url, data=json.dumps(data))
remote_link = RemoteLink(
self._options, self._session, raw=json_loads(r))
return remote_link
def add_simple_link(self, issue, object):
"""
Add a simple remote link from an issue to web resource. This avoids the admin access problems from add_remote_link by just using a simple object and presuming all fields are correct and not requiring more complex ``application`` data.
``object`` should be a dict containing at least ``url`` to the linked external URL
and ``title`` to display for the link inside JIRA.
For definitions of the allowable fields for ``object`` , see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param issue: the issue to add the remote link to
:param object: the dictionary used to create remotelink data
"""
data = {}
# hard code data dict to be passed as ``object`` to avoid any permissions errors
data = object
url = self._get_url('issue/' + str(issue) + '/remotelink')
r = self._session.post(
url, data=json.dumps(data))
simple_link = RemoteLink(
self._options, self._session, raw=json_loads(r))
return simple_link
# non-resource
@translate_resource_args
def transitions(self, issue, id=None, expand=None):
"""
Get a list of the transitions available on the specified issue to the current user.
:param issue: ID or key of the issue to get the transitions from
:param id: if present, get only the transition matching this ID
:param expand: extra information to fetch inside each transition
"""
params = {}
if id is not None:
params['transitionId'] = id
if expand is not None:
params['expand'] = expand
return self._get_json('issue/' + str(issue) + '/transitions', params=params)['transitions']
def find_transitionid_by_name(self, issue, transition_name):
"""
Get a transitionid available on the specified issue to the current user.
Look at https://developer.atlassian.com/static/rest/jira/6.1.html#d2e1074 for json reference
:param issue: ID or key of the issue to get the transitions from
        :param transition_name: name of the transition we are looking for
"""
transitions_json = self.transitions(issue)
id = None
for transition in transitions_json:
if transition["name"].lower() == transition_name.lower():
id = transition["id"]
break
return id
@translate_resource_args
def transition_issue(self, issue, transition, fields=None, comment=None, **fieldargs):
# TODO: Support update verbs (same as issue.update())
"""
Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when performing the transition.
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
"""
transitionId = None
try:
transitionId = int(transition)
except:
# cannot cast to int, so try to find transitionId by name
transitionId = self.find_transitionid_by_name(issue, transition)
if transitionId is None:
raise JIRAError("Invalid transition name. %s" % transition)
data = {
'transition': {
'id': transitionId
}
}
if comment:
data['update'] = {'comment': [{'add': {'body': comment}}]}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
url = self._get_url('issue/' + str(issue) + '/transitions')
r = self._session.post(
url, data=json.dumps(data))
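    # Example (a sketch; the issue key, transition name and resolution are placeholders):
    # jira.transition_issue('PROJ-1', 'Resolve Issue',
    #                       comment='Fixed in the latest build',
    #                       fields={'resolution': {'name': 'Fixed'}})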
@translate_resource_args
def votes(self, issue):
"""
Get a votes Resource from the server.
:param issue: ID or key of the issue to get the votes for
"""
return self._find_for_resource(Votes, issue)
@translate_resource_args
def add_vote(self, issue):
"""
Register a vote for the current authenticated user on an issue.
:param issue: ID or key of the issue to vote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
r = self._session.post(url)
@translate_resource_args
def remove_vote(self, issue):
"""
Remove the current authenticated user's vote from an issue.
:param issue: ID or key of the issue to unvote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
self._session.delete(url)
@translate_resource_args
def watchers(self, issue):
"""
Get a watchers Resource from the server for an issue.
:param issue: ID or key of the issue to get the watchers for
"""
return self._find_for_resource(Watchers, issue)
@translate_resource_args
def add_watcher(self, issue, watcher):
"""
Add a user to an issue's watchers list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to add to the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
self._session.post(
url, data=json.dumps(watcher))
@translate_resource_args
def remove_watcher(self, issue, watcher):
"""
Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to remove from the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
params = {'username': watcher}
result = self._session.delete(url, params=params)
return result
@translate_resource_args
def worklogs(self, issue):
"""
Get a list of worklog Resources from the server for an issue.
:param issue: ID or key of the issue to get worklogs from
"""
r_json = self._get_json('issue/' + str(issue) + '/worklog')
worklogs = [Worklog(self._options, self._session, raw_worklog_json)
for raw_worklog_json in r_json['worklogs']]
return worklogs
@translate_resource_args
def worklog(self, issue, id):
"""
Get a specific worklog Resource from the server.
:param issue: ID or key of the issue to get the worklog from
:param id: ID of the worklog to get
"""
return self._find_for_resource(Worklog, (issue, id))
@translate_resource_args
def add_worklog(self, issue, timeSpent=None, timeSpentSeconds=None, adjustEstimate=None,
newEstimate=None, reduceBy=None, comment=None, started=None, user=None):
"""
Add a new worklog entry on an issue and return a Resource for it.
:param issue: the issue to add the worklog to
:param timeSpent: a worklog entry with this amount of time spent, e.g. "2d"
:param adjustEstimate: (optional) allows the user to provide specific instructions to update the remaining\
time estimate of the issue. The value can either be ``new``, ``leave``, ``manual`` or ``auto`` (default).
:param newEstimate: the new value for the remaining estimate field. e.g. "2d"
:param reduceBy: the amount to reduce the remaining estimate by e.g. "2d"
:param started: Moment when the work is logged, if not specified will default to now
:param comment: optional worklog comment
"""
params = {}
if adjustEstimate is not None:
params['adjustEstimate'] = adjustEstimate
if newEstimate is not None:
params['newEstimate'] = newEstimate
if reduceBy is not None:
params['reduceBy'] = reduceBy
data = {}
if timeSpent is not None:
data['timeSpent'] = timeSpent
if timeSpentSeconds is not None:
data['timeSpentSeconds'] = timeSpentSeconds
if comment is not None:
data['comment'] = comment
elif user:
# we log user inside comment as it doesn't always work
data['comment'] = user
if started is not None:
# based on REST Browser it needs: "2014-06-03T08:21:01.273+0000"
data['started'] = started.strftime("%Y-%m-%dT%H:%M:%S.000%z")
if user is not None:
data['author'] = {"name": user,
'self': self.JIRA_BASE_URL + '/rest/api/2/user?username=' + user,
'displayName': user,
'active': False
}
data['updateAuthor'] = data['author']
# TODO: report bug to Atlassian: author and updateAuthor parameters are
# ignored.
url = self._get_url('issue/{0}/worklog'.format(issue))
r = self._session.post(url, params=params, data=json.dumps(data))
return Worklog(self._options, self._session, json_loads(r))
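    # Example (a sketch; the issue key and values are placeholders):
    # jira.add_worklog('PROJ-1', timeSpent='2h',
    #                  comment='Investigated the reported crash')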
# Issue links
@translate_resource_args
def create_issue_link(self, type, inwardIssue, outwardIssue, comment=None):
"""
Create a link between two issues.
:param type: the type of link to create
:param inwardIssue: the issue to link from
:param outwardIssue: the issue to link to
:param comment: a comment to add to the issues with the link. Should be a dict containing ``body``\
and ``visibility`` fields: ``body`` being the text of the comment and ``visibility`` being a dict containing\
two entries: ``type`` and ``value``. ``type`` is ``role`` (or ``group`` if the JIRA server has configured\
comment visibility for groups) and ``value`` is the name of the role (or group) to which viewing of this\
comment will be restricted.
"""
# let's see if we have the right issue link 'type' and fix it if needed
if not hasattr(self, '_cached_issuetypes'):
self._cached_issue_link_types = self.issue_link_types()
if type not in self._cached_issue_link_types:
for lt in self._cached_issue_link_types:
if lt.outward == type:
                    # we are smart enough to figure out what was meant
type = lt.name
break
elif lt.inward == type:
# so that's the reverse, so we fix the request
type = lt.name
inwardIssue, outwardIssue = outwardIssue, inwardIssue
break
data = {
'type': {
'name': type
},
'inwardIssue': {
'key': inwardIssue
},
'outwardIssue': {
'key': outwardIssue
},
'comment': comment
}
url = self._get_url('issueLink')
r = self._session.post(
url, data=json.dumps(data))
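    # Example (a sketch; the link type name and issue keys are placeholders):
    # jira.create_issue_link('Duplicate', inwardIssue='PROJ-1',
    #                        outwardIssue='PROJ-2')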
def issue_link(self, id):
"""
Get an issue link Resource from the server.
:param id: ID of the issue link to get
"""
return self._find_for_resource(IssueLink, id)
# Issue link types
def issue_link_types(self):
"""Get a list of issue link type Resources from the server."""
r_json = self._get_json('issueLinkType')
link_types = [IssueLinkType(self._options, self._session, raw_link_json) for raw_link_json in
r_json['issueLinkTypes']]
return link_types
def issue_link_type(self, id):
"""
Get an issue link type Resource from the server.
:param id: ID of the issue link type to get
"""
return self._find_for_resource(IssueLinkType, id)
# Issue types
def issue_types(self):
"""Get a list of issue type Resources from the server."""
r_json = self._get_json('issuetype')
issue_types = [IssueType(
self._options, self._session, raw_type_json) for raw_type_json in r_json]
return issue_types
def issue_type(self, id):
"""
Get an issue type Resource from the server.
:param id: ID of the issue type to get
"""
return self._find_for_resource(IssueType, id)
# User permissions
# non-resource
def my_permissions(self, projectKey=None, projectId=None, issueKey=None, issueId=None):
"""
Get a dict of all available permissions on the server.
:param projectKey: limit returned permissions to the specified project
:param projectId: limit returned permissions to the specified project
:param issueKey: limit returned permissions to the specified issue
:param issueId: limit returned permissions to the specified issue
"""
params = {}
if projectKey is not None:
params['projectKey'] = projectKey
if projectId is not None:
params['projectId'] = projectId
if issueKey is not None:
params['issueKey'] = issueKey
if issueId is not None:
params['issueId'] = issueId
return self._get_json('mypermissions', params=params)
# Priorities
def priorities(self):
"""Get a list of priority Resources from the server."""
r_json = self._get_json('priority')
priorities = [Priority(
self._options, self._session, raw_priority_json) for raw_priority_json in r_json]
return priorities
def priority(self, id):
"""
Get a priority Resource from the server.
:param id: ID of the priority to get
"""
return self._find_for_resource(Priority, id)
# Projects
def projects(self):
"""Get a list of project Resources from the server visible to the current authenticated user."""
r_json = self._get_json('project')
projects = [Project(
self._options, self._session, raw_project_json) for raw_project_json in r_json]
return projects
def project(self, id):
"""
Get a project Resource from the server.
:param id: ID or key of the project to get
"""
return self._find_for_resource(Project, id)
# non-resource
@translate_resource_args
def project_avatars(self, project):
"""
Get a dict of all avatars for a project visible to the current authenticated user.
:param project: ID or key of the project to get avatars for
"""
return self._get_json('project/' + project + '/avatars')
@translate_resource_args
def create_temp_project_avatar(self, project, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""
Register an image file as a project avatar. The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on libmagic and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_project_avatar` to finish the avatar creation process. If\
you want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the 'auto_confirm'\
argument with a truthy value and :py:meth:`confirm_project_avatar` will be called for you before this method\
returns.
:param project: ID or key of the project to create the avatar in
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object holding the avatar
:param contentType: explicit specification for the avatar image's content-type
:param boolean auto_confirm: whether to automatically confirm the temporary avatar by calling\
:py:meth:`confirm_project_avatar` with the return value of this method.
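        A minimal usage sketch (the project key and file name are only illustrative, and
        ``jira`` is assumed to be an authenticated JIRA client)::

            size = os.path.getsize('avatar.png')
            with open('avatar.png', 'rb') as avatar:
                props = jira.create_temp_project_avatar('PROJ', 'avatar.png', size, avatar)
            jira.confirm_project_avatar('PROJ', props)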
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'filename': filename,
'size': size
}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('project/' + project + '/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_project_avatar(project, cropping_properties)
else:
return cropping_properties
@translate_resource_args
def confirm_project_avatar(self, project, cropping_properties):
"""
Confirm the temporary avatar image previously uploaded with the specified cropping.
        After a successful registration with :py:meth:`create_temp_project_avatar`, use this method to confirm the avatar
for use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_project_avatar` should be used for this
argument.
:param project: ID or key of the project to confirm the avatar in
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_project_avatar`
"""
data = cropping_properties
url = self._get_url('project/' + project + '/avatar')
r = self._session.post(
url, data=json.dumps(data))
return json_loads(r)
@translate_resource_args
def set_project_avatar(self, project, avatar):
"""
Set a project's avatar.
:param project: ID or key of the project to set the avatar on
:param avatar: ID of the avatar to set
"""
self._set_avatar(
None, self._get_url('project/' + project + '/avatar'), avatar)
@translate_resource_args
def delete_project_avatar(self, project, avatar):
"""
Delete a project's avatar.
:param project: ID or key of the project to delete the avatar from
        :param avatar: ID of the avatar to delete
"""
url = self._get_url('project/' + project + '/avatar/' + avatar)
r = self._session.delete(url)
@translate_resource_args
def project_components(self, project):
"""
Get a list of component Resources present on a project.
:param project: ID or key of the project to get components from
"""
r_json = self._get_json('project/' + project + '/components')
components = [Component(
self._options, self._session, raw_comp_json) for raw_comp_json in r_json]
return components
@translate_resource_args
def project_versions(self, project):
"""
Get a list of version Resources present on a project.
:param project: ID or key of the project to get versions from
"""
r_json = self._get_json('project/' + project + '/versions')
versions = [
Version(self._options, self._session, raw_ver_json) for raw_ver_json in r_json]
return versions
# non-resource
@translate_resource_args
def project_roles(self, project):
"""
Get a dict of role names to resource locations for a project.
:param project: ID or key of the project to get roles from
"""
return self._get_json('project/' + project + '/role')
@translate_resource_args
def project_role(self, project, id):
"""
Get a role Resource.
:param project: ID or key of the project to get the role from
:param id: ID of the role to get
"""
return self._find_for_resource(Role, (project, id))
# Resolutions
def resolutions(self):
"""Get a list of resolution Resources from the server."""
r_json = self._get_json('resolution')
resolutions = [Resolution(
self._options, self._session, raw_res_json) for raw_res_json in r_json]
return resolutions
def resolution(self, id):
"""
Get a resolution Resource from the server.
:param id: ID of the resolution to get
"""
return self._find_for_resource(Resolution, id)
# Search
def search_issues(self, jql_str, startAt=0, maxResults=50, validate_query=True, fields=None, expand=None,
json_result=None):
"""
Get a ResultList of issue Resources matching a JQL search string.
:param jql_str: the JQL search string to use
:param startAt: index of the first issue to return
:param maxResults: maximum number of issues to return. Total number of results
is available in the ``total`` attribute of the returned ResultList.
If maxResults evaluates as False, it will try to get all issues in batches of 50.
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
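        A small illustrative sketch (the JQL string and field names are arbitrary examples, and
        ``jira`` is assumed to be an authenticated client)::

            issues = jira.search_issues('assignee = currentUser() AND resolution = Unresolved',
                                        maxResults=20, fields='summary,status')
            for issue in issues:
                print(issue.key, issue.fields.summary)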
"""
# TODO what to do about the expand, which isn't related to the issues?
infinite = False
maxi = 50
idx = 0
if fields is None:
fields = []
        # If None is passed as a parameter, this fetches all issues from the query
if not maxResults:
maxResults = maxi
infinite = True
search_params = {
"jql": jql_str,
"startAt": startAt,
"maxResults": maxResults,
"validateQuery": validate_query,
"fields": fields,
"expand": expand
}
if json_result:
return self._get_json('search', params=search_params)
resource = self._get_json('search', params=search_params)
issues = [Issue(self._options, self._session, raw_issue_json)
for raw_issue_json in resource['issues']]
cnt = len(issues)
total = resource['total']
if infinite:
while cnt == maxi:
idx += maxi
search_params["startAt"] = idx
resource = self._get_json('search', params=search_params)
issue_batch = [Issue(self._options, self._session, raw_issue_json) for raw_issue_json in
resource['issues']]
issues.extend(issue_batch)
cnt = len(issue_batch)
return ResultList(issues, total)
# Security levels
def security_level(self, id):
"""
Get a security level Resource.
:param id: ID of the security level to get
"""
return self._find_for_resource(SecurityLevel, id)
# Server info
# non-resource
def server_info(self):
"""Get a dict of server information for this JIRA instance."""
return self._get_json('serverInfo')
def myself(self):
"""Get a dict of server information for this JIRA instance."""
return self._get_json('myself')
# Status
def statuses(self):
"""Get a list of status Resources from the server."""
r_json = self._get_json('status')
statuses = [Status(self._options, self._session, raw_stat_json)
for raw_stat_json in r_json]
return statuses
def status(self, id):
"""
Get a status Resource from the server.
:param id: ID of the status resource to get
"""
return self._find_for_resource(Status, id)
# Users
def user(self, id, expand=None):
"""
Get a user Resource from the server.
:param id: ID of the user to get
:param expand: extra information to fetch inside each resource
"""
user = User(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
user.find(id, params=params)
return user
def search_assignable_users_for_projects(self, username, projectKeys, startAt=0, maxResults=50):
"""
Get a list of user Resources that match the search string and can be assigned issues for projects.
:param username: a string to match usernames against
:param projectKeys: comma-separated list of project keys to check for issue assignment permissions
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': username,
'projectKeys': projectKeys,
'startAt': startAt,
'maxResults': maxResults
}
r_json = self._get_json(
'user/assignable/multiProjectSearch', params=params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
def search_assignable_users_for_issues(self, username, project=None, issueKey=None, expand=None, startAt=0,
maxResults=50):
"""
Get a list of user Resources that match the search string for assigning or creating issues.
This method is intended to find users that are eligible to create issues in a project or be assigned
to an existing issue. When searching for eligible creators, specify a project. When searching for eligible
assignees, specify an issue key.
:param username: a string to match usernames against
:param project: filter returned users by permission in this project (expected if a result will be used to \
create an issue)
:param issueKey: filter returned users by this issue (expected if a result will be used to edit this issue)
:param expand: extra information to fetch inside each resource
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': username,
'startAt': startAt,
'maxResults': maxResults,
}
if project is not None:
params['project'] = project
if issueKey is not None:
params['issueKey'] = issueKey
if expand is not None:
params['expand'] = expand
r_json = self._get_json('user/assignable/search', params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
# non-resource
def user_avatars(self, username):
"""
Get a dict of avatars for the specified user.
:param username: the username to get avatars for
"""
return self._get_json('user/avatars', params={'username': username})
def create_temp_user_avatar(self, user, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""
Register an image file as a user avatar. The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on ``libmagic`` and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_user_avatar` to finish the avatar creation process. If you
want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the ``auto_confirm``
argument with a truthy value and :py:meth:`confirm_user_avatar` will be called for you before this method
returns.
:param user: user to register the avatar for
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object containing the avatar
:param contentType: explicit specification for the avatar image's content-type
:param auto_confirm: whether to automatically confirm the temporary avatar by calling\
:py:meth:`confirm_user_avatar` with the return value of this method.
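        A minimal usage sketch (the username and file name are only illustrative); passing
        ``auto_confirm=True`` confirms the avatar in the same call::

            with open('avatar.png', 'rb') as avatar:
                jira.create_temp_user_avatar('fred', 'avatar.png',
                                             os.path.getsize('avatar.png'), avatar,
                                             auto_confirm=True)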
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'username': user,
'filename': filename,
'size': size
}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('user/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_user_avatar(user, cropping_properties)
else:
return cropping_properties
def confirm_user_avatar(self, user, cropping_properties):
"""
Confirm the temporary avatar image previously uploaded with the specified cropping.
        After a successful registration with :py:meth:`create_temp_user_avatar`, use this method to confirm the avatar for
use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_user_avatar` should be used for this
argument.
:param user: the user to confirm the avatar for
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_user_avatar`
"""
data = cropping_properties
url = self._get_url('user/avatar')
r = self._session.post(url, params={'username': user},
data=json.dumps(data))
return json_loads(r)
def set_user_avatar(self, username, avatar):
"""
Set a user's avatar.
:param username: the user to set the avatar for
:param avatar: ID of the avatar to set
"""
self._set_avatar(
{'username': username}, self._get_url('user/avatar'), avatar)
def delete_user_avatar(self, username, avatar):
"""
Delete a user's avatar.
:param username: the user to delete the avatar from
:param avatar: ID of the avatar to remove
"""
params = {'username': username}
url = self._get_url('user/avatar/' + avatar)
r = self._session.delete(url, params=params)
def search_users(self, user, startAt=0, maxResults=50, includeActive=True, includeInactive=False):
"""
Get a list of user Resources that match the specified search string.
:param user: a string to match usernames, name or email against
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': user,
'startAt': startAt,
'maxResults': maxResults,
'includeActive': includeActive,
'includeInactive': includeInactive
}
r_json = self._get_json('user/search', params=params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
def search_allowed_users_for_issue(self, user, issueKey=None, projectKey=None, startAt=0, maxResults=50):
"""
Get a list of user Resources that match a username string and have browse permission for the issue or
project.
:param user: a string to match usernames against
:param issueKey: find users with browse permission for this issue
:param projectKey: find users with browse permission for this project
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': user,
'startAt': startAt,
'maxResults': maxResults,
}
if issueKey is not None:
params['issueKey'] = issueKey
if projectKey is not None:
params['projectKey'] = projectKey
r_json = self._get_json('user/viewissue/search', params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
# Versions
@translate_resource_args
def create_version(self, name, project, description=None, releaseDate=None, startDate=None, archived=False,
released=False):
"""
Create a version in a project and return a Resource for it.
:param name: name of the version to create
:param project: key of the project to create the version in
:param description: a description of the version
:param releaseDate: the release date assigned to the version
:param startDate: The start date for the version
"""
data = {
'name': name,
'project': project,
'archived': archived,
'released': released
}
if description is not None:
data['description'] = description
if releaseDate is not None:
data['releaseDate'] = releaseDate
if startDate is not None:
data['startDate'] = startDate
url = self._get_url('version')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
def move_version(self, id, after=None, position=None):
"""
Move a version within a project's ordered version list and return a new version Resource for it. One,
but not both, of ``after`` and ``position`` must be specified.
:param id: ID of the version to move
:param after: the self attribute of a version to place the specified version after (that is, higher in the list)
:param position: the absolute position to move this version to: must be one of ``First``, ``Last``,\
``Earlier``, or ``Later``
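        For example (the version id is hypothetical and ``other_version`` stands for another
        version Resource)::

            jira.move_version('10100', position='First')
            jira.move_version('10100', after=other_version.self)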
"""
data = {}
if after is not None:
data['after'] = after
elif position is not None:
data['position'] = position
url = self._get_url('version/' + id + '/move')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
def version(self, id, expand=None):
"""
Get a version Resource.
:param id: ID of the version to get
:param expand: extra information to fetch inside each resource
"""
version = Version(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
version.find(id, params=params)
return version
def version_count_related_issues(self, id):
"""
Get a dict of the counts of issues fixed and affected by a version.
:param id: the version to count issues for
"""
r_json = self._get_json('version/' + id + '/relatedIssueCounts')
del r_json['self'] # this isn't really an addressable resource
return r_json
def version_count_unresolved_issues(self, id):
"""
Get the number of unresolved issues for a version.
:param id: ID of the version to count issues for
"""
return self._get_json('version/' + id + '/unresolvedIssueCount')['issuesUnresolvedCount']
# Session authentication
def session(self):
"""Get a dict of the current authenticated user's session information."""
url = '{server}/rest/auth/1/session'.format(**self._options)
if type(self._session.auth) is tuple:
authentication_data = {
'username': self._session.auth[0], 'password': self._session.auth[1]}
r = self._session.post(url, data=json.dumps(authentication_data))
else:
r = self._session.get(url)
user = User(self._options, self._session, json_loads(r))
return user
def kill_session(self):
"""Destroy the session of the current authenticated user."""
url = self._options['server'] + '/rest/auth/latest/session'
r = self._session.delete(url)
# Websudo
def kill_websudo(self):
"""Destroy the user's current WebSudo session."""
url = self._options['server'] + '/rest/auth/1/websudo'
r = self._session.delete(url)
# Utilities
def _create_http_basic_session(self, username, password):
verify = self._options['verify']
self._session = ResilientSession()
self._session.verify = verify
self._session.auth = (username, password)
self._session.cert = self._options['client_cert']
def _create_oauth_session(self, oauth):
verify = self._options['verify']
from requests_oauthlib import OAuth1
from oauthlib.oauth1 import SIGNATURE_RSA
oauth = OAuth1(
oauth['consumer_key'],
rsa_key=oauth['key_cert'],
signature_method=SIGNATURE_RSA,
resource_owner_key=oauth['access_token'],
resource_owner_secret=oauth['access_token_secret']
)
self._session = ResilientSession()
self._session.verify = verify
self._session.auth = oauth
@staticmethod
def _timestamp(dt=None):
t = datetime.datetime.utcnow()
if dt is not None:
t += dt
return calendar.timegm(t.timetuple())
def _create_jwt_session(self, jwt):
try:
jwt_auth = JWTAuth(jwt['secret'], alg='HS256')
except NameError as e:
globals()['logging'].error("JWT authentication requires requests_jwt")
raise e
jwt_auth.add_field("iat", lambda req: JIRA._timestamp())
jwt_auth.add_field("exp", lambda req: JIRA._timestamp(datetime.timedelta(minutes=3)))
jwt_auth.add_field("qsh", QshGenerator(self._options['context_path']))
for f in jwt['payload'].items():
jwt_auth.add_field(f[0], f[1])
self._session = ResilientSession()
self._session.verify = self._options['verify']
self._session.auth = jwt_auth
def _set_avatar(self, params, url, avatar):
data = {
'id': avatar
}
r = self._session.put(url, params=params, data=json.dumps(data))
def _get_url(self, path, base=JIRA_BASE_URL):
options = self._options
options.update({'path': path})
return base.format(**options)
def _get_json(self, path, params=None, base=JIRA_BASE_URL):
url = self._get_url(path, base)
r = self._session.get(url, params=params)
try:
r_json = json_loads(r)
except ValueError as e:
logging.error("%s\n%s" % (e, r.text))
raise e
return r_json
def _find_for_resource(self, resource_cls, ids, expand=None):
resource = resource_cls(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
resource.find(id=ids, params=params)
return resource
def _try_magic(self):
try:
import magic
import weakref
except ImportError:
self._magic = None
else:
try:
_magic = magic.Magic(flags=magic.MAGIC_MIME_TYPE)
def cleanup():
_magic.close()
self._magic_weakref = weakref.ref(self, cleanup)
self._magic = _magic
except TypeError:
self._magic = None
except AttributeError:
self._magic = None
def _get_mime_type(self, buff):
if self._magic is not None:
return self._magic.id_buffer(buff)
else:
try:
return mimetypes.guess_type("f." + imghdr.what(0, buff))[0]
except (IOError, TypeError):
logging.warning("Couldn't detect content type of avatar image"
". Specify the 'contentType' parameter explicitly.")
return None
def email_user(self, user, body, title="JIRA Notification"):
"""
        Send an e-mail to a JIRA user through the Script Runner ``SendCustomEmail`` canned script (requires the Script Runner plugin).
"""
url = self._options['server'] + \
'/secure/admin/groovy/CannedScriptRunner.jspa'
payload = {
'cannedScript': 'com.onresolve.jira.groovy.canned.workflow.postfunctions.SendCustomEmail',
'cannedScriptArgs_FIELD_CONDITION': '',
'cannedScriptArgs_FIELD_EMAIL_TEMPLATE': body,
'cannedScriptArgs_FIELD_EMAIL_SUBJECT_TEMPLATE': title,
'cannedScriptArgs_FIELD_EMAIL_FORMAT': 'TEXT',
'cannedScriptArgs_FIELD_TO_ADDRESSES': self.user(user).emailAddress,
'cannedScriptArgs_FIELD_TO_USER_FIELDS': '',
'cannedScriptArgs_FIELD_INCLUDE_ATTACHMENTS': 'FIELD_INCLUDE_ATTACHMENTS_NONE',
'cannedScriptArgs_FIELD_FROM': '',
'cannedScriptArgs_FIELD_PREVIEW_ISSUE': '',
'id': '',
'Preview': 'Preview',
}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
open("/tmp/jira_email_user_%s.html" % user, "w").write(r.text)
def rename_user(self, old_user, new_user):
"""
        Rename a JIRA user. On JIRA >= 6.0.0 this uses the REST API; older versions rely on the third-party Script Runner plugin.
        :param old_user: the current username (login) of the user
        :param new_user: the new username (login) to assign
"""
if self._version >= (6, 0, 0):
url = self._options['server'] + '/rest/api/2/user'
payload = {
"name": new_user,
}
params = {
'username': old_user
}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.put(url, params=params,
data=json.dumps(payload))
else:
            # old implementation needed the ScriptRunner plugin
merge = "true"
try:
self.user(new_user)
except:
merge = "false"
url = self._options[
'server'] + '/secure/admin/groovy/CannedScriptRunner.jspa#result'
payload = {
"cannedScript": "com.onresolve.jira.groovy.canned.admin.RenameUser",
"cannedScriptArgs_FIELD_FROM_USER_ID": old_user,
"cannedScriptArgs_FIELD_TO_USER_ID": new_user,
"cannedScriptArgs_FIELD_MERGE": merge,
"id": "",
"RunCanned": "Run",
}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 404:
logging.error(
"In order to be able to use rename_user() you need to install Script Runner plugin. See https://marketplace.atlassian.com/plugins/com.onresolve.jira.groovy.groovyrunner")
return False
if r.status_code != 200:
logging.error(r.status_code)
if re.compile("XSRF Security Token Missing").search(r.content):
logging.fatal(
"Reconfigure JIRA and disable XSRF in order to be able call this. See https://developer.atlassian.com/display/JIRADEV/Form+Token+Handling")
return False
open("/tmp/jira_rename_user_%s_to%s.html" %
(old_user, new_user), "w").write(r.content)
msg = r.status_code
m = re.search("<span class=\"errMsg\">(.*)<\/span>", r.content)
if m:
msg = m.group(1)
logging.error(msg)
return False
# <span class="errMsg">Target user ID must exist already for a merge</span>
p = re.compile("type=\"hidden\" name=\"cannedScriptArgs_Hidden_output\" value=\"(.*?)\"\/>",
re.MULTILINE | re.DOTALL)
m = p.search(r.content)
if m:
h = html_parser.HTMLParser()
msg = h.unescape(m.group(1))
logging.info(msg)
# let's check if the user still exists
try:
self.user(old_user)
except:
logging.error("User %s does not exists." % old_user)
return msg
logging.error(msg)
logging.error(
"User %s does still exists after rename, that's clearly a problem." % old_user)
return False
def delete_user(self, username):
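        """Delete a JIRA user; returns True on a 2xx response, False otherwise."""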
url = self._options['server'] + \
'/rest/api/latest/user/?username=%s' % username
r = self._session.delete(url)
if 200 <= r.status_code <= 299:
return True
else:
logging.error(r.status_code)
return False
def reindex(self, force=False, background=True):
"""
        Start JIRA re-indexing. Returns True if reindexing is in progress or not needed, or False.
        If you call reindex() without any parameters it will perform a background reindex only if JIRA thinks it is needed.
        :param force: reindex even if JIRA doesn't say this is needed, False by default.
        :param background: reindex in the background, slower but does not impact the users, defaults to True.
"""
# /secure/admin/IndexAdmin.jspa
# /secure/admin/jira/IndexProgress.jspa?taskId=1
if background:
indexingStrategy = 'background'
else:
indexingStrategy = 'stoptheworld'
url = self._options['server'] + '/secure/admin/jira/IndexReIndex.jspa'
r = self._session.get(url, headers=self._options['headers'])
if r.status_code == 503:
# logging.warning("JIRA returned 503, this could mean that a full reindex is in progress.")
return 503
if not r.text.find("To perform the re-index now, please go to the") and force is False:
return True
if r.text.find('All issues are being re-indexed'):
logging.warning("JIRA re-indexing is already running.")
return True # still reindexing is considered still a success
if r.text.find('To perform the re-index now, please go to the') or force:
r = self._session.post(url, headers=self._options['headers'],
params={"indexingStrategy": indexingStrategy, "reindex": "Re-Index"})
if r.text.find('All issues are being re-indexed') != -1:
return True
else:
logging.error("Failed to reindex jira, probably a bug.")
return False
def backup(self, filename='backup.zip'):
"""
        Call the JIRA export to back up the instance as zipped XML. Returning with success does not mean that the backup process has finished.
"""
url = self._options['server'] + '/secure/admin/XmlBackup.jspa'
payload = {'filename': filename}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 200:
return True
else:
logging.warning(
'Got %s response from calling backup.' % r.status_code)
return r.status_code
def current_user(self):
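        """Return the username of the currently authenticated user, read from the ``x-ausername`` response header and cached."""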
if not hasattr(self, '_serverInfo') or 'username' not in self._serverInfo:
url = self._get_url('serverInfo')
r = self._session.get(url, headers=self._options['headers'])
r_json = json_loads(r)
if 'x-ausername' in r.headers:
r_json['username'] = r.headers['x-ausername']
else:
r_json['username'] = None
self._serverInfo = r_json
# del r_json['self'] # this isn't really an addressable resource
return self._serverInfo['username']
def delete_project(self, pid):
"""
        Delete a project. ``pid`` can be a project id, project key or project name. Returns False if it fails.
"""
found = False
try:
if not str(int(pid)) == pid:
found = True
except Exception as e:
r_json = self._get_json('project')
for e in r_json:
if e['key'] == pid or e['name'] == pid:
pid = e['id']
found = True
break
if not found:
logging.error("Unable to recognize project `%s`" % pid)
return False
uri = '/secure/admin/DeleteProject.jspa'
url = self._options['server'] + uri
payload = {'pid': pid, 'Delete': 'Delete', 'confirm': 'true'}
try:
r = self._gain_sudo_session(payload, uri)
if r.status_code != 200 or not self._check_for_html_error(r.text):
return False
except JIRAError as e:
raise JIRAError(0, "You must have global administrator rights to delete projects.")
return False
r = self._session.post(
url, headers=CaseInsensitiveDict({'content-type': 'application/x-www-form-urlencoded'}), data=payload)
if r.status_code == 200:
return self._check_for_html_error(r.text)
else:
logging.warning(
'Got %s response from calling delete_project.' % r.status_code)
return r.status_code
def _gain_sudo_session(self, options, destination):
url = self._options['server'] + '/secure/admin/WebSudoAuthenticate.jspa'
payload = {
'webSudoPassword': self._session.auth[1],
'webSudoDestination': destination,
'webSudoIsPost': 'true',
}
payload.update(options)
return self._session.post(
url, headers=CaseInsensitiveDict({'content-type': 'application/x-www-form-urlencoded'}), data=payload)
def create_project(self, key, name=None, assignee=None):
"""
Key is mandatory and has to match JIRA project key requirements, usually only 2-10 uppercase characters.
If name is not specified it will use the key value.
If assignee is not specified it will use current user.
The returned value should evaluate to False if it fails otherwise it will be the new project id.
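        For example (the key and name are hypothetical)::

            new_project = jira.create_project('ABC', name='Alphabet Project')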
"""
if assignee is None:
assignee = self.current_user()
if name is None:
name = key
if key.upper() != key or not key.isalpha() or len(key) < 2 or len(key) > 10:
logging.error(
                'key parameter is not all uppercase alphabetic of length between 2 and 10')
return False
url = self._options['server'] + \
'/rest/project-templates/1.0/templates'
r = self._session.get(url)
j = json_loads(r)
template_key = None
templates = []
for template in j['projectTemplates']:
templates.append(template['name'])
if template['name'] in ['JIRA Classic', 'JIRA Default Schemes']:
template_key = template['projectTemplateModuleCompleteKey']
break
if not template_key:
raise JIRAError(
"Unable to find a suitable project template to use. Found only: " + ', '.join(templates))
payload = {'name': name,
'key': key,
'keyEdited': 'false',
#'projectTemplate': 'com.atlassian.jira-core-project-templates:jira-issuetracking',
#'permissionScheme': '',
'projectTemplateWebItemKey': template_key,
'projectTemplateModuleKey': template_key,
'lead': assignee,
#'assigneeType': '2',
}
headers = CaseInsensitiveDict(
{'Content-Type': 'application/x-www-form-urlencoded'})
r = self._session.post(url, data=payload, headers=headers)
if r.status_code == 200:
r_json = json_loads(r)
return r_json
f = tempfile.NamedTemporaryFile(
suffix='.html', prefix='python-jira-error-create-project-', delete=False)
f.write(r.text)
if self.logging:
logging.error(
"Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s]." % (
f.name, r.status_code))
return False
def add_user(self, username, email, directoryId=1, password=None, fullname=None, active=True):
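        """Create a new JIRA user via the REST API. Returns True."""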
if not fullname:
fullname = username
# TODO: default the directoryID to the first directory in jira instead
# of 1 which is the internal one.
url = self._options['server'] + '/rest/api/latest/user'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['displayName'] = fullname
x['emailAddress'] = email
x['name'] = username
if password:
x['password'] = password
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def add_user_to_group(self, username, group):
'''
Adds a user to an existing group.
:param username: Username that will be added to specified group.
:param group: Group that the user will be added to.
        :return: Boolean, True for success, False for failure.
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': group}
y = {'name': username}
payload = json.dumps(y)
self._session.post(url, params=x, data=payload)
return True
def remove_user_from_group(self, username, groupname):
'''
Removes a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from.
        :return: Boolean, True for success.
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': groupname,
'username': username}
self._session.delete(url, params=x)
return True
# Experimental
# Experimental support for iDalko Grid, expect API to change as it's using private APIs currently
# https://support.idalko.com/browse/IGRID-1017
def get_igrid(self, issueid, customfield, schemeid):
url = self._options['server'] + '/rest/idalko-igrid/1.0/datagrid/data'
if str(customfield).isdigit():
customfield = "customfield_%s" % customfield
params = {
#'_mode':'view',
'_issueId': issueid,
'_fieldId': customfield,
'_confSchemeId': schemeid,
#'validate':True,
#'_search':False,
#'rows':100,
#'page':1,
#'sidx':'DEFAULT',
#'sord':'asc',
}
r = self._session.get(
url, headers=self._options['headers'], params=params)
return json_loads(r)
# Jira Agile specific methods (GreenHopper)
"""
Define the functions that interact with GreenHopper.
"""
@translate_resource_args
def boards(self):
"""
Get a list of board GreenHopperResources.
"""
r_json = self._get_json(
'rapidviews/list', base=self.AGILE_BASE_URL)
boards = [Board(self._options, self._session, raw_boards_json)
for raw_boards_json in r_json['views']]
return boards
@translate_resource_args
def sprints(self, id, extended=False):
"""
Get a list of sprint GreenHopperResources.
:param id: the board to get sprints from
:param extended: fetch additional information like startDate, endDate, completeDate,
            much slower because it requires an additional request for each sprint
        :return: a list of Sprint resources; each sprint's raw data looks like:
>>> { "id": 893,
>>> "name": "iteration.5",
>>> "state": "FUTURE",
>>> "linkedPagesCount": 0,
>>> "startDate": "None",
>>> "endDate": "None",
>>> "completeDate": "None",
>>> "remoteLinks": []
>>> }
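        A typical call chain (whatever boards and sprints the server returns)::

            for board in jira.boards():
                for sprint in jira.sprints(board.id):
                    print(sprint.name, sprint.state)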
"""
r_json = self._get_json('sprintquery/%s?includeHistoricSprints=true&includeFutureSprints=true' % id,
base=self.AGILE_BASE_URL)
if extended:
sprints = []
for raw_sprints_json in r_json['sprints']:
r_json = self._get_json(
'sprint/%s/edit/model' % raw_sprints_json['id'], base=self.AGILE_BASE_URL)
sprints.append(
Sprint(self._options, self._session, r_json['sprint']))
else:
sprints = [Sprint(self._options, self._session, raw_sprints_json)
for raw_sprints_json in r_json['sprints']]
return sprints
def sprints_by_name(self, id, extended=False):
sprints = {}
for s in self.sprints(id, extended=extended):
if s.name not in sprints:
sprints[s.name] = s.raw
else:
raise (Exception(
"Fatal error, duplicate Sprint Name (%s) found on board %s." % (s.name, id)))
return sprints
def update_sprint(self, id, name=None, startDate=None, endDate=None):
payload = {}
if name:
payload['name'] = name
if startDate:
payload['startDate'] = startDate
if endDate:
            payload['endDate'] = endDate
# if state:
# payload['state']=state
url = self._get_url('sprint/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
return json_loads(r)
def completed_issues(self, board_id, sprint_id):
"""
Return the completed issues for ``board_id`` and ``sprint_id``.
        :param board_id: the board to retrieve issues from
        :param sprint_id: the sprint to retrieve issues from
"""
# TODO need a better way to provide all the info from the sprintreport
        # incompletedIssues went to the backlog but were not completed
# issueKeysAddedDuringSprint used to mark some with a * ?
# puntedIssues are for scope change?
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['completedIssues']]
return issues
def completedIssuesEstimateSum(self, board_id, sprint_id):
"""
        Return the total completed estimate (story points) for this sprint.
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['completedIssuesEstimateSum']['value']
def incompleted_issues(self, board_id, sprint_id):
"""
        Return the incompleted issues for the sprint
"""
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['incompletedIssues']]
return issues
def sprint_info(self, board_id, sprint_id):
"""
Return the information about a sprint.
        :param board_id: the board to retrieve issues from
        :param sprint_id: the sprint to retrieve issues from
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['sprint']
# TODO: remove this as we do have Board.delete()
def delete_board(self, id):
"""
        Delete an agile board.
        :param id: ID of the board to delete
"""
payload = {}
url = self._get_url(
'rapidview/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.delete(
url, data=json.dumps(payload))
def create_board(self, name, project_ids, preset="scrum"):
"""
Create a new board for the ``project_ids``.
:param name: name of the board
:param project_ids: the projects to create the board in
:param preset: what preset to use for this board
:type preset: 'kanban', 'scrum', 'diy'
"""
payload = {}
if isinstance(project_ids, string_types):
ids = []
for p in project_ids.split(','):
ids.append(self.project(p).id)
project_ids = ','.join(ids)
payload['name'] = name
if isinstance(project_ids, string_types):
project_ids = project_ids.split(',')
payload['projectIds'] = project_ids
payload['preset'] = preset
url = self._get_url(
'rapidview/create/presets', base=self.AGILE_BASE_URL)
r = self._session.post(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Board(self._options, self._session, raw=raw_issue_json)
def create_sprint(self, name, board_id, startDate=None, endDate=None):
"""
Create a new sprint for the ``board_id``.
:param name: name of the sprint
:param board_id: the board to add the sprint to
"""
url = self._get_url(
'sprint/%s' % board_id, base=self.AGILE_BASE_URL)
r = self._session.post(
url)
raw_issue_json = json_loads(r)
""" now r contains something like:
{
"id": 742,
"name": "Sprint 89",
"state": "FUTURE",
"linkedPagesCount": 0,
"startDate": "None",
"endDate": "None",
"completeDate": "None",
"remoteLinks": []
}"""
payload = {'name': name}
if startDate:
payload["startDate"] = startDate
if endDate:
payload["endDate"] = endDate
url = self._get_url(
'sprint/%s' % raw_issue_json['id'], base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Sprint(self._options, self._session, raw=raw_issue_json)
# TODO: broken, this API does not exist anymore and we need to use
    # issue.update() to perform this operation
# Workaround based on https://answers.atlassian.com/questions/277651/jira-agile-rest-api-example
def add_issues_to_sprint(self, sprint_id, issue_keys):
"""
Add the issues in ``issue_keys`` to the ``sprint_id``. The sprint must
be started but not completed.
        If a sprint was completed, you also have to edit the history of the
        issue so that it was added to the sprint before it was completed,
        preferably before it started. A completed sprint's issues also all have
        a resolution set before the completion date.
        If a sprint was not started, you also have to edit the marker and copy
        the rank of each issue.
:param sprint_id: the sprint to add issues to
:param issue_keys: the issues to add to the sprint
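        For example (the sprint id and issue keys are hypothetical)::

            jira.add_issues_to_sprint(123, ['PROJ-14', 'PROJ-15'])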
"""
# Get the customFieldId for "Sprint"
sprint_field_name = "Sprint"
sprint_field_id = [f['schema']['customId'] for f in self.fields()
if f['name'] == sprint_field_name][0]
data = {}
data['idOrKeys'] = issue_keys
data['customFieldId'] = sprint_field_id
data['sprintId'] = sprint_id
data['addToBacklog'] = False
url = self._get_url('sprint/rank', base=self.AGILE_BASE_URL)
r = self._session.put(url, data=json.dumps(data))
def add_issues_to_epic(self, epic_id, issue_keys, ignore_epics=True):
"""
Add the issues in ``issue_keys`` to the ``epic_id``.
:param epic_id: the epic to add issues to
:param issue_keys: the issues to add to the epic
:param ignore_epics: ignore any issues listed in ``issue_keys`` that are epics
"""
data = {}
data['issueKeys'] = issue_keys
data['ignoreEpics'] = ignore_epics
url = self._get_url('epics/%s/add' %
epic_id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
def rank(self, issue, next_issue):
"""
Rank an issue before another using the default Ranking field, the one named 'Rank'.
:param issue: issue key of the issue to be ranked before the second one.
:param next_issue: issue key of the second issue.
"""
# {"issueKeys":["ANERDS-102"],"rankBeforeKey":"ANERDS-94","rankAfterKey":"ANERDS-7","customFieldId":11431}
if not self._rank:
for field in self.fields():
if field['name'] == 'Rank' and field['schema']['custom'] == "com.pyxis.greenhopper.jira:gh-global-rank":
self._rank = field['schema']['customId']
data = {
"issueKeys": [issue], "rankBeforeKey": next_issue, "customFieldId": self._rank}
url = self._get_url('rank', base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
class GreenHopper(JIRA):
def __init__(self, options=None, basic_auth=None, oauth=None, async=None):
warnings.warn(
"GreenHopper() class is deprecated, just use JIRA() instead.", DeprecationWarning)
self._rank = None
JIRA.__init__(
self, options=options, basic_auth=basic_auth, oauth=oauth, async=async)
| m42e/jira | jira/client.py | Python | bsd-2-clause | 107,593 |
from datetime import date
from bs4 import BeautifulSoup
from django.template import Context, Template
from freezegun import freeze_time
from ws import enums, models
from ws.tests import TestCase, factories
class TripTagsTest(TestCase):
@freeze_time("Jan 11 2019 20:30:00 EST")
def test_simple_trip_list(self):
trips = [
factories.TripFactory.create(name="Past Trip", trip_date=date(2017, 8, 2)),
factories.TripFactory.create(name="Today!", trip_date=date(2019, 1, 11)),
factories.TripFactory.create(name="Soon Trip", trip_date=date(2019, 1, 16)),
factories.TripFactory.create(
name="Later Trip", trip_date=date(2019, 1, 19)
),
]
html_template = Template('{% load trip_tags %}{% simple_trip_list trips %}')
context = Context({'trips': trips})
soup = BeautifulSoup(html_template.render(context), 'html.parser')
table = soup.find('table')
heading = table.find('thead').find_all('th')
self.assertEqual([tr.text for tr in heading], ['Trip', 'Date', 'Leaders'])
rows = [tr.find_all('td') for tr in table.find('tbody').find_all('tr')]
date_per_trip = [
(trip.find('a').text, rendered_date.text.strip())
for (trip, rendered_date, _leaders) in rows
]
# We render the dates for each trip unambiguously
self.assertEqual(
date_per_trip,
[
('Past Trip', '2017-08-02'),
('Today!', 'Today'),
('Soon Trip', 'Wed'),
('Later Trip', 'Jan 19'),
],
)
@freeze_time("Jan 10 2019 20:30:00 EST")
def test_trip_list_approve_mode(self):
def _ws_trip(trip_date, **kwargs):
return factories.TripFactory.create(
program=enums.Program.WINTER_SCHOOL.value,
trip_date=trip_date,
**kwargs,
)
has_itinerary = _ws_trip(
date(2019, 1, 11),
info=factories.TripInfoFactory.create(),
trip_type=enums.TripType.HIKING.value,
)
no_itinerary1 = _ws_trip(
date(2019, 1, 11), trip_type=enums.TripType.BC_SKIING.value
)
no_itinerary2 = _ws_trip(
date(2019, 1, 11), trip_type=enums.TripType.ICE_CLIMBING.value
)
html_template = Template('{% load trip_tags %}{% trip_list_table trips True %}')
context = Context({'trips': models.Trip.objects.all().order_by('pk')})
soup = BeautifulSoup(html_template.render(context), 'html.parser')
table = soup.find('table')
heading = table.find('thead').find_all('th')
self.assertEqual(
[tr.text for tr in heading],
['Name', 'Date', 'Level', 'Description', 'Leaders', 'Approve'],
)
rows = [tr.find_all('td') for tr in table.find('tbody').find_all('tr')]
trip_info = [
{
'link': row[0].find('a').attrs['href'],
'icon_classes': row[0].find('i').attrs['class'],
}
for row in rows
]
# For each trip, we give a link to the approve page
self.assertEqual(
trip_info,
[
{
'link': f'/winter_school/trips/{has_itinerary.pk}/',
'icon_classes': ['fa', 'fa-fw', 'fa-hiking'],
},
{
'link': f'/winter_school/trips/{no_itinerary1.pk}/',
'icon_classes': ['fa', 'fa-fw', 'fa-skiing'],
},
{
'link': f'/winter_school/trips/{no_itinerary2.pk}/',
'icon_classes': ['fa', 'fa-fw', 'fa-icicles'],
},
],
)
def test_feedback_for_trip_rated_leader(self):
leader = factories.ParticipantFactory.create(name='Janet Yellin')
rating = factories.LeaderRatingFactory.create(
participant=leader,
activity=models.BaseRating.CLIMBING,
rating='Leader',
active=True,
)
leader.leaderrating_set.add(rating)
feedback = factories.FeedbackFactory.create(
leader=leader,
participant__name="Jerome Powell",
comments="Shows promise",
trip__name='Free solo 5.13 finger crack climbing',
trip__program=enums.Program.CLIMBING.value,
)
self.assertEqual(str(feedback), 'Jerome Powell: "Shows promise" - Janet Yellin')
template = Template('{% load trip_tags %}{{ feedback|leader_display }}')
context = Context({'feedback': feedback})
self.assertEqual(template.render(context), 'Janet Yellin (Leader)')
def test_feedback_but_no_trip(self):
"""While it's rarely used, the data model supports feedback with a null trip."""
leader = factories.ParticipantFactory.create(name='Janet Yellin')
rating = factories.LeaderRatingFactory.create(
participant=leader,
activity=models.BaseRating.CLIMBING,
rating='Leader',
active=True,
)
leader.leaderrating_set.add(rating)
feedback = factories.FeedbackFactory.create(
leader=leader,
participant__name="Jerome Powell",
comments="Shows promise",
trip=None,
)
self.assertEqual(str(feedback), 'Jerome Powell: "Shows promise" - Janet Yellin')
template = Template('{% load trip_tags %}{{ feedback|leader_display }}')
context = Context({'feedback': feedback})
self.assertEqual(template.render(context), 'Janet Yellin')
| DavidCain/mitoc-trips | ws/tests/templatetags/test_trip_tags.py | Python | gpl-3.0 | 5,738 |
#!/usr/bin/python
import os
drc_base_path = os.getenv("DRC_BASE")
import sys
sys.path.append(os.path.join(drc_base_path, "software", "models",
"model_transformation"))
import convertCollada
import mitUrdfUtils as mit
import copy
from jointNameMap import jointNameMap
from lxml import etree
from glob import glob
import math
meshesDirectory = "../meshes"
full_mesh_xacro_path = ("../components/osrf_original/atlas_v5.urdf")
full_mesh_urdf_path = ("../components/atlas_v5_full_collision_geometry.urdf")
minimal_contact_urdf_path = "../components/atlas_v5_minimal_contact.urdf"
convex_hull_urdf_path = "../components/atlas_v5_convex_hull.urdf"
# Convert meshes
originalDirectory = os.getcwd()
os.chdir(os.path.abspath(meshesDirectory))
for inFile in glob("*.dae"):
# Use extremities_diffuse_unplugged_mit.jpg and
# torso_diffuse_unplugged_mit.jpg
dae = etree.parse(inFile)
xpath_str = "//*[text()='../materials/textures/extremities_diffuse_unplugged.jpg']"
for element in dae.xpath(xpath_str):
element.text = '../materials/textures/extremities_diffuse_unplugged_mit.jpg'
xpath_str = "//*[text()='../materials/textures/torso_diffuse_unplugged.jpg']"
for element in dae.xpath(xpath_str):
element.text = '../materials/textures/torso_diffuse_unplugged_mit.jpg'
dae.write(inFile)
mit.convertMeshTo(inFile, ".obj")
colladaFile = os.path.splitext(inFile)[0] + '.vtm'
convertCollada.colladaToPolyData(inFile, colladaFile)
os.chdir(originalDirectory)
for inFile in glob(os.path.join(meshesDirectory, "*.obj")):
if "chull" not in inFile:
mit.createConvexHullMesh(inFile)
for inFile in glob(os.path.join(meshesDirectory, "*.obj")):
mit.convertMeshTo(inFile, ".wrl")
mit.xacro(full_mesh_xacro_path, full_mesh_urdf_path)
urdf = etree.parse(full_mesh_urdf_path)
mit.renameJoints(urdf, jointNameMap)
mit.replaceMeshPaths(urdf, "package://atlas_v5/meshes")
mit.useObjMeshes(urdf)
mit.addFrame(urdf, "l_foot_sole", "l_foot", "0.0426 0.0017 -0.07645", "0 0 0")
mit.addFrame(urdf, "r_foot_sole", "r_foot", "0.0426 -0.0017 -0.07645", "0 0 0")
mit.addFrame(urdf, "l_foot_toe", "l_foot", "0.1728 0.0017 -0.07645", "0 0 0")
mit.addFrame(urdf, "r_foot_toe", "r_foot", "0.1728 -0.0017 -0.07645", "0 0 0")
mit.removeCollisions(urdf, ['mtorso', 'ltorso', 'l_talus', 'r_talus'])
armLinkNames = ['clav', 'scap', 'uarm', 'larm', 'ufarm', 'lfarm', 'hand']
for armLinkName in armLinkNames:
mit.copyLinkProperties(urdf, 'r_' + armLinkName, 'l_' + armLinkName)
jointCopyExceptions = ['limit', 'safety_controller']
for armJointName in ['arm_shx', 'arm_ely', 'arm_elx', 'arm_uwy', 'arm_mwx']:
mit.copyJointProperties(urdf, 'r_' + armJointName, 'l_' + armJointName, jointCopyExceptions)
mit.copyJointProperties(urdf, 'r_arm_shz', 'l_arm_shz', jointCopyExceptions + ['origin'])
for jointName in ['arm_shx', 'arm_ely', 'arm_elx', 'arm_uwy', 'arm_lwy']:
mit.invertJointAxis(urdf, 'l_' + jointName)
mit.setJointOriginRPY(urdf, 'l_arm_shz', [0, 0, math.pi])
mit.setJointOriginRPY(urdf, 'l_arm_uwy', [0, math.pi, 0])
mit.setJointOriginRPY(urdf, 'l_arm_lwy', [0, math.pi, 0])
# update joint limits
# legs
mit.setJointLimits(urdf, 'l_leg_aky', -1.06, 0.72)
mit.setJointLimits(urdf, 'r_leg_aky', -1.06, 0.72)
mit.setJointLimits(urdf, 'l_leg_akx', -0.7, 0.7)
mit.setJointLimits(urdf, 'r_leg_akx', -0.7, 0.7)
mit.setJointLimits(urdf, 'l_leg_hpx', -0.23, 0.52)
mit.setJointLimits(urdf, 'r_leg_hpx', -0.52, 0.23)
mit.setJointLimits(urdf, 'l_leg_hpz', -0.2, 0.82)
mit.setJointLimits(urdf, 'r_leg_hpz', -0.82, 0.2)
# arms
mit.setJointLimits(urdf, 'l_arm_uwy',-2.98, 2.98)
mit.setJointLimits(urdf, 'r_arm_uwy',-2.98, 2.98)
mit.setJointLimits(urdf, 'l_arm_mwx',-1.76, 1.76)
mit.setJointLimits(urdf, 'r_arm_mwx',-1.76, 1.76)
mit.setJointLimits(urdf, 'l_arm_lwy',-2.82, 2.82)
mit.setJointLimits(urdf, 'r_arm_lwy',-2.82, 2.82)
# neck
mit.setJointLimits(urdf, 'neck_ay', -0.605, 1.16)
# Create minimal contact skeleton
mit.removeAllCollisions(urdf)
minimal_contact_urdf = copy.deepcopy(urdf)
contact_pts = {'r_foot': {'heel': ["-0.0876 0.0626 -0.07645", "-0.0876 -0.066 -0.07645"],
'toe': ["0.1728 0.0626 -0.07645","0.1728 -0.066 -0.07645"],
'midfoot_front': ["0.086 0.0626 -0.07645", "0.086 -0.066 -0.07645"],
'midfoot_rear': ["-0.0008 0.0626 -0.07645", "-0.0008 -0.066 -0.07645"]},
'l_foot': {'heel': ["-0.0876 0.066 -0.07645", "-0.0876 -0.0626 -0.07645"],
'toe': ["0.1728 0.066 -0.07645", "0.1728 -0.0626 -0.07645"],
'midfoot_front': ["0.086 0.066 -0.07645", "0.086 -0.0626 -0.07645"],
'midfoot_rear': ["-0.008 0.066 -0.07645", "-0.008 -0.0626 -0.07645"]},
'pelvis': {'butt': ["-0.0927 -0.0616 -0.144", "-0.0927 0.0623 -0.144"]}};
for foot_name, groups in contact_pts.iteritems():
for group_name, pts in groups.iteritems():
for pt in pts:
mit.addContactPoint(minimal_contact_urdf, foot_name, pt, group_name)
minimal_contact_urdf.write(minimal_contact_urdf_path, pretty_print=True)
# Create convex hull skeleton
mit.addCollisionsFromVisuals(urdf)
mit.useConvexHullMeshes(urdf)
mit.removeCollisions(urdf, ['mtorso', 'ltorso', 'l_talus', 'r_talus'])
mit.addCollisionFilterGroup(urdf, 'feet', ['l_foot', 'r_foot'], ['feet'])
mit.addCollisionFilterGroup(urdf, 'core', ['utorso', 'pelvis'], ['core'])
mit.addCollisionFilterGroup(urdf, 'ignore_core',
['r_scap', 'l_scap', 'r_clav', 'l_clav'], ['core'])
mit.addCollisionFilterGroup(urdf, 'r_uleg', ['r_uglut', 'r_lglut', 'r_uleg'],
['core', 'r_uleg', 'l_uleg'])
mit.addCollisionFilterGroup(urdf, 'l_uleg', ['l_uglut', 'l_lglut', 'l_uleg'],
['core', 'l_uleg', 'l_uleg'])
mit.addCollisionFilterGroup(urdf, 'r_leg', ['r_lleg', 'r_talus', 'r_foot'],
['r_leg', 'r_uleg'])
mit.addCollisionFilterGroup(urdf, 'l_leg', ['l_lleg', 'l_talus', 'l_foot'],
['l_leg', 'l_uleg'])
mit.addCollisionFilterGroup(urdf, 'r_arm',
['r_clav', 'r_scap', 'r_uarm', 'r_larm', 'r_ufarm',
'r_lfarm', 'r_hand'],
['r_arm'])
mit.addCollisionFilterGroup(urdf, 'l_arm',
['l_clav', 'l_scap', 'l_uarm', 'l_larm', 'l_ufarm',
'l_lfarm', 'l_hand'],
['l_arm'])
convex_hull_urdf = copy.deepcopy(urdf)
for foot_name, groups in contact_pts.iteritems():
for group_name, pts in groups.iteritems():
for pt in pts:
mit.addContactPoint(convex_hull_urdf, foot_name, pt, group_name)
convex_hull_urdf.write(convex_hull_urdf_path, pretty_print=True)
# Add a geometry to represent the hose for lidar filtering to full collision model
for link in urdf.xpath("//link[contains(@name,'larm')]"):
collision = mit.addCollision(link)
mit.addOrigin(collision, xyz=[0.0, 0.15, 0.0])
geometry = mit.addGeometry(collision)
mit.addBox(geometry, size=[0.15, 0.20, 0.15])
urdf.write(full_mesh_urdf_path, pretty_print=True)
| openhumanoids/oh-distro | software/models/atlas_v5/mit_modifications/atlas_skeleton_v5.py | Python | bsd-3-clause | 7,241 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""VGG19 model for Keras.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import keras_export
WEIGHTS_PATH = ('https://storage.googleapis.com/tensorflow/keras-applications/'
'vgg19/vgg19_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/vgg19/'
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5')
layers = VersionAwareLayers()
@keras_export('keras.applications.vgg19.VGG19', 'keras.applications.VGG19')
def VGG19(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the VGG19 architecture.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The default input size for this model is 224x224.
Note: each Keras Application expects a specific kind of input preprocessing.
For VGG19, call `tf.keras.applications.vgg19.preprocess_input` on your
inputs before passing them to the model.
`vgg19.preprocess_input` will convert the input images from RGB to BGR,
then will zero-center each color channel with respect to the ImageNet dataset,
without scaling.
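  A short usage sketch (the image file name is hypothetical):

  ```python
  from tensorflow.keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions
  from tensorflow.keras.preprocessing import image
  import numpy as np

  model = VGG19(weights='imagenet')
  img = image.load_img('elephant.jpg', target_size=(224, 224))
  x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
  preds = model.predict(x)
  print(decode_predictions(preds, top=3)[0])
  ```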
Args:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = layers.Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv1')(
img_input)
x = layers.Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = layers.Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = layers.Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = layers.Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = layers.Flatten(name='flatten')(x)
x = layers.Dense(4096, activation='relu', name='fc1')(x)
x = layers.Dense(4096, activation='relu', name='fc2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='vgg19')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = data_utils.get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='cbe5617147190e668d6c5d5026f83318')
else:
weights_path = data_utils.get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='253f8cb515780f3b799900260a226db6')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.vgg19.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='caffe')
@keras_export('keras.applications.vgg19.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
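if __name__ == '__main__':
  # Illustrative usage sketch (not part of the original module). Runs the model
  # on a random batch standing in for real 224x224 RGB images; assumes the
  # pretrained ImageNet weights and class index can be downloaded.
  import numpy as np  # local import, keeps the sketch self-contained
  model = VGG19(weights='imagenet')
  batch = np.random.uniform(0, 255, size=(1, 224, 224, 3))
  preds = model.predict(preprocess_input(batch))
  print(decode_predictions(preds, top=3)[0])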
| petewarden/tensorflow | tensorflow/python/keras/applications/vgg19.py | Python | apache-2.0 | 9,934 |
__all__ = ["Decoder"]
import struct
from array import array
import pyconspack.header as H
import pyconspack.types as T
import pyconspack.error as E
import pyconspack.index as I
class ForwardRef:
def __init__(self, tag):
self.tag = tag
def set(self, place, index = None, is_key = False):
self.place = place
self.index = index
self.is_key = is_key
def replace(self, value):
# Note that FREF=FREF doesn't really work... but a dict()
# can't currently be a key anyway, so stupidity prevents this
# from really being used at all.
if(self.is_key):
oldval = self.place[self]
del self.place[self]
self.place[value] = oldval
else:
self.place[self.index] = value
class Decoder:
def __init__(self, **kw):
self.opt = kw
self.frefs = dict()
self.tags = dict()
self.index = self._opt('index')
if(self.index):
self.index = I.Index(self.index)
def read_header(self, f):
return f.read(1)[0]
def read_size(self, h, f):
size_bytes = h & H.SIZE_MASK
if (size_bytes == 0): return struct.unpack('B', f.read(1))[0]
elif(size_bytes == 1): return struct.unpack('>H', f.read(2))[0]
elif(size_bytes == 2): return struct.unpack('>I', f.read(4))[0]
elif(size_bytes == 3): return struct.unpack('>Q', f.read(8))[0]
def _opt(self, name):
return (name in self.opt) and (self.opt[name])
def _push_fref(self, tag):
fref = ForwardRef(tag)
if(tag in self.frefs):
self.frefs[tag].append(fref)
else:
self.frefs[tag] = [fref]
return fref
def _maybe_fref(self, val, place, index=None, is_key=False):
if(not type(val) is ForwardRef):
return
val.set(place, index, is_key)
def decode(self, f):
return self._decode(f)
def _decode(self, f, fixed=None):
if(not fixed):
h = self.read_header(f)
else:
h = fixed
if (H.is_bool(h)): return self.decode_bool(h)
elif(H.is_number(h)): return self.decode_number(h, f)
elif(H.is_index(h)): return self.decode_index(h, f)
elif(H.is_container(h)): return self.decode_container(h, f)
elif(H.is_cons(h)): return self.decode_cons(h, f)
elif(H.is_string(h)): return self.decode_string(h, f)
elif(H.is_character(h)): return self.decode_character(h, f)
elif(H.is_rref(h)): return self.decode_rref(h, f)
elif(H.is_pointer(h)): return self.decode_pointer(h, f)
elif(H.is_package(h)): return self.decode_package(h, f)
elif(H.is_symbol(h)): return self.decode_symbol(h, f)
elif(H.is_tag(h)): return self.decode_tag(h, f)
elif(H.is_ref(h)): return self.decode_ref(h, f)
else:
raise E.BadHeader("Bad header byte: 0b{h:08b}".format(h=h))
def decode_bool(self, h):
if(h == 0x00): return ()
else: return True
def decode_n(self, f, c):
n = 0
for i in range(c):
n <<= 8
n |= f.read(1)[0]
return n
def decode_number(self, h, f):
c, fmt = H.fixed_type_fmt(h)
if(fmt): return struct.unpack('>'+fmt, f.read(c))[0]
        elif(h == H.INT128):
n = self.decode_n(f, 16)
if(n > 2**127):
n -= 2**128
return n
        elif(h == H.UINT128):
return self.decode_n(f, 16)
def decode_container(self, h, f):
t = h & H.CONTAINER_TYPE_MASK
if (t == H.CONTAINER_VECTOR): return self.decode_vector(h, f)
elif(t == H.CONTAINER_LIST): return self.decode_list(h, f)
elif(t == H.CONTAINER_MAP): return self.decode_map(h, f)
elif(t == H.CONTAINER_TMAP): return self.decode_map(h, f)
def decode_list(self, h, f):
size = self.read_size(h, f)
fixed = None
if(h & H.CONTAINER_FIXED):
fixed = f.read(1)[0]
l = []
for i in range(size-1):
val = self._decode(f, fixed)
l.append(val)
self._maybe_fref(val, l, i)
final = self._decode(f, fixed)
if(final == () or
not (h & H.CONTAINER_TYPE_MASK) == H.CONTAINER_LIST):
return l
else:
l = T.DottedList(l)
l.append(final)
self._maybe_fref(final, l, len(l)-1)
            return l
def decode_vector(self, h, f):
if(not (h & H.CONTAINER_FIXED)):
            return T.Vector(self.decode_list(h, f))
size = self.read_size(h, f)
fixed = f.read(1)[0]
c, fmt = H.fixed_type_fmt(fixed)
a = array(fmt)
for i in range(size):
val = self._decode(f, fixed)
a.append(val)
self._maybe_fref(val, a, i)
return a
def decode_map(self, h, f):
size = self.read_size(h, f)
fixed = None
if(h & H.CONTAINER_FIXED):
fixed = self.read_header(f)
tmap_type = None
if((h & H.CONTAINER_TYPE_MASK) == H.CONTAINER_TMAP):
tmap_type = self._decode(f)
d = dict()
for i in range(size):
k = self._decode(f, fixed)
v = self._decode(f)
self._maybe_fref(k, d, is_key=True)
self._maybe_fref(v, d, k)
d[k] = v
if(tmap_type):
decoders = self._opt('decoders')
if(not decoders or tmap_type not in decoders):
if(tmap_type in Decoder.class_decoders):
decoders = Decoder.class_decoders
else:
raise E.NoDecoder("Decoder for {t} not found".format(t=tmap_type))
return decoders[tmap_type](d)
else:
return d
def decode_string(self, h, f):
size = self.read_size(h, f)
return f.read(size).decode(encoding='utf-8', errors='strict')
def decode_character(self, h, f):
size = h & H.SIZE_MASK
return f.read(size).decode(encoding='utf-8', errors='strict')
def decode_package(self, h, f):
name = self._decode(f)
return T.package(name)
def decode_symbol(self, h, f):
name = self._decode(f)
package = None
if(H.is_keyword(h)):
package = "KEYWORD"
else:
package = self._decode(f)
return T.intern(name, package)
def decode_rref(self, h, f):
decoder = self._opt('rref_decoder')
rref = self._decode(f);
if(decoder):
return decoder(rref)
else:
return T.RRef(rref)
def decode_pointer(self, h, f):
decoder = self._opt('pointer_decoder')
val = self.read_size(h, f)
if(decoder):
return decoder(val)
else:
return T.Pointer(val)
def decode_cons(self, h, f):
car = self._decode(f)
cdr = self._decode(f)
if(not cdr):
return [car]
else:
return T.DottedList([car,cdr])
def decode_tag(self, h, f):
tag = None
if(h & H.REFTAG_INLINE):
tag = h & H.REFTAG_INLINE_VALUE
else:
tag = self.read_size(h, f)
ob = self._decode(f)
self.tags[tag] = ob
if(tag in self.frefs):
self.replace_frefs(tag, ob)
return ob
def decode_ref(self, h, f):
tag = None
if(h & H.REFTAG_INLINE):
tag = h & H.REFTAG_INLINE_VALUE
else:
tag = self.read_size(h, f)
if(tag in self.tags):
return self.tags[tag]
return self._push_fref(tag)
def replace_frefs(self, tag, val):
for f in self.frefs[tag]:
f.replace(val)
def decode_index(self, h, f):
val = None
if(h & H.REFTAG_INLINE):
val = h & H.REFTAG_INLINE_VALUE
else:
            val = self.read_size(h, f)
if(self.index):
return self.index.index(val) or T.Index(val)
else:
return T.Index(val)
class_decoders = dict()
def register(symbol, func):
Decoder.class_decoders[symbol] = func
def deregister(symbol):
del Decoder.class_decoders[symbol]
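if __name__ == '__main__':
    # Illustrative sketch (not part of the library): decode a conspack-encoded
    # payload from a binary stream. The file name is hypothetical; any readable
    # binary stream (e.g. an io.BytesIO) works, since decode() only calls read().
    with open('payload.cpk', 'rb') as f:
        print(Decoder().decode(f))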
| conspack/pyconspack | decode.py | Python | bsd-2-clause | 8,451 |
"""Backends for Browser based Authentication
"""
import time
from functools import wraps
from pulsar.api import (
Http401, PermissionDenied, Http404, HttpRedirect, BadRequest
)
from pulsar.apps.wsgi import Route
from lux.utils.date import to_timestamp, date_from_now, iso8601
from lux.utils.context import app_attribute
from lux.core import User
from .store import session_store
NotAuthorised = (Http401, PermissionDenied, BadRequest)
@app_attribute
def exclude_urls(app):
"""urls to exclude from browser sessions
"""
urls = []
for url in app.config['SESSION_EXCLUDE_URLS']:
urls.append(Route(url))
return tuple(urls)
def session_backend_action(method):
@wraps(method)
def _(self, r, *args, **kwargs):
if r.cache.get('skip_session_backend'):
return
return method(self, r, *args, **kwargs)
return _
class SessionBackend:
"""SessionBackend is used when the client is a web browser
    It maintains a session via a cookie key.
"""
@session_backend_action
def login(self, request, **data):
api = request.api
seconds = request.config['SESSION_EXPIRY']
data['user_agent'] = self._user_agent(request)
data['ip_address'] = request.get_client_address()
data['expiry'] = iso8601(date_from_now(seconds))
response = api.authorizations.post(json=data, jwt=True)
token = response.json()
session = self._create_session(request, token)
request.cache.session = session
return token
@session_backend_action
def logout(self, request):
"""logout a user
"""
session = request.cache.session
try:
request.api.authorizations.delete(token=session.token)
except NotAuthorised:
pass
session_store(request).delete(session.id)
request.cache.user = request.cache.auth_backend.anonymous(request)
request.cache.session = self._create_session(request)
@session_backend_action
def get_permissions(self, request, resources, actions=None):
return self._get_permissions(request, resources, actions)
@session_backend_action
def has_permission(self, request, resource, action):
"""Implement :class:`~AuthBackend.has_permission` method
"""
data = self._get_permissions(request, resource, action)
resource = data.get(resource)
if resource:
return resource.get(action, False)
return False
def request(self, request):
path = request.path[1:]
for url in exclude_urls(request.app):
if url.match(path):
request.cache.skip_session_backend = True
return
key = request.config['SESSION_COOKIE_NAME']
session_key = request.cookies.get(key)
store = session_store(request)
session = None
if session_key:
session = store.get(session_key.value)
if (session and (
session.expiry is None or session.expiry < time.time())):
store.delete(session.id)
session = None
if not session:
session = self._create_session(request)
request.cache.session = session
token = session.token
if token:
try:
user = request.api.user.get(token=session.token).json()
except NotAuthorised:
request.app.auth.logout(request)
raise HttpRedirect(request.config['LOGIN_URL']) from None
except Exception:
request.app.auth.logout(request)
raise
request.cache.user = User(user)
@session_backend_action
def response(self, request, response):
session = request.cache.get('session')
if session:
if response.can_set_cookies():
key = request.config['SESSION_COOKIE_NAME']
session_key = request.cookies.get(key)
id = session.id
if not session_key or session_key.value != id:
response.set_cookie(key, value=str(id), httponly=True,
expires=session.expiry)
session_store(request).save(session)
return response
# INTERNALS
def _create_session(self, request, token=None):
"""Create a new Session object"""
expiry = None
if token:
expiry = to_timestamp(token.get('expiry'))
token = token['id']
if not expiry:
seconds = request.config['SESSION_EXPIRY']
expiry = time.time() + seconds
return session_store(request).create(expiry=expiry,
token=token)
def _get_permissions(self, request, resources, actions=None):
if not isinstance(resources, (list, tuple)):
resources = (resources,)
query = [('resource', resource) for resource in resources]
if actions:
if not isinstance(actions, (list, tuple)):
actions = (actions,)
query.extend((('action', action) for action in actions))
try:
response = request.api.user.permissions.get(params=query)
except NotAuthorised:
handle_401(request)
return response.json()
def _user_agent(self, request, max_len=256):
agent = request.get('HTTP_USER_AGENT')
return agent[:max_len] if agent else ''
def handle_401(request, user=None):
"""When the API respond with a 401 logout and redirect to login
"""
user = user or request.session.user
if user.is_authenticated():
request.app.auth.logout(request)
raise HttpRedirect(request.config['LOGIN_URL'])
else:
raise Http404
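# Illustrative sketch (not part of the original module): the configuration keys
# this backend reads from ``app.config``/``request.config``. The values below
# are hypothetical defaults, shown only to document what each key controls.
_EXAMPLE_SETTINGS = {
    'SESSION_EXPIRY': 7 * 24 * 3600,        # lifetime of a session, in seconds
    'SESSION_COOKIE_NAME': 'LUX_SESSION',   # cookie that carries the session id
    'SESSION_EXCLUDE_URLS': ['api/.*'],     # url patterns skipped by this backend
    'LOGIN_URL': '/login',                  # redirect target after logout/401
}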
| quantmind/lux | lux/ext/sessions/browser.py | Python | bsd-3-clause | 5,814 |
# Copyright 2016 Hewlett Packard Enterprise Development Company LP.
# Copyright 2017 IBM Corp
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
from cinderclient import exceptions as cinder_exceptions
from cinderclient.v3 import client as client
from oslo_log import log
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import keystone
from ironic.conf import CONF
LOG = log.getLogger(__name__)
AVAILABLE = 'available'
IN_USE = 'in-use'
_CINDER_SESSION = None
def _get_cinder_session():
global _CINDER_SESSION
if not _CINDER_SESSION:
auth = keystone.get_auth('cinder')
_CINDER_SESSION = keystone.get_session('cinder', auth=auth)
return _CINDER_SESSION
def get_client():
"""Get a cinder client connection.
:returns: A cinder client.
"""
params = {
'connect_retries': CONF.cinder.retries
}
# TODO(jtaryma): Add support for noauth
# NOTE(TheJulia): If a URL is provided for cinder, we will pass
# along the URL to python-cinderclient. Otherwise the library
# handles keystone url autodetection.
if CONF.cinder.url:
params['endpoint_override'] = CONF.cinder.url
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
params['session'] = _get_cinder_session()
return client.Client(**params)
def is_volume_available(volume):
"""Check if a volume is available for a connection.
:param volume: The object representing the volume.
:returns: Boolean if volume is available.
"""
return (volume.status == AVAILABLE or
(volume.status == IN_USE and
volume.multiattach))
def is_volume_attached(node, volume):
"""Check if a volume is attached to the supplied node.
:param node: The object representing the node.
:param volume: The object representing the volume from cinder.
:returns: Boolean indicating if the volume is attached. Returns True if
cinder shows the volume as presently attached, otherwise
returns False.
"""
attachments = volume.attachments
if attachments is not None:
for attachment in attachments:
if attachment.get('server_id') in (node.instance_uuid, node.uuid):
return True
return False
def _get_attachment_id(node, volume):
"""Return the attachment ID for a node to a volume.
:param node: The object representing the node.
:param volume: The object representing the volume from cinder.
:returns: The UUID of the attachment in cinder, if present. Otherwise
returns None.
"""
# NOTE(TheJulia): This is under the belief that there is a single
# attachment for each node that represents all possible attachment
# information as multiple types can be submitted in a single request.
attachments = volume.attachments
if attachments is None:
return
for attachment in attachments:
if attachment.get('server_id') in (node.instance_uuid, node.uuid):
return attachment.get('attachment_id')
def _create_metadata_dictionary(node, action):
"""Create a volume metadata dictionary.
:param node: Object representing a node.
:param action: String value representing the last action.
:returns: Dictionary with a json representation of
the metadata to send to cinder as it does
not support nested dictionaries.
"""
label = "ironic_node_%s" % node.uuid
data = {'instance_uuid': node.instance_uuid or node.uuid,
'last_seen': datetime.datetime.utcnow().isoformat(),
'last_action': action}
return {label: json.dumps(data)}
def _init_client(task):
"""Obtain cinder client and return it for use.
:param task: TaskManager instance representing the operation.
:returns: A cinder client.
:raises: StorageError If an exception is encountered creating the client.
"""
node = task.node
try:
return get_client()
except Exception as e:
msg = (_('Failed to initialize cinder client for operations on node '
'%(uuid)s: %(err)s') % {'uuid': node.uuid, 'err': e})
LOG.error(msg)
raise exception.StorageError(msg)
def attach_volumes(task, volume_list, connector):
"""Attach volumes to a node.
Enumerate through the provided list of volumes and attach the volumes
to the node defined in the task utilizing the provided connector
information.
If an attachment appears to already exist, we will skip attempting to
attach the volume. If use of the volume fails, a user may need to
remove any lingering pre-existing/unused attachment records since
we have no way to validate if the connector profile data differs
from what was provided to cinder.
:param task: TaskManager instance representing the operation.
:param volume_list: List of volume_id UUID values representing volumes.
:param connector: Dictionary object representing the node sufficiently
to attach a volume. This value can vary based upon
the node's configuration, capability, and ultimately
the back-end storage driver. As cinder was designed
around iSCSI, the 'ip' and 'initiator' keys are
generally expected by cinder drivers.
                    For Fibre Channel, the key 'wwpns' can be used
with a list of port addresses.
Some drivers support a 'multipath' boolean key,
although it is generally False. The 'host' key
is generally used for logging by drivers.
Example:
{
'wwpns': ['list','of','port','wwns'],
'ip': 'ip address',
'initiator': 'initiator iqn',
'multipath': False,
'host': 'hostname',
}
:raises: StorageError If storage subsystem exception is raised.
:returns: List of connected volumes, including volumes that were
already connected to desired nodes. The returned list
can be relatively consistent depending on the end storage
driver that the volume is configured for, however
the 'driver_volume_type' key should not be relied upon
as it is a free-form value returned by the driver.
The accompanying 'data' key contains the actual target
details which will indicate either target WWNs and a LUN
or a target portal and IQN. It also always contains
volume ID in cinder and ironic. Except for these two IDs,
each driver may return somewhat different data although
the same keys are used if the target is FC or iSCSI,
so any logic should be based upon the returned contents.
For already attached volumes, the structure contains
'already_attached': True key-value pair. In such case,
connection info for the node is already in the database,
'data' structure contains only basic info of volume ID in
cinder and ironic, so any logic based on that should
retrieve it from the database.
Example:
[{
'driver_volume_type': 'fibre_channel'
'data': {
'encrypted': False,
'target_lun': 1,
'target_wwn': ['1234567890123', '1234567890124'],
'volume_id': '00000000-0000-0000-0000-000000000001',
'ironic_volume_id':
'11111111-0000-0000-0000-000000000001'}
},
{
'driver_volume_type': 'iscsi'
'data': {
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000002',
                'target_portal': '127.0.0.1:3260',
'volume_id': '00000000-0000-0000-0000-000000000002',
'ironic_volume_id':
'11111111-0000-0000-0000-000000000002',
'target_lun': 2}
},
{
'already_attached': True
'data': {
'volume_id': '00000000-0000-0000-0000-000000000002',
'ironic_volume_id':
'11111111-0000-0000-0000-000000000002'}
}]
"""
node = task.node
client = _init_client(task)
connected = []
for volume_id in volume_list:
try:
volume = client.volumes.get(volume_id)
except cinder_exceptions.ClientException as e:
msg = (_('Failed to get volume %(vol_id)s from cinder for node '
'%(uuid)s: %(err)s') %
{'vol_id': volume_id, 'uuid': node.uuid, 'err': e})
LOG.error(msg)
raise exception.StorageError(msg)
if is_volume_attached(node, volume):
LOG.debug('Volume %(vol_id)s is already attached to node '
'%(uuid)s. Skipping attachment.',
{'vol_id': volume_id, 'uuid': node.uuid})
# NOTE(jtaryma): Actual connection info of already connected
# volume will be provided by nova. Adding this dictionary to
# 'connected' list so it contains also already connected volumes.
connection = {'data': {'ironic_volume_uuid': volume.id,
'volume_id': volume_id},
'already_attached': True}
connected.append(connection)
continue
try:
client.volumes.reserve(volume_id)
except cinder_exceptions.ClientException as e:
msg = (_('Failed to reserve volume %(vol_id)s for node %(node)s: '
                     '%(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
LOG.error(msg)
raise exception.StorageError(msg)
try:
# Provide connector information to cinder
connection = client.volumes.initialize_connection(volume_id,
connector)
except cinder_exceptions.ClientException as e:
msg = (_('Failed to initialize connection for volume '
'%(vol_id)s to node %(node)s: %(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
LOG.error(msg)
raise exception.StorageError(msg)
if 'volume_id' not in connection['data']:
connection['data']['volume_id'] = volume_id
connection['data']['ironic_volume_uuid'] = volume.id
connected.append(connection)
LOG.info('Successfully initialized volume %(vol_id)s for '
'node %(node)s.', {'vol_id': volume_id, 'node': node.uuid})
instance_uuid = node.instance_uuid or node.uuid
try:
# NOTE(TheJulia): The final step of the cinder volume
# attachment process involves updating the volume
# database record to indicate that the attachment has
# been completed, which moves the volume to the
# 'attached' state. This action also sets a mountpoint
# for the volume, if known. In our use case, there is
# no way for us to know what the mountpoint is inside of
# the operating system, thus we send None.
client.volumes.attach(volume_id, instance_uuid, None)
except cinder_exceptions.ClientException as e:
msg = (_('Failed to inform cinder that the attachment for volume '
'%(vol_id)s for node %(node)s has been completed: '
'%(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
LOG.error(msg)
raise exception.StorageError(msg)
try:
# Set metadata to assist a user in volume identification
client.volumes.set_metadata(
volume_id,
_create_metadata_dictionary(node, 'attached'))
except cinder_exceptions.ClientException as e:
LOG.warning('Failed to update volume metadata for volume '
'%(vol_id)s for node %(node)s: %(err)s',
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
return connected
def detach_volumes(task, volume_list, connector, allow_errors=False):
"""Detach a list of volumes from a provided connector detail.
Enumerates through a provided list of volumes and issues
detachment requests utilizing the connector information
that describes the node.
:param task: The TaskManager task representing the request.
:param volume_list: The list of volume id values to detach.
:param connector: Dictionary object representing the node sufficiently
to attach a volume. This value can vary based upon
the node's configuration, capability, and ultimately
the back-end storage driver. As cinder was designed
around iSCSI, the 'ip' and 'initiator' keys are
                      generally expected. For Fibre Channel, the key
'wwpns' can be used with a list of port addresses.
Some drivers support a 'multipath' boolean key,
although it is generally False. The 'host' key
is generally used for logging by drivers.
Example:
{
'wwpns': ['list','of','port','wwns']
'ip': 'ip address',
'initiator': 'initiator iqn',
'multipath': False,
'host': 'hostname'
}
:param allow_errors: Boolean value governing if errors that are returned
are treated as warnings instead of exceptions.
Default False.
:raises: StorageError
"""
def _handle_errors(msg):
if allow_errors:
LOG.warning(msg)
else:
LOG.error(msg)
raise exception.StorageError(msg)
client = _init_client(task)
node = task.node
for volume_id in volume_list:
try:
volume = client.volumes.get(volume_id)
except cinder_exceptions.ClientException as e:
_handle_errors(_('Failed to get volume %(vol_id)s from cinder for '
'node %(node)s: %(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
# If we do not raise an exception, we should move on to
# the next volume since the volume could have been deleted
# before we're attempting to power off the node.
continue
if not is_volume_attached(node, volume):
LOG.debug('Volume %(vol_id)s is not attached to node '
'%(uuid)s: Skipping detachment.',
{'vol_id': volume_id, 'uuid': node.uuid})
continue
try:
client.volumes.begin_detaching(volume_id)
except cinder_exceptions.ClientException as e:
_handle_errors(_('Failed to request detach for volume %(vol_id)s '
'from cinder for node %(node)s: %(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e}
)
# NOTE(jtaryma): This operation only updates the volume status, so
# we can proceed the process of actual detachment if allow_errors
# is set to True.
try:
# Remove the attachment
client.volumes.terminate_connection(volume_id, connector)
except cinder_exceptions.ClientException as e:
_handle_errors(_('Failed to detach volume %(vol_id)s from node '
'%(node)s: %(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
# Skip proceeding with this method if we're not raising
# errors. This will leave the volume in the detaching
# state, but in that case something very unexpected
            # has occurred.
continue
# Attempt to identify the attachment id value to provide
# accessible relationship data to leave in the cinder API
# to enable reconciliation.
attachment_id = _get_attachment_id(node, volume)
try:
# Update the API attachment record
client.volumes.detach(volume_id, attachment_id)
except cinder_exceptions.ClientException as e:
_handle_errors(_('Failed to inform cinder that the detachment for '
'volume %(vol_id)s from node %(node)s has been '
'completed: %(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
# NOTE(jtaryma): This operation mainly updates the volume status,
# so we can proceed the process of volume updating if allow_errors
# is set to True.
try:
# Set metadata to assist in volume identification.
client.volumes.set_metadata(
volume_id,
_create_metadata_dictionary(node, 'detached'))
except cinder_exceptions.ClientException as e:
LOG.warning('Failed to update volume %(vol_id)s metadata for node '
'%(node)s: %(err)s',
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
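def _example_attach_detach(task):
    """Illustrative sketch (not part of the original module).

    Shows how a caller might build a connector dictionary, attach a volume to
    the node in ``task`` and detach it again. The connector values and volume
    UUID are hypothetical; real callers derive them from the node's
    configuration and its volume targets.
    """
    connector = {
        'ip': '192.168.1.20',
        'initiator': 'iqn.1993-08.org.debian:01:deadbeef',
        'multipath': False,
        'host': 'baremetal-node-01',
    }
    volumes = ['00000000-0000-0000-0000-000000000001']
    # Returns connection info for newly attached and already-attached volumes.
    connections = attach_volumes(task, volumes, connector)
    # Tear the attachments down again, tolerating individual failures.
    detach_volumes(task, volumes, connector, allow_errors=True)
    return connections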
| SauloAislan/ironic | ironic/common/cinder.py | Python | apache-2.0 | 18,721 |
# Copyright 2020 Tecnativa - V铆ctor Mart铆nez
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import _, api, models
from odoo.exceptions import ValidationError
class StockLocation(models.Model):
_inherit = "stock.location"
@api.constrains("active")
def _check_active_stock_archive_constraint_stock_quant(self):
res = self.env["stock.quant"].search(
[
"&",
("location_id.usage", "in", ("internal", "transit")),
"|",
("location_id", "in", self.filtered(lambda x: not x.active).ids),
("location_id", "child_of", self.filtered(lambda x: not x.active).ids),
],
limit=1,
)
if res:
raise ValidationError(
_(
"It is not possible to archive location '%s' which has "
"associated stock quantities." % res[0].display_name
)
)
@api.constrains("active")
def _check_active_stock_archive_constraint_stock_move(self):
res = self.env["stock.move"].search(
[
"&",
("state", "not in", ("done", "cancel")),
"|",
("location_id", "in", self.filtered(lambda x: not x.active).ids),
("location_id", "child_of", self.filtered(lambda x: not x.active).ids),
],
limit=1,
)
if res:
raise ValidationError(
_(
"It is not possible to archive location '%s' which has "
"associated picking lines." % res[0].display_name
)
)
@api.constrains("active")
def _check_active_stock_archive_constraint_stock_move_line(self):
res = self.env["stock.move.line"].search(
[
"&",
("state", "not in", ("done", "cancel")),
"|",
("location_id", "in", self.filtered(lambda x: not x.active).ids),
("location_id", "child_of", self.filtered(lambda x: not x.active).ids),
],
limit=1,
)
if res:
raise ValidationError(
_(
"It is not possible to archive location '%s' which has "
"associated stock reservations." % res[0].display_name
)
)
| OCA/stock-logistics-warehouse | stock_archive_constraint/models/stock_location.py | Python | agpl-3.0 | 2,452 |
from django import VERSION
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def import_from_settings(attr, *args):
"""
Load an attribute from the django settings.
:raises:
ImproperlyConfigured
"""
try:
if args:
return getattr(settings, attr, args[0])
return getattr(settings, attr)
except AttributeError:
raise ImproperlyConfigured('Setting {0} not found'.format(attr))
def absolutify(request, path):
"""Return the absolute URL of a path."""
return request.build_absolute_uri(path)
# Computed once, reused in every request
_less_than_django_1_10 = VERSION < (1, 10)
def is_authenticated(user):
"""return True if the user is authenticated.
This is necessary because in Django 1.10 the `user.is_authenticated`
stopped being a method and is now a property.
Actually `user.is_authenticated()` actually works, thanks to a backwards
compat trick in Django. But in Django 2.0 it will cease to work
as a callable method.
"""
if _less_than_django_1_10:
return user.is_authenticated()
return user.is_authenticated
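if __name__ == '__main__':  # pragma: no cover
    # Illustrative usage sketch (not part of the module): read one required
    # setting and one with a fallback default. The setting names are
    # hypothetical; a configured Django settings module is assumed.
    client_id = import_from_settings('OIDC_RP_CLIENT_ID')
    login_timeout = import_from_settings('OIDC_LOGIN_TIMEOUT', 30)
    print(client_id, login_timeout)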
| cloudera/hue | desktop/core/ext-py/mozilla-django-oidc-1.0.0/mozilla_django_oidc/utils.py | Python | apache-2.0 | 1,174 |
#!/usr/bin/env python3
# Copyright (c) 2017 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import rclpy
from time import sleep
from std_msgs.msg import String
from example_interfaces.srv import AddTwoInts
node = None
def cleanup():
global node
node.destroy_node()
rclpy.shutdown()
def callback(request, response):
response.sum = request.a + request.b
return response
def main():
global node
service = 'js_py_add_two_ints'
if len(sys.argv) > 1:
service = sys.argv[1]
rclpy.init()
node = rclpy.create_node('add_service')
service = node.create_service(AddTwoInts, service, callback)
while rclpy.ok():
rclpy.spin_once(node)
cleanup()
if __name__ == '__main__':
main()
| minggangw/rclnodejs-1 | test/py/service.py | Python | apache-2.0 | 1,255 |
SECRET_KEY = 'SEKRIT'
INSTALLED_APPS = ('combocache', 'tests',)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': './test.db',
}
}
| hzdg/django-combocache | tests/settings.py | Python | mit | 182 |
"""
"""
from __future__ import absolute_import
from ..unitquantity import UnitQuantity
from .mass import gram, kg, ounce, lb
from .length import cm, m, ft
from .time import s
from .acceleration import g_0
N = newton = UnitQuantity(
'newton',
kg*m/s**2,
symbol='N',
aliases=['newtons']
)
dyne = UnitQuantity(
'dyne',
gram*cm/s**2,
symbol='dyn',
aliases=['dynes']
)
pond = UnitQuantity(
'pond',
g_0*kg,
symbol='p',
aliases=['ponds']
)
kgf = force_kilogram = kilogram_force = UnitQuantity(
'kilogram_force',
kg*g_0,
symbol='kgf',
aliases=['force_kilogram']
)
ozf = force_ounce = ounce_force = UnitQuantity(
'ounce_force',
ounce*g_0,
symbol='ozf',
aliases=['force_ounce']
)
lbf = force_pound = pound_force = UnitQuantity(
'pound_force',
lb*g_0,
symbol='lbf',
aliases=['force_pound']
)
poundal = UnitQuantity(
'poundal',
lb*ft/s**2,
symbol='pdl',
aliases=['poundals']
)
gf = gram_force = force_gram = UnitQuantity(
'gram_force',
gram*g_0,
symbol='gf',
aliases=['force_gram']
)
force_ton = ton_force = UnitQuantity(
'ton_force',
2000*force_pound,
aliases=['force_ton'])
kip = UnitQuantity(
'kip', 1000*lbf
)
del UnitQuantity, gram, kg, cm, m, s, g_0
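if __name__ == '__main__':
    # Illustrative sketch (not part of the module): converting between the
    # force units defined above via the quantities rescale API.
    print((10 * lbf).rescale(N))       # pound-force expressed in newtons
    print((1 * kip).rescale(kgf))      # kip expressed in kilogram-force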
| AdaptiveApplications/carnegie | tarc_bus_locator_client/quantities-0.10.1/quantities/units/force.py | Python | mit | 1,292 |
# -*- coding:utf8 -*-
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.statsgov.items import StatsGovItemLoader, StatsGovItem
from scrapy.http.request import Request
class StatsgovSpider(BaseSpider):
name = "statsgov"
allowed_domains = ["statsgov.org"]
start_urls = [
"http://www.stats.gov.cn/tjbz/xzqhdm/t20030219_67297.htm", # 2002-12-31
"http://www.stats.gov.cn/tjbz/xzqhdm/t20030722_93432.htm", # 2003-06-30
"http://www.stats.gov.cn/tjbz/xzqhdm/t20040211_140666.htm", # 2003-12-31
"http://www.stats.gov.cn/tjbz/xzqhdm/t20040607_402156425.htm", # 2004-03-31
"http://www.stats.gov.cn/tjbz/xzqhdm/t20041022_402202293.htm", # 2004-09-30
"http://www.stats.gov.cn/tjbz/xzqhdm/t20041022_402259937.htm", # 2004-12-31
]
def parse(self, response):
hxs = HtmlXPathSelector(response)
title = hxs.select('//title/text()').extract()[0]
with open(title, 'wb') as f:
f.write(response.body)
trs = hxs.select('//span[@class="content"]/table/tbody/tr')
items = []
for tr in trs:
i = StatsGovItemLoader(StatsGovItem())
code = tr.select("td[1]/p/span/text()").extract()[0]
label = tr.select("td[2]/p/span/text()").extract()[0]
i.add_value(u"label", label)
i.add_value(u"code", code)
items.append(i.load_item())
return items
| 535521469/crawler_sth | scrapy/statsgov/spiders/statsgov_spider.py | Python | bsd-3-clause | 1,553 |
# app/mod_user/__init__.py
| cisko3000/flask-inventory | app/mod_user/__init__.py | Python | mit | 27 |
import time
import os
import re
import logging
from autotest.client.shared import error
from virttest import utils_misc
from virttest import utils_test
from virttest import env_process
from virttest import data_dir
from autotest.client import utils
@error.context_aware
def run(test, params, env):
"""
KVM whql env setup test:
1) Log into a guest
2) Update Windows kernel to the newest version
3) Un-check Automatically restart in system failure
4) Disable UAC
5) Get the symbol files
6) Set VM to physical memory + 100M
7) Update the nic configuration
8) Install debug view and make it auto run
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
log_path = "%s/../debug" % test.resultsdir
# Prepare the tools iso
error.context("Prepare the tools iso", logging.info)
src_list = params.get("src_list")
src_path = params.get("src_path", "%s/whql_src" % test.tmpdir)
if not os.path.exists(src_path):
os.makedirs(src_path)
if src_list is not None:
for i in re.split(",", src_list):
utils.unmap_url(src_path, i, src_path)
# Make iso for src
cdrom_whql = params.get("cdrom_whql")
cdrom_whql = utils_misc.get_path(data_dir.get_data_dir(), cdrom_whql)
cdrom_whql_dir = os.path.split(cdrom_whql)[0]
if not os.path.exists(cdrom_whql_dir):
os.makedirs(cdrom_whql_dir)
cmd = "mkisofs -J -o %s %s" % (cdrom_whql, src_path)
utils.system(cmd)
params["cdroms"] += " whql"
vm = "vm1"
vm_params = params.object_params(vm)
env_process.preprocess_vm(test, vm_params, env, vm)
vm = env.get_vm(vm)
timeout = float(params.get("login_timeout", 240))
session = vm.wait_for_login(timeout=timeout)
error_log = utils_misc.get_path(log_path, "whql_setup_error_log")
run_guest_log = params.get(
"run_guest_log", "%s/whql_qemu_comman" % test.tmpdir)
    # Record qemu command line in a log file
error.context("Record qemu command line", logging.info)
if os.path.isfile(run_guest_log):
fd = open(run_guest_log, "r+")
fd.read()
else:
fd = open(run_guest_log, "w")
fd.write("%s\n" % vm.qemu_command)
fd.close()
# Get set up commands
update_cmd = params.get("update_cmd", "")
timezone_cmd = params.get("timezone_cmd", "")
auto_restart = params.get("auto_restart", "")
qxl_install = params.get("qxl_install", "")
debuggers_install = params.get("debuggers_install", "")
disable_uas = params.get("disable_uas", "")
symbol_files = params.get("symbol_files", "")
vm_size = int(params.get("mem")) + 100
nic_cmd = params.get("nic_config_cmd", "")
dbgview_cmd = params.get("dbgview_cmd", "")
format_cmd = params.get("format_cmd", "")
disable_firewall = params.get("disable_firewall", "")
disable_update = params.get("disable_update", "")
setup_timeout = int(params.get("setup_timeout", "7200"))
disk_init_cmd = params.get("disk_init_cmd", "")
disk_driver_install = params.get("disk_driver_install", "")
vm_ma_cmd = "wmic computersystem set AutomaticManagedPagefile=False"
vm_cmd = "wmic pagefileset where name=\"C:\\\\pagefile.sys\" set "
vm_cmd += "InitialSize=%s,MaximumSize=%s" % (vm_size, vm_size)
vm_ma_cmd = ""
vm_cmd = ""
if symbol_files:
symbol_cmd = "del C:\\\\symbols &&"
symbol_cmd += "git clone %s C:\\\\symbol_files C:\\\\symbols" % \
symbol_files
else:
symbol_cmd = ""
wmic_prepare_cmd = "echo exit > cmd && cmd /s wmic"
error.context("Configure guest system", logging.info)
cmd_list = [wmic_prepare_cmd, auto_restart, disable_uas, symbol_cmd,
vm_ma_cmd, vm_cmd, dbgview_cmd, qxl_install, disable_firewall,
timezone_cmd]
if nic_cmd:
for index, nic in enumerate(re.split("\s+", params.get("nics"))):
setup_params = params.get("nic_setup_params_%s" % nic, "")
if params.get("vm_arch_name", "") == "x86_64":
nic_cmd = re.sub("set", "set_64", nic_cmd)
cmd_list.append("%s %s %s" % (nic_cmd, str(index + 1),
setup_params))
if disk_init_cmd:
disk_num = len(re.split("\s+", params.get("images")))
if disk_driver_install:
cmd_list.append(disk_driver_install + str(disk_num - 1))
labels = "IJKLMNOPQRSTUVWXYZ"
for index, images in enumerate(re.split("\s+", params.get("images"))):
if index > 0:
cmd_list.append(disk_init_cmd % (str(index),
labels[index - 1]))
format_cmd_image = format_cmd % (labels[index - 1],
params.get("win_format_%s" % images))
if params.get("win_extra_%s" % images):
format_cmd_image += " %s" % params.get(
"win_extra_%s" % images)
cmd_list.append(format_cmd_image)
cmd_list += [update_cmd, disable_update]
failed_flag = 0
# Check symbol files in guest
if symbol_files:
error.context("Update symbol files", logging.info)
install_check_tool = False
check_tool_chk = params.get("check_tool_chk",
"C:\debuggers\symchk.exe")
output = session.cmd_output(check_tool_chk)
if "cannot find" in output:
install_check_tool = True
if install_check_tool:
output = session.cmd_output(debuggers_install)
symbol_file_check = params.get("symbol_file_check")
symbol_file_download = params.get("symbol_file_download")
symbol_check_pattern = params.get("symbol_check_pattern")
symbol_pid_pattern = params.get("symbol_pid_pattern")
download = utils_test.BackgroundTest(session.cmd,
(symbol_file_download,
setup_timeout))
sessioncheck = vm.wait_for_login(timeout=timeout)
download.start()
while download.is_alive():
o = sessioncheck.cmd_output(symbol_file_check, setup_timeout)
if symbol_check_pattern in o:
                # Check is done; kill the download process
cmd = "tasklist /FO list"
s, o = sessioncheck.cmd_status_output(cmd)
pid = re.findall(symbol_pid_pattern, o, re.S)
if pid:
cmd = "taskkill /PID %s /F" % pid[0]
try:
sessioncheck.cmd(cmd)
except Exception:
pass
break
time.sleep(5)
sessioncheck.close()
download.join()
for cmd in cmd_list:
if len(cmd) > 0:
s = 0
try:
s, o = session.cmd_status_output(cmd, timeout=setup_timeout)
except Exception, err:
failed_flag += 1
utils_misc.log_line(
error_log, "Unexpected exception: %s" % err)
if s != 0:
failed_flag += 1
utils_misc.log_line(error_log, o)
if failed_flag != 0:
raise error.TestFail("Have %s setup fialed. Please check the log."
% failed_flag)
| uni-peter-zheng/tp-qemu | generic/tests/whql_env_setup.py | Python | gpl-2.0 | 7,490 |
from django import template
from greyjay.jobs.models import JobPostingListPage, JobPostingPage
register = template.Library()
@register.simple_tag(takes_context=True)
def get_active_posting_page(context):
if JobPostingPage.objects.live().count() == 0:
return None
listing_page = JobPostingListPage.objects.live().first()
return listing_page
| CIGIHub/greyjay | greyjay/jobs/templatetags/jobs_tags.py | Python | mit | 365 |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_env_rpc/dm_env utilities."""
import typing
from absl.testing import absltest
from dm_env import specs
import numpy as np
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import dm_env_utils
from dm_env_rpc.v1 import spec_manager
class TensorSpecToDmEnvSpecTests(absltest.TestCase):
def test_no_bounds_gives_arrayspec(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32
tensor_spec.shape[:] = [3]
tensor_spec.name = 'foo'
actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
self.assertEqual(specs.Array(shape=[3], dtype=np.uint32), actual)
self.assertEqual('foo', actual.name)
def test_string_give_string_array(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.STRING
tensor_spec.shape[:] = [1, 2, 3]
tensor_spec.name = 'string_spec'
actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
self.assertEqual(specs.StringArray(shape=[1, 2, 3]), actual)
self.assertEqual('string_spec', actual.name)
def test_scalar_with_0_n_bounds_gives_discrete_array(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32
tensor_spec.name = 'foo'
max_value = 9
tensor_spec.min.uint32s.array[:] = [0]
tensor_spec.max.uint32s.array[:] = [max_value]
actual = typing.cast(specs.DiscreteArray,
dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec))
expected = specs.DiscreteArray(
num_values=max_value + 1, dtype=np.uint32, name='foo')
self.assertEqual(expected, actual)
self.assertEqual(0, actual.minimum)
self.assertEqual(max_value, actual.maximum)
self.assertEqual('foo', actual.name)
def test_scalar_with_1_n_bounds_gives_bounded_array(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32
tensor_spec.name = 'foo'
tensor_spec.min.uint32s.array[:] = [1]
tensor_spec.max.uint32s.array[:] = [10]
actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
expected = specs.BoundedArray(
shape=(), dtype=np.uint32, minimum=1, maximum=10, name='foo')
self.assertEqual(expected, actual)
self.assertEqual('foo', actual.name)
def test_scalar_with_0_min_and_no_max_bounds_gives_bounded_array(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32
tensor_spec.name = 'foo'
tensor_spec.min.uint32s.array[:] = [0]
actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
expected = specs.BoundedArray(
shape=(), dtype=np.uint32, minimum=0, maximum=2**32 - 1, name='foo')
self.assertEqual(expected, actual)
self.assertEqual('foo', actual.name)
def test_only_min_bounds(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32
tensor_spec.shape[:] = [3]
tensor_spec.name = 'foo'
tensor_spec.min.uint32s.array[:] = [1]
actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
expected = specs.BoundedArray(
shape=[3], dtype=np.uint32, minimum=1, maximum=2**32 - 1)
self.assertEqual(expected, actual)
self.assertEqual('foo', actual.name)
def test_only_max_bounds(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32
tensor_spec.shape[:] = [3]
tensor_spec.name = 'foo'
tensor_spec.max.uint32s.array[:] = [10]
actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
expected = specs.BoundedArray(
shape=[3], dtype=np.uint32, minimum=0, maximum=10)
self.assertEqual(expected, actual)
self.assertEqual('foo', actual.name)
def test_both_bounds(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32
tensor_spec.shape[:] = [3]
tensor_spec.name = 'foo'
tensor_spec.min.uint32s.array[:] = [1]
tensor_spec.max.uint32s.array[:] = [10]
actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
expected = specs.BoundedArray(
shape=[3], dtype=np.uint32, minimum=1, maximum=10)
self.assertEqual(expected, actual)
self.assertEqual('foo', actual.name)
def test_bounds_oneof_not_set_gives_dtype_bounds(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32
tensor_spec.shape[:] = [3]
tensor_spec.name = 'foo'
# Just to force the message to get created.
tensor_spec.min.floats.array[:] = [3.0]
tensor_spec.min.ClearField('floats')
actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
expected = specs.BoundedArray(
shape=[3], dtype=np.uint32, minimum=0, maximum=2**32 - 1)
self.assertEqual(expected, actual)
self.assertEqual('foo', actual.name)
def test_bounds_wrong_type_gives_error(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32
tensor_spec.shape[:] = [3]
tensor_spec.name = 'foo'
tensor_spec.min.floats.array[:] = [1.9]
with self.assertRaisesRegex(ValueError, 'uint32'):
dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
def test_bounds_on_string_gives_error(self):
tensor_spec = dm_env_rpc_pb2.TensorSpec()
tensor_spec.dtype = dm_env_rpc_pb2.DataType.STRING
tensor_spec.shape[:] = [2]
tensor_spec.name = 'named'
tensor_spec.min.floats.array[:] = [1.9]
tensor_spec.max.floats.array[:] = [10.0]
with self.assertRaisesRegex(ValueError, 'string'):
dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
class DmEnvSpecTests(absltest.TestCase):
def test_spec(self):
dm_env_rpc_specs = {
54:
dm_env_rpc_pb2.TensorSpec(
name='fuzz', shape=[3], dtype=dm_env_rpc_pb2.DataType.FLOAT),
55:
dm_env_rpc_pb2.TensorSpec(
name='foo', shape=[2], dtype=dm_env_rpc_pb2.DataType.INT32),
}
manager = spec_manager.SpecManager(dm_env_rpc_specs)
expected = {
'foo': specs.Array(shape=[2], dtype=np.int32),
'fuzz': specs.Array(shape=[3], dtype=np.float32)
}
self.assertDictEqual(expected, dm_env_utils.dm_env_spec(manager))
def test_empty_spec(self):
self.assertDictEqual({},
dm_env_utils.dm_env_spec(spec_manager.SpecManager({})))
def test_spec_generate_and_validate_scalars(self):
dm_env_rpc_specs = []
for name, dtype in dm_env_rpc_pb2.DataType.items():
if dtype != dm_env_rpc_pb2.DataType.INVALID_DATA_TYPE:
dm_env_rpc_specs.append(
dm_env_rpc_pb2.TensorSpec(name=name, shape=(), dtype=dtype))
for dm_env_rpc_spec in dm_env_rpc_specs:
spec = dm_env_utils.tensor_spec_to_dm_env_spec(dm_env_rpc_spec)
value = spec.generate_value()
spec.validate(value)
def test_spec_generate_and_validate_tensors(self):
example_shape = (10, 10, 3)
dm_env_rpc_specs = []
for name, dtype in dm_env_rpc_pb2.DataType.items():
if dtype != dm_env_rpc_pb2.DataType.INVALID_DATA_TYPE:
dm_env_rpc_specs.append(
dm_env_rpc_pb2.TensorSpec(
name=name, shape=example_shape, dtype=dtype))
for dm_env_rpc_spec in dm_env_rpc_specs:
spec = dm_env_utils.tensor_spec_to_dm_env_spec(dm_env_rpc_spec)
value = spec.generate_value()
spec.validate(value)
if __name__ == '__main__':
absltest.main()
| deepmind/dm_env_rpc | dm_env_rpc/v1/dm_env_utils_test.py | Python | apache-2.0 | 8,223 |
from __future__ import absolute_import, print_function, division
__author__ = 'Alistair Miles <alimanfoo@googlemail.com>'
| rs/petl | src/petl/test/transform/__init__.py | Python | mit | 124 |
from relier.web.views import AuthenticatedView
from relier.models import Invitation
from flask import g, render_template, abort, request
class Invitations(AuthenticatedView):
def get(self):
if not g.user.is_admin:
abort(403)
invitations = Invitation.select().where(Invitation.organization == g.user.organization)
        invitations = invitations.order_by(Invitation.email.desc())
return render_template('invitations.j2', invitations=invitations)
def post(self):
if not g.user.is_admin:
abort(403)
email = request.form.get('email_address')
if not email:
abort(400)
if Invitation.select().where(Invitation.email == email).count() != 0:
abort(409)
Invitation.send(organization = g.user.organization, email = email)
invitations = Invitation.select().where(Invitation.organization == g.user.organization)
        invitations = invitations.order_by(Invitation.email.desc())
return render_template('invitations.j2', invitations=invitations)
| citruspi/relier-api | relier/web/views/invitations.py | Python | unlicense | 1,056 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Script copyright (C) Thomas PORTASSAU (50thomatoes50)
# Contributors: Campbell Barton, Jiri Hnidek, Paolo Ciccone, Thomas Larsson, http://blender.stackexchange.com/users/185/adhi
# <pep8-80 compliant>
bl_info = {
"name": "Metasequoia format (.mqo)",
"author": "Thomas Portassau (50thomatoes50), sapper-trle@github, jacquesmn@github",
"blender": (2, 80, 0),
"version": (0, 2, 1),
"location": "File > Import-Export",
"description": "Import-Export MQO, UV's, "
"materials and textures",
"warning": "Work In Progress, never use the exported file to overwrite original Metasequoia files",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/MQO",
"tracker_url": "https://github.com/50thomatoes50/blender.io_mqo/issues",
"category": "Import-Export"}
#http://wiki.blender.org/index.php/Dev:2.5/Py/Scripts/Cookbook/Code_snippets/Multi-File_packages#init_.py
if "bpy" in locals():
import imp
if "import_mqo" in locals():
imp.reload(import_mqo)
if "export_mqo" in locals():
imp.reload(export_mqo)
import os
import bpy
from bpy.props import (BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from bpy_extras.io_utils import (ExportHelper,
ImportHelper,
path_reference_mode,
axis_conversion,
)
class SCRIPT_OT_export_mqo(bpy.types.Operator, ExportHelper):
bl_idname = "script.export_mqo"
bl_description = 'Export to Metasequoia file format (.mqo)'
bl_label = "Export mqo"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
# From ExportHelper. Filter filenames.
filename_ext = ".mqo"
    filter_glob: StringProperty(default="*.mqo", options={'HIDDEN'})
scale: bpy.props.FloatProperty(
name = "Scale",
description="Scale mesh. Value > 1 means bigger, value < 1 means smaller",
default = 1, min = 0.001, max = 1000.0)
rot90: bpy.props.BoolProperty(
name = "Up axis correction",
description="Blender up axis is Z but metasequoia up axis is Y\nExporter will invert value to be in the correcte direction",
default = True)
invert: bpy.props.BoolProperty(
name = "Correction of inverted faces",
description="Correction of inverted faces",
default = True)
edge: bpy.props.BoolProperty(
name = "Export lost edge",
description="Export edge with is not attached to a polygon",
default = True)
uv_exp: bpy.props.BoolProperty(
name = "Export UV",
description="Export UV",
default = True)
uv_cor: bpy.props.BoolProperty(
name = "Convert UV",
description="invert UV map to be in the direction has metasequoia",
default = True)
mat_exp: bpy.props.BoolProperty(
name = "Export Materials",
description="...",
default = True)
mod_exp: bpy.props.BoolProperty(
name = "Export Modifier",
description="Export modifier like mirror or/and subdivision surface",
default = True)
vcol_exp: bpy.props.BoolProperty(
name = "Export Vertex Colors",
description="Export vertex colors",
default = True)
def execute(self, context):
msg = ".mqo export: Executing"
self.report({'INFO'}, msg)
print(msg)
if self.scale < 1:
s = "%.0f times smaller" % 1.0/self.scale
elif self.scale > 1:
s = "%.0f times bigger" % self.scale
else:
s = "same size"
msg = ".mqo export: Objects will be %s"%(s)
print(msg)
self.report({'INFO'}, msg)
from . import export_mqo
meshobjects = [ob for ob in context.scene.objects if ob.type == 'MESH']
export_mqo.export_mqo(self,
self.properties.filepath,
meshobjects,
self.rot90, self.invert, self.edge, self.uv_exp, self.uv_cor, self.mat_exp, self.mod_exp, self.vcol_exp,
self.scale)
return {'FINISHED'}
def invoke(self, context, event):
meshobjects = [ob for ob in context.scene.objects if ob.type == 'MESH']
if not meshobjects:
msg = ".mqo export: Cancelled - No MESH objects to export."
self.report({'ERROR'}, msg)
print(msg,"\n")
return{'CANCELLED'}
pth, fn = os.path.split(bpy.data.filepath)
nm, xtn = os.path.splitext(fn)
if nm =="":
nm = meshobjects[0].name
self.properties.filepath = nm
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class SCRIPT_OT_import_mqo(bpy.types.Operator, ImportHelper):
bl_idname = "script.import_mqo"
bl_description = 'Import from Metasequoia file format (.mqo)'
bl_label = "Import mqo"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
# From ImportHelper. Filter filenames.
filename_ext = ".mqo"
filter_glob: StringProperty(default="*.mqo", options={'HIDDEN'})
scale: bpy.props.FloatProperty(
name = "Scale",
description="Scale mesh. Value > 1 means bigger, value < 1 means smaller",
default = 1, min = 0.001, max = 1000.0)
    rot90: bpy.props.BoolProperty(
            name = "Up axis correction",
            description="Blender's up axis is Z but Metasequoia's up axis is Y\nThe importer will adjust coordinates so the model keeps the correct orientation",
            default = True)
txtenc: bpy.props.EnumProperty(
name="Text encoding", description="Set the text encoding used to write the file (ignored for 4.7+)",
default='ascii', items=[
('ascii', "Ascii", ""),
('cp1252', "CP1252", "Code Page 1252 Western Europe"),
('shift_jis', "Shift JIS", "Shift Japanese Industrial Standards"),
('utf_8', "UTF8", ""),
])
debug: bpy.props.BoolProperty(
name = "Show debug text",
description="Print debug text to console",
default = False)
def execute(self, context):
msg = ".mqo import: Opening %s"% self.properties.filepath
print(msg)
self.report({'INFO'}, msg)
if self.scale < 1:
s = "%.0f times smaller" % (1.0/self.scale)
elif self.scale > 1:
s = "%.0f times bigger" % self.scale
else:
s = "same size"
msg = ".mqo import: Objects will be %s"%(s)
print(msg)
self.report({'INFO'}, msg)
from . import import_mqo
r = import_mqo.import_mqo(self,
self.properties.filepath,
self.rot90,
self.scale,
self.txtenc,
self.debug)
return {r[0]}
def menu_func_import(self, context):
self.layout.operator(SCRIPT_OT_import_mqo.bl_idname, text="Metasequoia (.mqo)")
def menu_func_export(self, context):
self.layout.operator(SCRIPT_OT_export_mqo.bl_idname, text="Metasequoia (.mqo)")
classes = (SCRIPT_OT_import_mqo,
SCRIPT_OT_export_mqo
)
def register():
#bpy.utils.register_module(__name__)
for c in classes:
bpy.utils.register_class(c)
bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
def unregister():
#bpy.utils.unregister_module(__name__)
for c in reversed(classes):
bpy.utils.unregister_class(c)
bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
if __name__ == "__main__":
register()
| 50thomatoes50/blender.io_mqo | io_scene_mqo/__init__.py | Python | gpl-2.0 | 8,539 |
# noinspection PyProtectedMember
from podcast_transcribe_episode.bcp47_lang import _country_tld_from_url, iso_639_1_code_to_bcp_47_identifier
def test_country_tld_from_url():
assert _country_tld_from_url("https://www.bbc.co.uk/news/politics/eu-regions/vote2014_sitemap.xml") == "uk"
def test_iso_639_1_code_to_bcp_47_identifier():
assert iso_639_1_code_to_bcp_47_identifier('') is None
# noinspection PyTypeChecker
assert iso_639_1_code_to_bcp_47_identifier(None) is None
assert iso_639_1_code_to_bcp_47_identifier('lt') == 'lt-LT'
assert iso_639_1_code_to_bcp_47_identifier('en') == 'en-US'
assert iso_639_1_code_to_bcp_47_identifier('en', url_hint='https://WWW.BBC.CO.UK:443/test.html') == 'en-GB'
| berkmancenter/mediacloud | apps/podcast-transcribe-episode/tests/python/test_bcp47_lang.py | Python | agpl-3.0 | 732 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# convert ctp to pyctp
def structmember(bodycode):
import re
ret = []
iterater = re.finditer(r"[\n^]\s*(?P<type>[a-zA-Z][ \w]*[\s\*])(?P<name>[a-zA-Z]\w*)([ \t]*\[[ \t]*(?P<length>\d+)[ \t]*\])?\s*;", bodycode)
for i in iterater:
d = i.groupdict()
ret.append( dict(name=d['name'].strip(), type=d['type'].strip()) )
return ret
def enummember(bodycode):
ret = []
inc = 0
for item in bodycode.split(','):
arr = item.split('=')
key = arr[0].strip()
if len(arr)==2:
inc = int(arr[1])
val = inc
ret.append( dict(name=key, value=val) )
inc += 1
return ret
def classparser(codetree, code):
import re
classname = code[:code.index('{')].split(' ')[-1].strip()
iterater = re.finditer(r"[\n^]\s*(?P<type>.+)\((?P<param>.+)?\)\s*", code)
#print('-----', classname , '----------')
methods = []
for i in iterater:
method = i.groupdict()
names = method['type'].split(' ')
param = method['param']
method_name = names[-1]
method_type = names[-2] if len(names)>1 else None
method_param = []
if param:
params = param.split(',')
for param in params:
p = param.strip().split('=')
defval = None
if len(p)==2:
key = p[-2]
defval = p[-1]
else:
key = p[-1]
keys = key.strip().split(' ')
p_type = keys[-2]
p_name = keys[-1]
p_defv = defval
if p_type == '*':
p_type = keys[-3]
p_name = '*'+p_name
#print(p_type, p_name, p_defv )
method_param.append( dict(type=p_type, name=p_name, default=p_defv) )
#print( method_type , method_name, method_param )
methods.append(dict(name=method_name, type=method_type, param=method_param))
return dict(type='class', name=classname, methods=methods)
def structure(filename):
import re, chardet
with open(filename, 'rb') as f:
data = f.read()
encoding = chardet.detect(data)
sourcecode = data.decode(encoding['encoding'])
ret = {}
# remove all comments
sourcecode = re.sub(r"//[^\n]*", "", sourcecode)
sourcecode = re.sub(r"/\*[.\r\n]*\*/", "", sourcecode)
#define
iterater = re.finditer(r"[\n^]\s*#define\s+(?P<name>[a-zA-Z]\w*(\([^\)]*\))?)([ \t]+(?P<value>.*))?(?!\\)\r?\n", sourcecode)
for i in iterater:
define = i.groupdict()
ret[define['name']] = dict(type='#define', value=define['value'])
# print("#define [{0}] [{1}]".format(define['name'], define['value']))
#typedef
iterater = re.finditer(r"[\n^]\s*typedef\s+(?P<value>[ \w\t]+[ \t\*])(?P<name>[a-zA-Z]\w*)([ \t]*\[[ \t]*(?P<length>\d+)[ \t]*\])?\s*;", sourcecode)
for i in iterater:
typedef = i.groupdict()
# if struct['length']:
# print("typedef {1} {0}[{2}]".format(typedef['name'], typedef['value'].strip(), typedef['length']))
# else:
# print("typedef {1} {0}".format(typedef['name'], typedef['value'].strip()))
ret[typedef['name']] = dict(type='typedef', value=typedef['value'].strip(), length=typedef['length'])
# enum
iterater = re.finditer(r"[\n^]\s*enum\s+(?P<name>[a-zA-Z]\w*)\s*\{(?P<body>[^\}]*)\}", sourcecode)
for i in iterater:
enum = i.groupdict()
ret[enum['name']] = dict(type='enum', value=enummember(enum['body']))
# struct
iterater = re.finditer(r"[\n^]\s*struct\s+(?P<name>[a-zA-Z]\w*)\s*\{(?P<body>[^\}]*)\}", sourcecode)
for i in iterater:
struct = i.groupdict()
ret[struct['name']] = dict(type='struct', value=structmember(struct['body']))
# class
keyword = ""
class_body = ""
class_state = 0
block_nest = 0
block_state = 0
i = 0
while i < len(sourcecode):
char = sourcecode[i]
if not class_state:
if char in ["\r", "\n", "\t", " "]:
if keyword == 'class':
class_state = 1
keyword = ""
else:
keyword += char
if class_state:
class_body += char
if char == '{':
block_nest += 1
block_state = 1
if char == '}':
block_nest -= 1
if block_state and block_nest == 0:
classes = classparser(ret, 'class' + class_body)
ret[classes['name']] = classes
block_state = 0
class_state = 0
class_body = ""
i += 1
return ret
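# Illustrative note (added sketch, not original converter code): structure()
# returns a flat dict keyed by identifier. The identifiers below are made-up
# examples, but the descriptor shapes match the parsing code above:
#
#   {
#     'TExamplePriceType': {'type': 'typedef', 'value': 'double', 'length': None},
#     'TExampleIDType':    {'type': 'typedef', 'value': 'char', 'length': '31'},
#     'CExampleField':     {'type': 'struct', 'value': [{'name': '...', 'type': '...'}, ...]},
#     'CExampleApi':       {'type': 'class', 'name': 'CExampleApi', 'methods': [...]},
#   }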
def funchar(objectid, memberid, typeid, length):
if length:
# _inp = 'es'
_inp = 'y'
# _out = 'N'
_out = 'y'
# _var = 'PyCTP_PyUnicode_DecodeGB2312('+objectid+'->'+memberid+')'
_var = objectid+'->'+memberid
_dec = 'char *'+objectid+'_'+memberid+ ' = nullptr;\n'
# _ref = ', \"gb2312\", &'+objectid+'_'+memberid
_ref = ', &'+objectid+'_'+memberid
# _set = 'if(' + objectid+'_'+memberid + ' != nullptr){ strcpy_s('+objectid+'->'+memberid + ', ' + objectid+'_'+memberid + '); PyMem_Free(' + objectid+'_'+memberid + '); ' + objectid+'_'+memberid + ' = nullptr; }'
_set = 'if(' + objectid+'_'+memberid + ' != nullptr){ strcpy_s('+objectid+'->'+memberid + ', ' + objectid+'_'+memberid + '); ' + objectid+'_'+memberid + ' = nullptr; }'
else:
_inp = 'c'
_out = 'c'
_var = objectid+'->'+memberid
_dec = 'char '+objectid+'_'+memberid+ ' = 0;\n'
_ref = ', &'+objectid+'_'+memberid
_set = objectid+'->'+memberid + ' = ' + objectid+'_'+memberid + ';'
return dict(out=_out, inp=_inp, var = _var, dec=_dec, ref=_ref, sett = _set)
def fundouble(objectid, memberid, typeid, length):
return dict(out='d', inp='d', var=objectid+'->'+memberid,
dec='double '+objectid+'_'+memberid+' = 0.0;\n',
ref=', &'+objectid+'_'+memberid,
sett = objectid+'->'+memberid + ' = ' + objectid+'_'+memberid + ';')
def funint(objectid, memberid, typeid, length):
return dict(out='i', inp='i', var=objectid+'->'+memberid,
dec='int '+objectid+'_'+memberid+' = 0;\n',
ref=', &'+objectid+'_'+memberid,
sett = objectid+'->'+memberid + ' = ' + objectid+'_'+memberid + ';')
def funshort(objectid, memberid, typeid, length):
return dict(out='h',inp='h', var=objectid+'->'+memberid,
dec='short '+objectid+'_'+memberid+' = 0;\n',
ref=', &'+objectid+'_'+memberid,
sett = objectid+'->'+memberid + ' = ' + objectid+'_'+memberid + ';')
typefun = dict(char=funchar,double=fundouble, int=funint, short=funshort)
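# Illustrative note (added sketch, not original converter code): typefun
# dispatches on the underlying C type of a field's typedef. For a
# hypothetical fixed-length char field,
#
#   typefun['char']('pOrder', 'InstrumentID', 'char', '31')
#
# returns a dict whose 'out'/'inp' entries are Py_BuildValue/PyArg_Parse
# format characters and whose 'dec'/'ref'/'sett' entries are C++ fragments
# spliced into the generated conversion functions below.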
def generatestructcppheadercode(codetree):
"""struct header"""
cppheadercodetemplate = "./src/UserApiStruct.h.template"
cppheadercodefile = "./src/UserApiStruct.h"
cppheadercode = ""
for (key, value) in codetree.items():
if(value['type'] == 'struct'):
cppheadercode += "\nPyObject *PyCTP_PyDict_FromStruct(" + key + " *p"+key.replace("CThostFtdc", "")+");"
cppheadercode += "\nint PyCTP_Struct_FromPyDict(" + key + " *p" + key.replace("CThostFtdc", "") + ", PyObject *dict);"
import re, chardet
with open(cppheadercodetemplate, 'rb') as f:
data = f.read()
encoding = chardet.detect(data)
sourcecode = data.decode(encoding['encoding'])
sourcecode = re.sub(r'\{\{\s*body\s*\}\}', cppheadercode, sourcecode)
with open(cppheadercodefile, 'wt') as f:
f.write(sourcecode)
def generatestructcppsourcecode(codetree):
"""struct source"""
cppsourcecodetemplate = "./src/UserApiStruct.cpp.template"
cppsourcecodefile = "./src/UserApiStruct.cpp"
cppsourcecode = ""
for (key,value) in codetree.items():
if(value['type'] == 'struct'):
# analysis struct members
outformat = ""
outvarlist = ""
keywordslist = ""
keywordss = ""
declaration = ""
refcode = ""
setcode = ""
for member in value['value']:
member_name = member['name']
member_type = codetree.get( member['type'] )
type_name = member_type['value']
type_len = member_type['length']
ret = typefun.get(type_name)("p"+key.replace("CThostFtdc", ""), member_name, type_name, type_len)
outformat += ",s:" + ret['out']
outvarlist += "\t\t, \""+member_name+"\", " + ret['var'] + "\n"
keywordslist += "\"" + member_name + "\", "
keywordss += ret['inp']
declaration += '\t'+ret['dec']
refcode += '\t\t'+ret['ref']+'\n'
setcode += '\t\t'+ret['sett']+'\n'
cppsourcecode += "\nint PyCTP_Struct_FromPyDict(" + key + " *p" + key.replace("CThostFtdc", "") + ", PyObject *dict)\n"
cppsourcecode += "{\n"
cppsourcecode += "\tstatic char *kwlist[] = {"+keywordslist+"nullptr};\n"
cppsourcecode += declaration
cppsourcecode += "\tPyCTP_PyDict_FromStruct_BEGIN(p" + key.replace("CThostFtdc", "") + ", \"|"+keywordss+"\")\n";
cppsourcecode += refcode
cppsourcecode += "\tPyCTP_PyDict_FromStruct_END\n"
cppsourcecode += setcode
cppsourcecode += "\tPyCTP_PyDict_FromStruct_RETURN\n"
cppsourcecode += "}\n"
outformat = outformat.lstrip(',')
cppsourcecode += "PyObject *PyCTP_PyDict_FromStruct(" + key + " *p"+key.replace("CThostFtdc", "")+")\n"
cppsourcecode += "{\n"
cppsourcecode += "\tif(p"+key.replace("CThostFtdc", "")+" == nullptr) Py_RETURN_NONE;\n"
cppsourcecode += "\treturn Py_BuildValue(\"{"+outformat+"}\"\n"+outvarlist+"\t\t);\n"
cppsourcecode += "}\n"
import re, chardet
with open(cppsourcecodetemplate, 'rb') as f:
data = f.read()
encoding = chardet.detect(data)
sourcecode = data.decode(encoding['encoding'])
sourcecode = re.sub(r'\{\{\s*body\s*\}\}', cppsourcecode, sourcecode)
with open(cppsourcecodefile, 'wt') as f:
f.write(sourcecode)
def generatemacrocode(codetree):
template = "./src/UserApiDataType.cpp.template"
file = "./src/UserApiDataType.cpp"
cppsourcecode = ""
for (key,value) in codetree.items():
if(value['type'] == 'enum'):
for item in value['value']:
cppsourcecode += "if( PyModule_AddIntMacro(m, "+item['name']+") != 0 ) return -1;\n"
if(value['type'] == '#define'):
if value['value']:
if len(value['value'].strip("'"))==1:
cppsourcecode += "if( PyModule_AddCharMacro(m, "+key+") != 0 ) return -1;\n"
else:
cppsourcecode += "if( PyModule_AddStrConstant(m, \""+key+"\", \""+value['value'].strip("'")+"\") != 0 ) return -1;\n"
import re, chardet
with open(template, 'rb') as f:
data = f.read()
encoding = chardet.detect(data)
sourcecode = data.decode(encoding['encoding'])
sourcecode = re.sub(r'\{\{\s*body\s*\}\}', cppsourcecode, sourcecode)
with open(file, 'wt') as f:
f.write(sourcecode)
def generateMdApi(codetree):
header_template = "./src/MdApi.h.template"
source_template = "./src/MdApi.cpp.template"
#for method in codetree['CThostFtdcMdSpi']['methods']:
#print(method)
import os
ctpdir = './v6.5.1_20200908/v6.5.1_20200908_api_tradeapi_se_linux64/'
codetree = {}
codetree.update(structure(os.path.join(ctpdir, 'ThostFtdcUserApiDataType.h')))
codetree.update(structure(os.path.join(ctpdir, 'ThostFtdcUserApiStruct.h')))
codetree.update(structure(os.path.join(ctpdir, 'ThostFtdcMdApi.h')))
codetree.update(structure(os.path.join(ctpdir, 'ThostFtdcTraderApi.h')))
generatestructcppheadercode(codetree)
generatestructcppsourcecode(codetree)
generatemacrocode(codetree)
#generateMdApi(codetree)
#generateTraderApi(codetree)
| shizhuolin/PyCTP | APIToPyCTP.py | Python | lgpl-3.0 | 11,679 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds android browsers that can be controlled by telemetry."""
import logging
import os
from telemetry import decorators
from telemetry.core import browser
from telemetry.core import exceptions
from telemetry.core import possible_browser
from telemetry.core import platform
from telemetry.core import util
from telemetry.core.backends import adb_commands
from telemetry.core.platform import android_device
from telemetry.core.backends.chrome import android_browser_backend
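# Each entry maps a browser type to a 3-element list:
# [Android package name, backend settings class, local APK file name (or None)].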
CHROME_PACKAGE_NAMES = {
'android-content-shell':
['org.chromium.content_shell_apk',
android_browser_backend.ContentShellBackendSettings,
'ContentShell.apk'],
'android-chrome-shell':
['org.chromium.chrome.shell',
android_browser_backend.ChromeShellBackendSettings,
'ChromeShell.apk'],
'android-webview':
['org.chromium.telemetry_shell',
android_browser_backend.WebviewBackendSettings,
None],
'android-webview-shell':
['org.chromium.android_webview.shell',
android_browser_backend.WebviewShellBackendSettings,
'AndroidWebView.apk'],
'android-chrome':
['com.google.android.apps.chrome',
android_browser_backend.ChromeBackendSettings,
'Chrome.apk'],
'android-chrome-beta':
['com.chrome.beta',
android_browser_backend.ChromeBackendSettings,
None],
'android-chrome-dev':
['com.google.android.apps.chrome_dev',
android_browser_backend.ChromeBackendSettings,
None],
'android-chrome-canary':
['com.chrome.canary',
android_browser_backend.ChromeBackendSettings,
None],
'android-jb-system-chrome':
['com.android.chrome',
android_browser_backend.ChromeBackendSettings,
None]
}
class PossibleAndroidBrowser(possible_browser.PossibleBrowser):
"""A launchable android browser instance."""
def __init__(self, browser_type, finder_options, android_platform,
backend_settings, apk_name):
super(PossibleAndroidBrowser, self).__init__(
browser_type, 'android', backend_settings.supports_tab_control)
assert browser_type in FindAllBrowserTypes(finder_options), (
'Please add %s to android_browser_finder.FindAllBrowserTypes' %
browser_type)
self._platform = android_platform
self._platform_backend = (
android_platform._platform_backend) # pylint: disable=W0212
self._backend_settings = backend_settings
self._local_apk = None
if browser_type == 'exact':
if not os.path.exists(apk_name):
raise exceptions.PathMissingError(
'Unable to find exact apk %s specified by --browser-executable' %
apk_name)
self._local_apk = apk_name
elif apk_name:
chrome_root = util.GetChromiumSrcDir()
candidate_apks = []
for build_dir, build_type in util.GetBuildDirectories():
apk_full_name = os.path.join(chrome_root, build_dir, build_type, 'apks',
apk_name)
if os.path.exists(apk_full_name):
last_changed = os.path.getmtime(apk_full_name)
candidate_apks.append((last_changed, apk_full_name))
if candidate_apks:
# Find the candidate .apk with the latest modification time.
newest_apk_path = sorted(candidate_apks)[-1][1]
self._local_apk = newest_apk_path
def __repr__(self):
return 'PossibleAndroidBrowser(browser_type=%s)' % self.browser_type
def _InitPlatformIfNeeded(self):
pass
def Create(self, finder_options):
self._InitPlatformIfNeeded()
use_rndis_forwarder = (finder_options.android_rndis or
finder_options.browser_options.netsim or
platform.GetHostPlatform().GetOSName() != 'linux')
browser_backend = android_browser_backend.AndroidBrowserBackend(
self._platform_backend,
finder_options.browser_options, self._backend_settings,
use_rndis_forwarder,
output_profile_path=finder_options.output_profile_path,
extensions_to_load=finder_options.extensions_to_load,
target_arch=finder_options.target_arch)
return browser.Browser(
browser_backend, self._platform_backend, self._credentials_path)
def SupportsOptions(self, finder_options):
if len(finder_options.extensions_to_load) != 0:
return False
return True
def HaveLocalAPK(self):
return self._local_apk and os.path.exists(self._local_apk)
@decorators.Cache
def UpdateExecutableIfNeeded(self):
if self.HaveLocalAPK():
logging.warn('Installing %s on device if needed.' % self._local_apk)
self.platform.InstallApplication(self._local_apk)
def last_modification_time(self):
if self.HaveLocalAPK():
return os.path.getmtime(self._local_apk)
return -1
def SelectDefaultBrowser(possible_browsers):
"""Return the newest possible browser."""
if not possible_browsers:
return None
return max(possible_browsers, key=lambda b: b.last_modification_time())
def CanFindAvailableBrowsers():
return android_device.CanDiscoverDevices()
def CanPossiblyHandlePath(target_path):
return os.path.splitext(target_path.lower())[1] == '.apk'
def FindAllBrowserTypes(_options):
return CHROME_PACKAGE_NAMES.keys() + ['exact']
def _FindAllPossibleBrowsers(finder_options, android_platform):
"""Testable version of FindAllAvailableBrowsers."""
if not android_platform:
return []
possible_browsers = []
# Add the exact APK if given.
if (finder_options.browser_executable and
CanPossiblyHandlePath(finder_options.browser_executable)):
normalized_path = os.path.expanduser(finder_options.browser_executable)
exact_package = adb_commands.GetPackageName(normalized_path)
if not exact_package:
raise exceptions.PackageDetectionError(
'Unable to find package for %s specified by --browser-executable' %
normalized_path)
package_info = next((info for info in CHROME_PACKAGE_NAMES.itervalues()
if info[0] == exact_package), None)
if package_info:
[package, backend_settings, _] = package_info
possible_browsers.append(
PossibleAndroidBrowser(
'exact',
finder_options,
android_platform,
backend_settings(package),
normalized_path))
else:
raise exceptions.UnknownPackageError(
'%s specified by --browser-executable has an unknown package: %s' %
(normalized_path, exact_package))
for name, package_info in CHROME_PACKAGE_NAMES.iteritems():
package, backend_settings, local_apk = package_info
b = PossibleAndroidBrowser(name,
finder_options,
android_platform,
backend_settings(package),
local_apk)
if b.platform.CanLaunchApplication(package) or b.HaveLocalAPK():
possible_browsers.append(b)
return possible_browsers
def FindAllAvailableBrowsers(finder_options):
"""Finds all the possible browsers on one device.
The device is either the only device on the host platform,
or |finder_options| specifies a particular device.
"""
device = android_device.GetDevice(finder_options)
if not device:
return []
android_platform = platform.GetPlatformForDevice(device)
return _FindAllPossibleBrowsers(finder_options, android_platform)
| Jonekee/chromium.src | tools/telemetry/telemetry/core/backends/chrome/android_browser_finder.py | Python | bsd-3-clause | 7,601 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-04 09:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('uth_db', '0011_auto_20170102_1649'),
]
operations = [
migrations.AlterField(
model_name='table',
name='booking',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='uth_db.Booking'),
),
]
| GeoCSBI/UTH_DB | mysite/uth_db/migrations/0012_auto_20170104_0938.py | Python | gpl-3.0 | 547 |
#
# Executable template for Challenge scoring application
#
# To use this script, first install the Synapse Python Client
# http://python-docs.synapse.org/
#
# Log in once using your user name and password
# import synapseclient
# syn = synapseclient.Synapse()
# syn.login(<username>, <password>, rememberMe=True)
#
# Your credentials will be saved after which you may run this script with no credentials.
#
# Author: chris.bare
#
###############################################################################
import synapseclient
import synapseclient.utils as utils
from synapseclient.exceptions import *
from synapseclient import Activity
from synapseclient import Project, Folder, File
from synapseclient import Evaluation, Submission, SubmissionStatus
from synapseclient import Wiki
from synapseclient.dict_object import DictObject
from collections import OrderedDict
from datetime import datetime, timedelta
from itertools import izip
from StringIO import StringIO
import argparse
import lock
import json
import math
import os
import random
import re
import sys
import time
import traceback
import urllib
import uuid
from message_templates import *
# use unique names for projects and the evaluation:
CHALLENGE_NAME = "Testing ALS2 DREAM Challenge" #"Synapse Challenge Template"
CHALLENGE_PROJECT_NAME = CHALLENGE_NAME
CHALLENGE_EVALUATION_NAME = CHALLENGE_NAME
PARTICIPANT_PROJECT_NAME = CHALLENGE_NAME + " Participant Project"
ADMIN_USER_IDS = [1984495] #[1421212]
# the page size can be bigger, we do this just to demonstrate pagination
PAGE_SIZE = 20
# the batch size can be bigger, we do this just to demonstrate batching
BATCH_SIZE = 20
# how many times to we retry batch uploads of submission annotations
BATCH_UPLOAD_RETRY_COUNT = 5
# make sure there are multiple batches to handle
NUM_OF_SUBMISSIONS_TO_CREATE = 5
WAIT_FOR_QUERY_ANNOTATIONS_SEC = 30.0
UUID_REGEX = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
# A module level variable to hold the Synapse connection
syn = None
## define the columns that will make up the leaderboard
LEADERBOARD_COLUMNS = [ {'column_name':'objectId', 'display_name':'ID'},
{'column_name':'name', 'display_name':'name'},
{'column_name':'entityId', 'display_name':'entity', 'renderer':'synapseid'},
{'column_name':'status', 'display_name':'status'},
{'column_name':'submitterAlias', 'display_name':'team'},
{'column_name':'userId', 'display_name':'user ID', 'renderer':'userid'},
{'column_name':'bayesian_whatsajigger', 'display_name':'Bayesian Whatsajigger'},
{'column_name':'root_mean_squared_flapdoodle', 'display_name':'RMSF'},
{'column_name':'discombobulation_index', 'display_name':'Discombobulation', 'sort':'DESC'} ]
def update_submissions_status_batch(evaluation, statuses):
"""
Update statuses in batch. This can be much faster than individual updates,
especially in rank based scoring methods which recalculate scores for all
submissions each time a new submission is received.
"""
for retry in range(BATCH_UPLOAD_RETRY_COUNT):
try:
token = None
offset = 0
while offset < len(statuses):
batch = {"statuses" : statuses[offset:offset+BATCH_SIZE],
"isFirstBatch" : (offset==0),
"isLastBatch" : (offset+BATCH_SIZE>=len(statuses)),
"batchToken" : token}
response = syn.restPUT("/evaluation/%s/statusBatch" % evaluation.id, json.dumps(batch))
token = response.get('nextUploadToken', None)
offset += BATCH_SIZE
except SynapseHTTPError as err:
# on 412 ConflictingUpdateException we want to retry
if err.response.status_code == 412:
# sys.stderr.write('%s, retrying...\n' % err.message)
time.sleep(2)
else:
raise
class Team(DictObject):
def __init__(self, **kwargs):
super(Team, self).__init__(kwargs)
def create_team(name, description):
team = {'name': name, 'description': description, 'canPublicJoin':True}
return Team(**syn.restPOST("/team", body=json.dumps(team)))
class Query(object):
"""
An object that helps with paging through annotation query results.
Also exposes properties totalNumberOfResults, headers and rows.
"""
def __init__(self, query, limit=20, offset=0):
self.query = query
self.limit = limit
self.offset = offset
self.fetch_batch_of_results()
def fetch_batch_of_results(self):
uri = "/evaluation/submission/query?query=" + urllib.quote_plus("%s limit %s offset %s" % (self.query, self.limit, self.offset))
results = syn.restGET(uri)
self.totalNumberOfResults = results['totalNumberOfResults']
self.headers = results['headers']
self.rows = results['rows']
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.rows):
if self.offset >= self.totalNumberOfResults:
raise StopIteration()
self.fetch_batch_of_results()
values = self.rows[self.i]['values']
self.i += 1
self.offset += 1
return values
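# Illustrative usage of Query (sketch only, assumes an evaluation with scored
# submissions; see the query() function below for the real call site):
#
#   results = Query(query="select * from evaluation_%s" % evaluation.id)
#   status_index = results.headers.index('status')
#   for row in results:
#       print row[status_index]
#
# Iteration transparently fetches further pages of `limit` rows as needed.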
def set_up():
try:
uuid_suffix = " " + str(uuid.uuid4())
# Create the Challenge Project
challenge_project = syn.store(Project(name=CHALLENGE_PROJECT_NAME+uuid_suffix))
print "Created project %s %s" % (challenge_project.id, challenge_project.name)
evaluation = syn.store(Evaluation(
name=challenge_project.name,
contentSource=challenge_project.id,
status="OPEN",
submissionInstructionsMessage="To submit to the XYZ Challenge, send a tab-delimited file as described here: https://...",
submissionReceiptMessage="Your submission has been received. For further information, consult the leader board at https://..."))
print "Created Evaluation %s %s" % (evaluation.id, evaluation.name)
# Create teams for participants and administrators
participants_team = create_team(CHALLENGE_PROJECT_NAME+uuid_suffix+' Participants', description='A team for people who have joined the challenge')
print "Created team %s %s" % (participants_team.id, participants_team.name)
admin_team = create_team(CHALLENGE_PROJECT_NAME+uuid_suffix+' Administrators', description='A team for challenge administrators')
print "Created team %s %s" % (admin_team.id, admin_team.name)
# give the teams permissions on challenge artifacts
# see: http://rest.synapse.org/org/sagebionetworks/repo/model/ACCESS_TYPE.html
# see: http://rest.synapse.org/org/sagebionetworks/evaluation/model/UserEvaluationPermissions.html
syn.setPermissions(challenge_project, admin_team.id, ['READ', 'UPDATE', 'DELETE', 'CHANGE_PERMISSIONS', 'DOWNLOAD', 'PARTICIPATE', 'SUBMIT', 'READ_PRIVATE_SUBMISSION'])
syn.setPermissions(evaluation, participants_team.id, ['READ', 'PARTICIPATE', 'SUBMIT'])
# Create the participant project
participant_project = syn.store(Project(name=PARTICIPANT_PROJECT_NAME+uuid_suffix))
print "Created project %s %s" % (participant_project.id, participant_project.name)
participant_file = syn.store(File(synapseclient.utils.make_bogus_data_file(), parent=participant_project))
return dict(challenge_project=challenge_project,
evaluation=evaluation,
participant_project=participant_project,
participant_file=participant_file,
participants_team=participants_team,
admin_team=admin_team)
except Exception as ex:
tear_down(locals())
raise
def find_objects(uuid):
"""Based on the given UUID (as a string), find demo artifacts"""
results = list(syn.chunkedQuery('select id from project where project.name == "%s"' % (CHALLENGE_PROJECT_NAME+" "+uuid)))
if results:
challenge_project = syn.get(results[0]['project.id'])
results = list(syn.chunkedQuery('select id from project where project.name == "%s"' % (PARTICIPANT_PROJECT_NAME+" "+uuid)))
if results:
participant_project = syn.get(results[0]['project.id'])
response = syn.restGET("/teams?fragment=" + CHALLENGE_PROJECT_NAME+" "+uuid+" Participants")
participants_team = Team(**response['results'][0])
syn.restGET("/teams?fragment=" + CHALLENGE_PROJECT_NAME+" "+uuid+" Administrators")
admin_team = Team(**response['results'][0])
return dict(challenge_project=challenge_project,
participant_project=participant_project,
participants_team=participants_team,
admin_team=admin_team)
def tear_down(objects):
print "Cleanup:"
for project in (objects[key] for key in objects.keys() if key.endswith("_project")):
try:
for evaluation in syn.getEvaluationByContentSource(project.id):
try:
print " deleting", evaluation.id
syn.restDELETE('/evaluation/%s' % evaluation.id)
except:
sys.stderr.write('Failed to clean up evaluation %s\n' % evaluation.id)
print " deleting", project.id
syn.delete(project)
except Exception as ex1:
print ex1
sys.stderr.write('Failed to clean up project: %s\n' % str(project))
for team in (objects[key] for key in objects.keys() if key.endswith("_team")):
print 'deleting', team['id'], team['name']
syn.restDELETE('/team/{id}'.format(id=team['id']))
def submit_to_challenge(evaluation, participant_file, n=NUM_OF_SUBMISSIONS_TO_CREATE):
for i in range(n):
syn.submit(evaluation=evaluation,
entity=participant_file,
name="Awesome submission %d" % i,
teamName="Team Awesome")
def validate_submission(file_path):
if random.random() < 0.5:
return True, "Validated"
else:
return False, "This submission was randomly selected to be invalid!"
def validate(evaluation,
send_messages=False,
notifications=False,
dry_run=False):
"""
It may be convenient to validate submissions in one pass before scoring
them, especially if scoring takes a long time.
"""
print "\n\nValidating", utils.id_of(evaluation)
print "-" * 60
for submission, status in syn.getSubmissionBundles(evaluation, status='RECEIVED'):
## refetch the submission so that we get the file path
## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
submission = syn.getSubmission(submission)
is_valid, validation_message = validate_submission(submission.filePath)
print submission.id, validation_message
if is_valid:
status.status = "VALIDATED"
else:
status.status = "INVALID"
if not dry_run:
status = syn.store(status)
## send message AFTER storing status to ensure we don't get repeat messages
if not is_valid and send_messages:
profile = syn.getUserProfile(submission.userId)
message = VALIDATION_TEMPLATE.format(
username=profile.get('firstName', profile.get('userName', profile['ownerId'])),
submission_id=submission.id,
submission_name=submission.name,
message=validation_message)
response = syn.sendMessage(
userIds=[submission.userId],
messageSubject="Error validating Submission to "+CHALLENGE_NAME,
messageBody=message)
print "sent validation error message: ", unicode(response).encode('utf-8')
def score_submission(submission, file_path):
"""
Generate some random scoring metrics
"""
if submission.name.endswith('3'):
        raise Exception('A fake test exception occurred while scoring!')
score = dict(bayesian_whatsajigger=random.random(),
root_mean_squared_flapdoodle=random.random(),
discombobulation_index=random.random())
message="\n".join(" %s = %0.5f"%(k,v) for k,v in score.iteritems())
return (score, message)
def score(evaluation,
send_messages=False,
notifications=False,
dry_run=False):
sys.stdout.write('\n\nScoring ' + utils.id_of(evaluation))
sys.stdout.flush()
## collect statuses here for batch update
statuses = []
for submission, status in syn.getSubmissionBundles(evaluation, status='VALIDATED'):
## refetch the submission so that we get the file path
## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
submission = syn.getSubmission(submission)
try:
score, message = score_submission(submission, submission.filePath)
status.status = "SCORED"
status.score = math.fsum(v for k,v in score.iteritems()) / len(score)
status.annotations = synapseclient.annotations.to_submission_status_annotations(score)
except Exception as ex1:
sys.stderr.write('\n\nError scoring submission %s %s:\n' % (submission.name, submission.id))
st = StringIO()
traceback.print_exc(file=st)
sys.stderr.write(st.getvalue())
sys.stderr.write('\n')
status.status = "INVALID"
message = st.getvalue()
if notifications and ADMIN_USER_IDS:
submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (submission.id, submission.name, submission.userId)
response = syn.sendMessage(
userIds=ADMIN_USER_IDS,
messageSubject=CHALLENGE_NAME+": exception during scoring",
messageBody=error_notification_template.format(message=submission_info+st.getvalue()))
print "sent notification: ", unicode(response).encode('utf-8')
if not dry_run:
status = syn.store(status)
## send message AFTER storing status to ensure we don't get repeat messages
if send_messages:
profile = syn.getUserProfile(submission.userId)
if status.status == 'SCORED':
message_body = scoring_message_template.format(
message=message,
username=profile.get('firstName', profile.get('userName', profile['ownerId'])),
submission_name=submission.name,
submission_id=submission.id)
subject = "Submission to "+CHALLENGE_NAME
else:
message_body = scoring_error_message_template.format(
message=message,
username=profile.get('firstName', profile.get('userName', profile['ownerId'])),
submission_name=submission.name,
submission_id=submission.id)
subject = "Error scoring submission to "+CHALLENGE_NAME
response = syn.sendMessage(
userIds=[submission.userId],
messageSubject=subject,
messageBody=message_body)
print "sent message: ", unicode(response).encode('utf-8')
sys.stdout.write('.')
sys.stdout.flush()
sys.stdout.write('\n')
def query(evaluation, expected_result_count=NUM_OF_SUBMISSIONS_TO_CREATE):
"""Test the query that will be run to construct the leaderboard"""
## Note: Constructing the index on which the query operates is an
## asynchronous process, so we may need to wait a bit.
found = False
start_time = time.time()
time.sleep(1)
while not found and (time.time()-start_time < WAIT_FOR_QUERY_ANNOTATIONS_SEC):
results = Query(query="select * from evaluation_%s" % evaluation.id)
if results.totalNumberOfResults < expected_result_count:
time.sleep(2)
else:
found = True
## annotate each column with it's position in the query results, if it's there
for column in LEADERBOARD_COLUMNS:
if column['column_name'] in results.headers:
column['index'] = results.headers.index(column['column_name'])
## print leaderboard
print "\t".join([column['display_name'] for column in LEADERBOARD_COLUMNS if 'index' in column])
for row in results:
if row[results.headers.index('status')] == 'SCORED':
indexes = (column['index'] for column in LEADERBOARD_COLUMNS if 'index' in column)
print "\t".join("%0.4f"%row[i] if isinstance(row[i],float) else unicode(row[i]) for i in indexes)
if not found:
sys.stderr.write("Error: Annotations have not appeared in query results.\n")
def create_supertable_leaderboard(evaluation):
"""
Create the leaderboard using a supertable, a markdown extension that dynamically
builds a table by querying submissions. Because the supertable re-queries whenever
the page is rendered, this step only has to be done once.
"""
uri_base = urllib.quote_plus("/evaluation/submission/query")
# it's incredibly picky that the equals sign here has to be urlencoded, but
# the later equals signs CAN'T be urlencoded.
query = urllib.quote_plus('query=select * from evaluation_%s where status=="SCORED"' % utils.id_of(evaluation))
params = [ ('paging', 'true'),
('queryTableResults', 'true'),
('showIfLoggedInOnly', 'false'),
('pageSize', '25'),
('showRowNumber', 'false'),
('jsonResultsKeyName', 'rows')]
    # Column specifications have 4 fields: renderer, display name, column name, sort.
# Renderer and sort are usually 'none' and 'NONE'.
for i, column in enumerate(LEADERBOARD_COLUMNS):
fields = dict(renderer='none', sort='NONE')
fields.update(column)
params.append(('columnConfig%s' % i, "{renderer},{display_name},{column_name};,{sort}".format(**fields)))
return "${supertable?path=" + uri_base + "%3F" + query + "&" + "&".join([key+"="+urllib.quote_plus(value) for key,value in params]) + "}"
# Notes: supertable fails w/ bizarre error when sorting by a floating point column.
# can we format floating point "%0.4f"
# supertable is really picky about what gets URL encoded.
def create_wiki(evaluation, challenge_home_entity, team):
"""
Create landing page for challenge and a sub-page for a leaderboard.
Note that, while this code demonstrates programmatic generation of wiki markdown
including leader board table widget, the typical method for creating and editing
such content is via the Synapse web portal (www.synapse.org).
"""
wiki = Wiki(
owner=challenge_home_entity,
markdown=CHALLENGE_PROJECT_WIKI.format(
title=CHALLENGE_PROJECT_NAME,
teamId=team['id'],
evalId=evaluation.id))
wiki = syn.store(wiki)
supertable = create_supertable_leaderboard(evaluation)
lb_wiki = Wiki(
title="Leaderboard",
owner=challenge_home_entity,
parentWikiId=wiki.id,
markdown=LEADERBOARD_MARKDOWN.format(evaluation_name=evaluation.name, supertable=supertable))
lb_wiki = syn.store(lb_wiki)
return (wiki, lb_wiki)
def list_submissions(evaluation, status=None, **kwargs):
if isinstance(evaluation, basestring):
evaluation = syn.getEvaluation(evaluation)
print '\n\nSubmissions for: %s %s' % (evaluation.id, evaluation.name)
print '-' * 60
for submission, status in syn.getSubmissionBundles(evaluation, status=status):
print submission.id, submission.createdOn, status.status, submission.name.encode('utf-8'), submission.userId
def list_evaluations(project):
print '\n\nEvaluations for project: ', utils.id_of(project)
print '-' * 60
evaluations = syn.getEvaluationByContentSource(project)
for evaluation in evaluations:
print "Evaluation: %s" % evaluation.id, evaluation.name.encode('utf-8')
def challenge_demo(number_of_submissions=NUM_OF_SUBMISSIONS_TO_CREATE, cleanup=True):
try:
# create a Challenge project, evaluation queue, etc.
objects=set_up()
evaluation=objects['evaluation']
# create submissions
submit_to_challenge(evaluation, objects['participant_file'], n=number_of_submissions)
# validate correctness
# (this can be done at the same time as scoring, below, but we
# demonstrate doing the two tasks separately)
validate(evaluation)
# score the validated submissions
score(evaluation)
# query the results (this is the action used by dynamic leader boards
# viewable in challenge web pages)
query(evaluation, expected_result_count=number_of_submissions)
# create leaderboard wiki page
create_wiki(evaluation, objects['challenge_project'], objects['participants_team'])
finally:
if cleanup and "objects" in locals() and objects:
tear_down(objects)
def command_demo(args):
challenge_demo(args.number_of_submissions, args.cleanup)
def command_cleanup(args):
objs = find_objects(args.uuid)
print "Cleaning up:"
for key,obj in objs.iteritems():
print key,obj['name'],obj['id']
if not args.dry_run:
tear_down(objs)
def command_list(args):
if args.challenge_project:
list_evaluations(project=args.challenge_project)
elif args.evaluation:
list_submissions(evaluation=args.evaluation,
status=args.status)
else:
sys.stderr.write('\nList command requires either an evaluation ID or a synapse project. '\
'The list command might also be customized to list evaluations specific '\
'to a given challenge.\n')
def command_check_status(args):
submission = syn.getSubmission(args.submission)
status = syn.getSubmissionStatus(args.submission)
evaluation = syn.getEvaluation(submission.evaluationId)
## deleting the entity key is a hack to work around a bug which prevents
## us from printing a submission
del submission['entity']
print unicode(evaluation).encode('utf-8')
print unicode(submission).encode('utf-8')
print unicode(status).encode('utf-8')
def command_reset(args):
for submission in args.submission:
status = syn.getSubmissionStatus(submission)
status.status = args.status
if not args.dry_run:
print unicode(syn.store(status)).encode('utf-8')
def command_validate(args):
validate(args.evaluation,
send_messages=args.send_messages,
notifications=args.notifications,
dry_run=args.dry_run)
def command_score(args):
score(args.evaluation,
send_messages=args.send_messages,
notifications=args.notifications,
dry_run=args.dry_run)
def command_rank(args):
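    # Intentionally left unimplemented in this template: ranking of SCORED
    # submissions is challenge-specific (see the 'rank' subcommand registered
    # in main()).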
pass
def main():
global syn
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--user", help="UserName", default=None)
parser.add_argument("-p", "--password", help="Password", default=None)
parser.add_argument("--notifications", help="Send error notifications to challenge admins", action="store_true", default=False)
parser.add_argument("--send-messages", help="Send error confirmation and validation errors to participants", action="store_true", default=False)
parser.add_argument("--dry-run", help="Perform the requested command without updating anything in Synapse", action="store_true", default=False)
parser.add_argument("--debug", help="Show verbose error output from Synapse API calls", action="store_true", default=False)
subparsers = parser.add_subparsers(title="subcommand")
parser_demo = subparsers.add_parser('demo', help="Create a test challenge and populate it with some fake submissions")
parser_demo.add_argument("-n", "--number-of-submissions", type=int, default=NUM_OF_SUBMISSIONS_TO_CREATE)
group = parser_demo.add_mutually_exclusive_group(required=False)
group.add_argument("--cleanup", dest='cleanup', action='store_true', help="Delete any Synapse assets created during the demo")
group.add_argument("--no-cleanup", dest='cleanup', action='store_false')
parser_demo.set_defaults(cleanup=True)
parser_demo.set_defaults(func=command_demo)
parser_cleanup = subparsers.add_parser('cleanup', help="delete challenge artifacts")
parser_cleanup.add_argument("uuid", metavar="UUID", help="UUID of challenge artifacts")
parser_cleanup.set_defaults(func=command_cleanup)
parser_list = subparsers.add_parser('list', help="List submissions to an evaluation or list evaluations")
parser_list.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
parser_list.add_argument("--challenge-project", "--challenge", "--project", metavar="SYNAPSE-ID", default=None)
parser_list.add_argument("-s", "--status", default=None)
parser_list.set_defaults(func=command_list)
parser_status = subparsers.add_parser('status', help="Check the status of a submission")
parser_status.add_argument("submission")
parser_status.set_defaults(func=command_check_status)
parser_reset = subparsers.add_parser('reset', help="Reset a submission to RECEIVED for re-scoring (or set to some other status)")
parser_reset.add_argument("submission", metavar="SUBMISSION-ID", type=int, nargs='+', help="One or more submission IDs")
parser_reset.add_argument("-s", "--status", default='RECEIVED')
parser_reset.set_defaults(func=command_reset)
parser_validate = subparsers.add_parser('validate', help="Validate all RECEIVED submissions to an evaluation")
parser_validate.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
parser_validate.set_defaults(func=command_validate)
parser_score = subparsers.add_parser('score', help="Score all VALIDATED submissions to an evaluation")
parser_score.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
parser_score.set_defaults(func=command_score)
parser_rank = subparsers.add_parser('rank', help="Rank all SCORED submissions to an evaluation")
parser_rank.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
parser_rank.set_defaults(func=command_rank)
args = parser.parse_args()
print "\n" * 2, "=" * 75
print datetime.utcnow().isoformat()
## Acquire lock, don't run two scoring scripts at once
try:
update_lock = lock.acquire_lock_or_fail('challenge', max_age=timedelta(hours=4))
except lock.LockedException:
print u"Is the scoring script already running? Can't acquire lock."
# can't acquire lock, so return error code 75 which is a
# temporary error according to /usr/include/sysexits.h
return 75
try:
syn = synapseclient.Synapse(debug=args.debug)
if not args.user:
args.user = os.environ.get('SYNAPSE_USER', None)
if not args.password:
args.password = os.environ.get('SYNAPSE_PASSWORD', None)
syn.login(email=args.user, password=args.password)
args.func(args)
except Exception as ex1:
sys.stderr.write('Error in scoring script:\n')
st = StringIO()
traceback.print_exc(file=st)
sys.stderr.write(st.getvalue())
sys.stderr.write('\n')
if args.notifications:
message = error_notification_template.format(message=st.getvalue())
response = syn.sendMessage(
userIds=ADMIN_USER_IDS,
messageSubject="Exception while scoring " + CHALLENGE_NAME,
messageBody=message)
print "sent notification: ", unicode(response).encode('utf-8')
finally:
update_lock.release()
print "\ndone: ", datetime.utcnow().isoformat()
print "=" * 75, "\n" * 2
if __name__ == '__main__':
main()
| javigx2/alsdream | src/alsdream/challenge_template.py | Python | gpl-2.0 | 28,520 |
"""ds"""
from __future__ import absolute_import, print_function
from socket import error
__revision__ = '$Id: socketerror_import.py,v 1.2 2005-12-28 14:58:22 syt Exp $'
print(error)
| ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/socketerror_import.py | Python | mit | 183 |
# -*- coding: utf-8 -*-
#Config for the synt project
import os
import nltk
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
#Where collected databases and user config are stored by default
SYNT_PATH = os.path.expanduser("~/.synt")
USER_CONFIG_PATH = os.path.join(SYNT_PATH, 'config.py')
#Emoticons may serve as useful indicators in classifying sentiment.
#These are the set of default emoticons to use; you may use your own or
#disregard emoticons entirely, as they are optional.
EMOTICONS = [
':-L', ':L', '<3', '8)', '8-)', '8-}', '8]', '8-]', '8-|', '8(', '8-(',
'8-[', '8-{', '-.-', 'xx', '</3', ':-{', ': )', ': (', ';]', ':{', '={',
':-}', ':}', '=}', ':)', ';)', ':/', '=/', ';/', 'x(', 'x)', ':D', 'T_T',
'O.o', 'o.o', 'o_O', 'o.-', 'O.-', '-.o', '-.O', 'X_X', 'x_x', 'XD', 'DX',
':-$', ':|', '-_-', 'D:', ':-)', '^_^', '=)', '=]', '=|', '=[', '=(', ':(',
':-(', ':, (', ':\'(', ':-]', ':-[', ':]', ':[', '>.>', '<.<'
]
#Default classifiers supported
CLASSIFIERS = {
'naivebayes' : nltk.NaiveBayesClassifier,
}
#If the user config is in place, use settings from there instead.
if os.path.exists(USER_CONFIG_PATH):
execfile(USER_CONFIG_PATH)
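#Example of a possible user config (illustrative only); anything defined in
#~/.synt/config.py simply overrides the defaults above, e.g.:
#
# EMOTICONS = EMOTICONS + [':3']
# CLASSIFIERS['maxent'] = nltk.MaxentClassifier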
| lrvick/synt | synt/config.py | Python | agpl-3.0 | 1,196 |
import sys
import csv
import parsers
import yaml
#######################################################################################
# From MCP (create_dic_class, create_dic_member, parse_all_files), just modified to death.
#######################################################################################
def create_dic_class (parsed_rgs):
return_dic = {}
for entry in parsed_rgs['class_map']:
notch_data = entry['src_name'].split('/')
return_dic[entry['trg_name']] = {}
return_dic[entry['trg_name']]['notch'] = notch_data[-1]
return_dic[entry['trg_name']]['searge'] = entry['trg_name']
return_dic[entry['trg_name']]['full'] = entry['trg_name']
return_dic[entry['trg_name']]['class'] = entry['trg_name']
return_dic[entry['trg_name']]['full_final'] = entry['trg_name']
return_dic[entry['trg_name']]['notch_pkg'] = '/'.join(notch_data[:-1])
return_dic[entry['trg_name']]['modified'] = False
return_dic[entry['trg_name']]['methods'] = {}
return_dic[entry['trg_name']]['fields'] = {}
return return_dic
def create_dic_member(parsed_rgs, parsed_csv, class_dict, target, type):
return_dic = {}
rev_class_dict = {}
for key,value in class_dict.items():
rev_class_dict[value['notch']] = key
for entry in parsed_rgs[type]:
notch_data = entry['src_name'].split('/')
s_root = entry['trg_name']
if entry['trg_name'].find('func') != -1 or entry['trg_name'].find('field') != -1:
s_root = '_'.join(entry['trg_name'].split('_')[0:2])
else:
s_root = notch_data[-2] + '_' + entry['trg_name']
return_dic[s_root] = {}
return_dic[s_root]['notch'] = notch_data[-1]
return_dic[s_root]['searge'] = entry['trg_name']
return_dic[s_root]['s_root'] = s_root
return_dic[s_root]['full'] = None
return_dic[s_root]['full_final'] = None
return_dic[s_root]['notch_sig'] = None
return_dic[s_root]['csv'] = None
return_dic[s_root]['index'] = s_root.split('_')[-1]
return_dic[s_root]['known'] = False
return_dic[s_root]['notch_class'] = notch_data[-2]
return_dic[s_root]['notch_pkg'] = '/'.join(notch_data[:-2])
return_dic[s_root]['class'] = None
return_dic[s_root]['descript'] = None
return_dic[s_root]['package'] = 'net/minecraft/server'
#Bot related keys
return_dic[s_root]['old_mod'] = False #This modification has already been commited and is considered permanent
return_dic[s_root]['new_mod'] = False #This is a new modification to be commited on next update
return_dic[s_root]['modified'] = False #The entry has been modified
return_dic[s_root]['nick_mod'] = None #The name of the guy who did the last set on this entry
return_dic[s_root]['time_mod'] = None #The time of the modification
return_dic[s_root]['forced'] = False #If this entry has been forced modified
return_dic[s_root]['annotation'] = '' #Some infos which may be usefull later one
try:
return_dic[s_root]['class'] = rev_class_dict[notch_data[-2]]
except:
return_dic[s_root]['class'] = return_dic[s_root]['notch_class']
if type == 'method_map':
return_dic[s_root]['notch_sig'] = entry['src_sig']
#We create a dict lookup based on the csv
member_lookup = {}
descri_lookup = {}
for entry in parsed_csv:
s_root = entry['searge_%s'%target]
if entry['searge_%s'%target].find('func') != -1 or entry['searge_%s'%target].find('field') != -1:
s_root = '_'.join(entry['searge_%s'%target].split('_')[0:2])
member_lookup[s_root] = entry['full']
if 'description' in entry:
descri_lookup[s_root] = entry['description']
else:
descri_lookup[s_root] = '*'
#Now, we go through the return_dict, and associate the corresponding full name to the corresponding key
#If we don't have a fullname, we 'star' it for later parsing
known_name_repr = 'csv'.split('+')
unknown_name_repr = 'searge'.split('+')
for part in known_name_repr:
if part not in ['notch', 'searge', 's_root', 'csv', 'index']:
raise KeyError("Unknown qualifier for representation. Choose in ['notch', 'searge', 's_root', 'csv', 'index'] separated by '+'")
for part in unknown_name_repr:
if part not in ['notch', 'searge', 's_root', 'csv', 'index']:
raise KeyError("Unknown qualifier for representation. Choose in ['notch', 'searge', 's_root', 'csv', 'index'] separated by '+'")
for key in return_dic.keys():
try:
return_dic[key]['csv'] = member_lookup[return_dic[key]['s_root']]
return_dic[key]['known'] = True
except KeyError:
return_dic[key]['csv'] = return_dic[key]['s_root']
for key in return_dic.keys():
try:
return_dic[key]['descript'] = descri_lookup[return_dic[key]['s_root']]
except KeyError:
return_dic[key]['descript'] = '*'
for key in return_dic.keys():
if return_dic[key]['known']:
return_dic[key]['full'] = member_lookup[return_dic[key]['s_root']]
return_dic[key]['full_final'] = '_'.join([return_dic[key][i] for i in known_name_repr])
else:
if return_dic[key]['searge'].find('func') != -1 or return_dic[key]['searge'].find('field') != -1:
return_dic[key]['full'] = return_dic[key]['s_root']
return_dic[key]['full_final'] = '_'.join([return_dic[key][i] for i in unknown_name_repr])
else:
return_dic[key]['full'] = return_dic[key]['searge']
return_dic[key]['full_final'] = return_dic[key]['searge']
# Commented this part. It will make sure to have full final names with the notch extended, even if it is one of the enum.
# Not sure if it should be done or not.
# if return_dic[key]['searge'].find('func') != -1 or return_dic[key]['searge'].find('field') != -1:
# return_dic[key]['full'] = return_dic[key]['searge']
# return_dic[key]['full_final'] = return_dic[key]['searge']
# else:
# return_dic[key]['full'] = return_dic[key]['searge']
# return_dic[key]['full_final'] = return_dic[key]['searge'] + '_' + return_dic[key]['notch']
return return_dic
def parse_all_files():
class_csv = parsers.parse_csv('data/mcp/classes.csv', 3, ',', ['full', 'trashbin', 'notch_c', 'trashbin', 'notch_s', 'description'])
method_csv = parsers.parse_csv('data/mcp/methods.csv', 4, ',', ['trashbin', 'searge_c', 'trashbin', 'searge_s', 'full', 'description'])
field_csv = parsers.parse_csv('data/mcp/fields.csv', 3, ',', ['trashbin', 'trashbin', 'searge_c', 'trashbin', 'trashbin', 'searge_s', 'full', 'description'])
#client_rgs = parsers.parse_rgs(config['client_rgs']) #contains a list of notch_name to searge_name for the client
server_rgs = parsers.parse_rgs('data/mcp/minecraft_server.rgs') #contains a list of notch_name to searge_name for the server
#We want 3 dicts per soft. One for classes, methods and fields. Each dict is going to take the searge_name as the key, as it is the only
#unique identifier we are sure of for now. Each dict will have at least 3 entries, notch_name, searge_name and full_name.
#Classes will have an identical searge_name and full_name, as they are identical. Methods will also contain the notch_signature and maybe the searge_signature.
#Packages can also be a value somewhere for the reobfuscation step.
#Let's start with the class dictionary. For this one, we just need the rgs file.
#class_dict_c = create_dic_class(client_rgs)
class_dict_s = create_dic_class(server_rgs)
#Now the fields, as they are easy to process. Need both the csv and the rgs. Third argument is to get the right column
#field_dict_c = create_dic_member(client_rgs, field_csv, class_dict_c, 'c', 'field_map', config)
field_dict_s = create_dic_member(server_rgs, field_csv, class_dict_s, 's', 'field_map')
#And finally the methods. Same as before.
#method_dict_c = create_dic_member(client_rgs, method_csv, class_dict_c, 'c', 'method_map', config)
method_dict_s = create_dic_member(server_rgs, method_csv, class_dict_s, 's', 'method_map')
nmethods=0
nfields=0
nclasses=0
ckeys=class_dict_s.keys()
ckeys.sort()
for ckey in ckeys:
nclasses=nclasses+1
#print '* Post-processing class %s...' % ckey
for mkey in method_dict_s:
method = method_dict_s[mkey]
if method['class']==ckey:
nmethods=nmethods+1
nmkey = method['csv'] # Use CSV name to determine method key.
if nmkey==None:
nmkey=method['searge']
class_dict_s[ckey]['methods'][nmkey]=method
for fkey in field_dict_s:
field = field_dict_s[fkey]
if field['class']==ckey:
nfields=nfields+1
nfkey = field['csv'] # Use CSV name to determine field key.
if nfkey==None:
nfkey=field['searge']
class_dict_s[ckey]['fields'][nfkey]=field
print '*** POST-PROCESSING COMPLETE ***'
print ' + %d classes' % nclasses
print ' + %d methods' % nmethods
print ' + %d fields' % nfields
#solve_collisions(client_dic)
#solve_collisions(server_dic)
return class_dict_s
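# The resulting mapping is keyed by deobfuscated class name; illustrative shape
# (class/member names below are made up, values come from the MCP data files):
#
#   ExampleEntity:
#     notch: ...
#     methods:
#       exampleMethod: {notch: ..., searge: ..., notch_sig: ..., class: ExampleEntity, ...}
#     fields:
#       exampleField: {notch: ..., searge: ..., class: ExampleEntity, ...}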
f = open('mappings.yml','w')
mcdict=parse_all_files()
print '+ Writing YAML mappings...'
yaml.dump(mcdict,f,default_flow_style=False)
#yaml.dump(parse_all_files(),f)
f.close()
| N3X15/MiddleCraft | lib/scripts/ImportMCP.py | Python | bsd-3-clause | 10,116 |
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Test the suitability of Python core and the availability of various
Python modules for running CDS Invenio. Warn the user about any
potential problems. Exit status: 0 if okay, 1 if not okay. Useful for
running from configure.ac.
"""
## minimally recommended/required versions:
cfg_min_python_version = "2.4"
cfg_min_mysqldb_version = "1.2.1_p2"
## 0) import modules needed for this testing:
import string
import sys
import getpass
def wait_for_user(msg):
"""Print MSG and prompt user for confirmation."""
try:
raw_input(msg)
except KeyboardInterrupt:
print "\n\nInstallation aborted."
sys.exit(1)
except EOFError:
print " (continuing in batch mode)"
return
## 1) check Python version:
if sys.version < cfg_min_python_version:
print """
*******************************************************
** ERROR: OLD PYTHON DETECTED: %s
*******************************************************
** You seem to be using an old version of Python. **
** You must use at least Python %s. **
** **
** Note that if you have more than one Python **
** installed on your system, you can specify the **
** --with-python configuration option to choose **
** a specific (e.g. non system wide) Python binary. **
** **
** Please upgrade your Python before continuing. **
*******************************************************
""" % (string.replace(sys.version, "\n", ""), cfg_min_python_version)
sys.exit(1)
## 2) check for required modules:
try:
import MySQLdb
import base64
import cPickle
import cStringIO
import cgi
import copy
import fileinput
import getopt
import sys
if sys.hexversion < 0x2060000:
import md5
else:
import hashlib
import marshal
import os
import signal
import tempfile
import time
import traceback
import unicodedata
import urllib
import zlib
import wsgiref
except ImportError, msg:
print """
*************************************************
** IMPORT ERROR %s
*************************************************
** Perhaps you forgot to install some of the **
** prerequisite Python modules? Please look **
** at our INSTALL file for more details and **
** fix the problem before continuing! **
*************************************************
""" % msg
sys.exit(1)
## 3) check for recommended modules:
try:
if (2**31 - 1) == sys.maxint:
# check for Psyco since we seem to run in 32-bit environment
import psyco
else:
# no need to advise on Psyco on 64-bit systems
pass
except ImportError, msg:
print """
*****************************************************
** IMPORT WARNING %s
*****************************************************
** Note that Psyco is not really required but we **
** recommend it for faster CDS Invenio operation **
** if you are running in 32-bit operating system. **
** **
** You can safely continue installing CDS Invenio **
** now, and add this module anytime later. (I.e. **
** even after your CDS Invenio installation is put **
** into production.) **
*****************************************************
""" % msg
wait_for_user("Press ENTER to continue the installation...")
try:
import rdflib
except ImportError, msg:
print """
*****************************************************
** IMPORT WARNING %s
*****************************************************
** Note that rdflib is needed only if you plan **
** to work with the automatic classification of **
** documents based on RDF-based taxonomies. **
** **
** You can safely continue installing CDS Invenio **
** now, and add this module anytime later. (I.e. **
** even after your CDS Invenio installation is put **
** into production.) **
*****************************************************
""" % msg
wait_for_user("Press ENTER to continue the installation...")
try:
import pyRXP
except ImportError, msg:
print """
*****************************************************
** IMPORT WARNING %s
*****************************************************
** Note that PyRXP is not really required but **
** we recommend it for fast XML MARC parsing. **
** **
** You can safely continue installing CDS Invenio **
** now, and add this module anytime later. (I.e. **
** even after your CDS Invenio installation is put **
** into production.) **
*****************************************************
""" % msg
wait_for_user("Press ENTER to continue the installation...")
try:
import libxml2
except ImportError, msg:
print """
*****************************************************
** IMPORT WARNING %s
*****************************************************
** Note that libxml2 is not really required but **
** we recommend it for XML metadata conversions **
** and for fast XML parsing. **
** **
** You can safely continue installing CDS Invenio **
** now, and add this module anytime later. (I.e. **
** even after your CDS Invenio installation is put **
** into production.) **
*****************************************************
""" % msg
wait_for_user("Press ENTER to continue the installation...")
try:
import libxslt
except ImportError, msg:
print """
*****************************************************
** IMPORT WARNING %s
*****************************************************
** Note that libxslt is not really required but **
** we recommend it for XML metadata conversions. **
** **
** You can safely continue installing CDS Invenio **
** now, and add this module anytime later. (I.e. **
** even after your CDS Invenio installation is put **
** into production.) **
*****************************************************
""" % msg
wait_for_user("Press ENTER to continue the installation...")
try:
import Gnuplot
except ImportError, msg:
print """
*****************************************************
** IMPORT WARNING %s
*****************************************************
** Note that Gnuplot.py is not really required but **
** we recommend it in order to have nice download **
** and citation history graphs on Detailed record **
** pages. **
** **
** You can safely continue installing CDS Invenio **
** now, and add this module anytime later. (I.e. **
** even after your CDS Invenio installation is put **
** into production.) **
*****************************************************
""" % msg
wait_for_user("Press ENTER to continue the installation...")
try:
import magic
except ImportError, msg:
print """
*****************************************************
** IMPORT WARNING %s
*****************************************************
** Note that magic module is not really required **
** but we recommend it in order to have detailed **
** content information about fulltext files. **
** **
** You can safely continue installing CDS Invenio **
** now, and add this module anytime later. (I.e. **
** even after your CDS Invenio installation is put **
** into production.) **
*****************************************************
""" % msg
try:
import reportlab
except ImportError, msg:
print """
*****************************************************
** IMPORT WARNING %s
*****************************************************
** Note that reportlab module is not really **
    ** required, but we recommend it if you want to    **
** enrich PDF with OCR information. **
** **
** You can safely continue installing CDS Invenio **
** now, and add this module anytime later. (I.e. **
** even after your CDS Invenio installation is put **
** into production.) **
*****************************************************
""" % msg
wait_for_user("Press ENTER to continue the installation...")
## 4) check for versions of some important modules:
if MySQLdb.__version__ < cfg_min_mysqldb_version:
print """
*****************************************************
** ERROR: PYTHON MODULE MYSQLDB %s DETECTED
*****************************************************
** You have to upgrade your MySQLdb to at least **
** version %s. You must fix this problem **
** before continuing. Please see the INSTALL file **
** for more details. **
*****************************************************
""" % (MySQLdb.__version__, cfg_min_mysqldb_version)
sys.exit(1)
try:
import Stemmer
try:
from Stemmer import algorithms
except ImportError, msg:
print """
*****************************************************
** ERROR: STEMMER MODULE PROBLEM %s
*****************************************************
** Perhaps you are using an old Stemmer version? **
** You must either remove your old Stemmer or else **
** upgrade to Snowball Stemmer
** <http://snowball.tartarus.org/wrappers/PyStemmer-1.0.1.tar.gz>
** before continuing. Please see the INSTALL file **
** for more details. **
*****************************************************
""" % (msg)
sys.exit(1)
except ImportError:
pass # no prob, Stemmer is optional
## 5) check for Python.h (needed for intbitset):
try:
from distutils.sysconfig import get_python_inc
path_to_python_h = get_python_inc() + os.sep + 'Python.h'
if not os.path.exists(path_to_python_h):
raise StandardError, "Cannot find %s" % path_to_python_h
except StandardError, msg:
print """
*****************************************************
** ERROR: PYTHON HEADER FILE ERROR %s
*****************************************************
** You do not seem to have Python developer files **
** installed (such as Python.h). Some operating **
** systems provide these in a separate Python **
** package called python-dev or python-devel. **
** You must install such a package before **
** continuing the installation process. **
*****************************************************
""" % (msg)
sys.exit(1)
| ppiotr/Bibedit-some-refactoring | configure-tests.py | Python | gpl-2.0 | 12,323 |
# Script for predicting a phrase as an Entity or a Relation (predicate).
# Run the script with the phrase as a parameter.
import pandas as pd
import numpy as np
import re
from collections import defaultdict
from keras.engine import Input
from keras.engine import Model
from keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional, Conv1D, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.layers.merge import Concatenate, Add, concatenate
from keras.preprocessing import sequence
from keras import backend as K
from keras.layers.core import Lambda
from keras.optimizers import Adam
from sklearn.metrics import accuracy_score
import cPickle
import functools
from itertools import product
from keras.callbacks import EarlyStopping
from gensim.models import word2vec
from sklearn.model_selection import KFold
from collections import defaultdict
from collections import Counter
from keras.utils import np_utils
from sklearn.metrics import f1_score
from keras.models import model_from_json
from gensim import models
import gensim
from keras.layers.normalization import BatchNormalization
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
import functools
from sklearn.model_selection import StratifiedKFold
from gensim.models.keyedvectors import KeyedVectors
import sys
import json
model_j = 'EARL/models/er.json'
model_wgt = 'EARL/models/er.h5'
max_len = 283
#lc_quad_j = './lcQuad.json'
#load the model
json_file = open(model_j, 'r')
model_json = json_file.read()
json_file.close()
model = model_from_json(model_json)
model.load_weights(model_wgt)
print("Loaded model from disk")
adam = Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
def clean_str(string, TREC=False):
"""
Tokenization/string cleaning for all datasets except for SST.
Every dataset is lower cased except for TREC
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip()
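# Example (illustrative): clean_str("a,b   c") returns "a , b c" -- punctuation such as
# commas is padded with spaces and runs of whitespace collapse to a single space.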
def predict_phrase(phrase):
#load the model
#preprocess the phrase
#phrase_clean = clean_str(phrase)
phrase_clean = phrase
#load the dictionary
char_dict = np.load('EARL/models/char_dict.npy').item()
#phrase_clean = [char for char in phrase_clean]
#print phrase_clean
phrase_clean = [char_dict[char] for char in phrase_clean]
#print phrase_clean
#print np.concatenate((np.zeros(max_len-len(phrase_clean)), phrase_clean) )
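    # Left-pad the encoded character sequence with zeros to the fixed input length
    # (270 here) expected by the saved model, then run a single prediction.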
prediction = model.predict(np.concatenate((np.zeros((270-len(phrase_clean))), phrase_clean)).reshape(1,270))
print prediction[0]
pred = np.argmax(prediction[0])
return 'R' if pred == 0 else 'E'
def predict_cln_phrs(clean_phrase):
tags = []
print clean_phrase
for phr in clean_phrase:
print phr
tags.append(predict_phrase(phr))
return tags
if __name__ == '__main__':
#phrase = sys.argv[1:]
lc_quad_j = sys.argv[1]
#print phrase
#predict_phrase(phrase)
#Read the json file
#lcquad = json.loads(open(lc_quad_j).read())
#chunks = [dat['CleanPhrase'] for dat in lcquad['PhraseChunked_LC-QuAD']]
#for dat in lcquad['PhraseChunked_LC-QuAD']:
# dat['E-R-predictor'] = predict_cln_phrs(dat['CleanPhrase'].replace('[','').replace(']','').split(','))
#print lcquad['PhraseChunked_LC-QuAD'][0]
#np.save('lcquad.npy',lcquad)
#with open('lcQuad_ers.json','w') as f:
# json.dump(lcquad, f)
print predict_phrase(list(lc_quad_j))
| AskNowQA/EARL | scripts/ER_predictor/predict_phrase.py | Python | gpl-3.0 | 3,993 |
from __future__ import unicode_literals
# import datetime
import difflib
import itertools
import json
import math
import os
import time
import datetime
from pokemongo_bot import inventory
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.human_behaviour import sleep, action_delay
from pokemongo_bot.item_list import Item
from pokemongo_bot.tree_config_builder import ConfigException
from pokemongo_bot.worker_result import WorkerResult
SUCCESS = 1
ERROR_XP_BOOST_ALREADY_ACTIVE = 3
LOG_TIME_INTERVAL = 120
class PokemonOptimizer(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
def __init__(self, bot, config):
super(PokemonOptimizer, self).__init__(bot, config)
def initialize(self):
self.max_pokemon_storage = inventory.get_pokemon_inventory_size()
self.last_pokemon_count = 0
self.pokemon_names = [p.name for p in inventory.pokemons().STATIC_DATA]
self.evolution_map = {}
self.debug = self.config.get('debug', False)
self.ongoing_stardust_count = 0
self.buddy = None
self.buddyid = 0
self.lock_buddy = True
self.no_log_until = 0
self.ignore_favorite = []
self.used_lucky_egg = None
pokemon_upgrade_cost_file = os.path.join(_base_dir, "data", "pokemon_upgrade_cost.json")
with open(pokemon_upgrade_cost_file, "r") as fd:
self.pokemon_upgrade_cost = json.load(fd)
if self.config.get("keep", None) is not None:
raise ConfigException("Pokemon Optimizer configuration has changed. See docs/pokemon_optimized.md or configs/config.json.optimizer.example")
if self.debug:
log_file_path = os.path.join(_base_dir, "data", "pokemon-optimizer-%s.log" % self.bot.config.username)
with open(log_file_path, "a") as _:
pass
self.log_file = open(log_file_path, "r+")
self.log_file.seek(0, 2)
self.config_bulktransfer_enabled = self.config.get("bulktransfer_enabled", False)
self.config_use_evolution_items = self.config.get("use_evolution_items", False)
self.config_max_bulktransfer = self.config.get("max_bulktransfer", 10)
self.config_min_slots_left = self.config.get("min_slots_left", 5)
self.config_action_wait_min = self.config.get("action_wait_min", 3)
self.config_action_wait_max = self.config.get("action_wait_max", 5)
self.config_transfer = self.config.get("transfer", False)
self.config_evolve = self.config.get("evolve", False)
self.config_evolve_to_final = self.config.get("evolve_to_final", True)
self.config_evolve_time = self.config.get("evolve_time", 25)
self.config_evolve_for_xp = self.config.get("evolve_for_xp", True)
self.config_transfer_after_xp_evolve = self.config.get("transfer_after_xp_evolve", True)
self.config_evolve_only_with_lucky_egg = self.config.get("evolve_only_with_lucky_egg", False)
self.config_evolve_count_for_lucky_egg = self.config.get("evolve_count_for_lucky_egg", 80)
self.config_may_use_lucky_egg = self.config.get("may_use_lucky_egg", False)
self.config_may_evolve_favorites = self.config.get("may_evolve_favorites", True)
self.config_may_upgrade_favorites = self.config.get("may_upgrade_favorites", True)
self.config_may_unfavor_pokemon = self.config.get("may_unfavor_pokemon", False)
self.config_upgrade = self.config.get("upgrade", False)
self.config_upgrade_level = self.config.get("upgrade_level", 30)
self.config_groups = self.config.get("groups", {"gym": ["Dragonite", "Snorlax", "Lapras", "Arcanine"]})
self.config_rules = self.config.get("rules", [{"mode": "overall", "top": 1, "sort": ["max_cp", "cp"], "keep": {"candy": -124}, "evolve": False, "buddy": True},
{"mode": "overall", "top": 1, "sort": ["-candy", "max_cp", "cp"], "evolve": False, "buddy": True},
{"mode": "by_family", "top": 3, "names": ["gym"], "sort": ["iv", "ncp"], "evolve": {"iv": 0.9, "ncp": 0.9}, "upgrade": {"iv": 0.9, "ncp": 0.9}},
{"mode": "by_family", "top": 1, "sort": ["iv"], "evolve": {"iv": 0.9}},
{"mode": "by_family", "top": 1, "sort": ["ncp"], "evolve": {"ncp": 0.9}},
{"mode": "by_family", "top": 1, "sort": ["cp"], "evolve": False},
{"mode": "by_pokemon", "names": ["!with_next_evolution"], "top": 1, "sort": ["dps_attack", "iv"], "keep": {"iv": 0.9}}])
if (not self.config_may_use_lucky_egg) and self.config_evolve_only_with_lucky_egg:
self.config_evolve = False
if self.config_evolve_for_xp is True:
self.config_evolve_for_xp = ["Caterpie", "Weedle", "Pidgey", "Rattata", "Nidoran F", "Nidoran M",
"Zubat", "Oddish", "Paras", "Venonat", "Psyduck", "Tentacool",
"Magnemite", "Krabby", "Voltorb", "Goldeen", "Staryu", "Eevee",
"Sentret", "Swinub", "Hoothoot", "Ledyba", "Natu", "Spinarak",
"Wooper", "Marill", "Remoraid"]
elif self.config_evolve_for_xp is False:
self.config_evolve_for_xp = []
self.config_evolve_for_xp_whitelist, self.config_evolve_for_xp_blacklist = self.get_colorlist(self.config_evolve_for_xp)
self.config_groups["with_next_evolution"] = []
self.config_groups["with_previous_evolution"] = []
for pokemon in inventory.Pokemons.STATIC_DATA:
if pokemon.has_next_evolution:
self.config_groups["with_next_evolution"].append(pokemon.name)
if pokemon.prev_evolutions_all:
self.config_groups["with_previous_evolution"].append(pokemon.name)
def log(self, txt):
if self.log_file.tell() >= 1024 * 1024:
self.log_file.seek(0, 0)
self.log_file.write("[%s] %s\n" % (datetime.datetime.now().isoformat(str(" ")), txt))
self.log_file.flush()
def active_lucky_egg(self):
if self.used_lucky_egg is None:
return False
        # The egg is still active if it was used less than 30 minutes ago
if self.used_lucky_egg > datetime.datetime.now()-datetime.timedelta(minutes=30):
return True
else:
return False
def get_pokemon_slot_left(self):
pokemon_count = inventory.Pokemons.get_space_used()
if pokemon_count != self.last_pokemon_count:
self.last_pokemon_count = pokemon_count
self.logger.info("Pokemon Bag: %s / %s", pokemon_count, self.max_pokemon_storage)
inventory.update_web_inventory()
return inventory.Pokemons.get_space_left()
def work(self):
if not self.enabled:
return WorkerResult.SUCCESS
        # Run the optimizer twice so that low-value pokemon produced by evolution are also cleaned up.
run_number = 0
for _ in itertools.repeat(None, 2):
run_number += 1
self.check_buddy()
self.open_inventory()
keep_all = []
try_evolve_all = []
try_upgrade_all = []
buddy_all = []
favor_all = []
for rule in self.config_rules:
mode = rule.get("mode", "by_family")
names = rule.get("names", [])
check_top = rule.get("top", "all")
check_keep = rule.get("keep", True)
whitelist, blacklist = self.get_colorlist(names)
if check_top == "all" and names == [] and check_keep:
self.logger.info("WARNING!! Will not transfer any Pokemon!!")
self.logger.info(rule)
self.logger.info("This rule is set to keep (`keep` is true) all Pokemon (no `top` and no `names` set!!)")
self.logger.info("Are you sure you want this?")
if mode == "by_pokemon":
for pokemon_id, pokemon_list in self.group_by_pokemon_id(inventory.pokemons().all()):
name = inventory.pokemons().name_for(pokemon_id)
if name in blacklist:
continue
if whitelist and (name not in whitelist):
continue
sorted_list = self.score_and_sort(pokemon_list, rule)
if len(sorted_list) == 0:
continue
keep, try_evolve, try_upgrade, buddy, favor = self.get_best_pokemon_for_rule(sorted_list, rule)
keep_all += keep
try_evolve_all += try_evolve
try_upgrade_all += try_upgrade
buddy_all += buddy
favor_all += favor
elif mode == "by_family":
for family_id, pokemon_list in self.group_by_family_id(inventory.pokemons().all()):
matching_names = self.get_family_names(family_id)
if any(n in blacklist for n in matching_names):
continue
if whitelist and not any(n in whitelist for n in matching_names):
continue
sorted_list = self.score_and_sort(pokemon_list, rule)
if len(sorted_list) == 0:
continue
if family_id == 133: # "Eevee"
keep, try_evolve, try_upgrade, buddy, favor = self.get_multi_best_pokemon_for_rule(sorted_list, rule, 3)
else:
keep, try_evolve, try_upgrade, buddy, favor = self.get_best_pokemon_for_rule(sorted_list, rule)
keep_all += keep
try_evolve_all += try_evolve
try_upgrade_all += try_upgrade
buddy_all += buddy
favor_all += favor
elif mode == "overall":
pokemon_list = []
for pokemon in inventory.pokemons().all():
name = pokemon.name
if name in blacklist:
continue
if whitelist and (name not in whitelist):
continue
pokemon_list.append(pokemon)
sorted_list = self.score_and_sort(pokemon_list, rule)
if len(sorted_list) == 0:
continue
keep, try_evolve, try_upgrade, buddy, favor = self.get_best_pokemon_for_rule(sorted_list, rule)
keep_all += keep
try_evolve_all += try_evolve
try_upgrade_all += try_upgrade
buddy_all += buddy
favor_all += favor
keep_all = self.unique_pokemon_list(keep_all)
try_evolve_all = self.unique_pokemon_list(try_evolve_all)
try_upgrade_all = self.unique_pokemon_list(try_upgrade_all)
buddy_all = self.unique_pokemon_list(buddy_all)
try_favor_all = self.unique_pokemon_list(favor_all)
            # Favoriting is independent of evolving and can be done even when the bag is not full,
            # just like setting a buddy.
if self.config_may_unfavor_pokemon:
unfavor = []
for pokemon in inventory.pokemons().all():
if not pokemon in try_favor_all and pokemon.is_favorite:
unfavor.append(pokemon)
if len(unfavor) > 0:
self.logger.info("Marking %s Pokemon as no longer favorite", len(unfavor))
for pokemon in unfavor:
self.unfavor_pokemon(pokemon)
# Dont favor Pokemon if already a favorite
try_favor_all = [p for p in try_favor_all if not p.is_favorite]
try_favor_all = [p for p in try_favor_all if p.unique_id not in self.ignore_favorite]
if len(try_favor_all) > 0:
self.logger.info("Marking %s Pokemon as favorite", len(try_favor_all))
for pokemon in try_favor_all:
if pokemon.is_favorite is False:
self.favor_pokemon(pokemon)
if (not self.lock_buddy) and (len(buddy_all) > 0):
new_buddy = buddy_all[0]
if (not self.buddy) or (self.buddy["id"] != new_buddy.unique_id):
self.set_buddy_pokemon(new_buddy)
# Only check bag on the first run, second run ignores if the bag is empty enough
if run_number == 1 and self.get_pokemon_slot_left() > self.config_min_slots_left:
return WorkerResult.SUCCESS
transfer_all = []
evolve_all = []
upgrade_all = []
xp_all = []
for family_id, pokemon_list in self.group_by_family_id(inventory.pokemons().all()):
keep = [p for p in keep_all if self.get_family_id(p) == family_id]
try_evolve = [p for p in try_evolve_all if self.get_family_id(p) == family_id]
try_upgrade = [p for p in try_upgrade_all if self.get_family_id(p) == family_id]
transfer, evolve, upgrade, xp = self.get_evolution_plan(family_id, pokemon_list, keep, try_evolve, try_upgrade)
transfer_all += transfer
evolve_all += evolve
upgrade_all += upgrade
xp_all += xp
if not self.config_may_evolve_favorites:
self.logger.info("Removing favorites from evolve list.")
evolve_all = [p for p in evolve_all if not p.is_favorite]
if not self.config_may_upgrade_favorites:
self.logger.info("Removing favorites from upgrade list.")
upgrade_all = [p for p in upgrade_all if not p.is_favorite]
self.apply_optimization(transfer_all, evolve_all, upgrade_all, xp_all)
return WorkerResult.SUCCESS
def check_buddy(self):
self.buddy = self.bot.player_data.get("buddy_pokemon", {})
self.buddyid = self._get_buddyid()
if not self.buddy:
self.lock_buddy = False
return
pokemon = next((p for p in inventory.pokemons().all() if p.unique_id == self.buddy["id"]), None)
if not pokemon:
return
km_walked = inventory.player().player_stats.get("km_walked", 0)
last_km_awarded = self.buddy.setdefault("last_km_awarded", km_walked)
distance_walked = km_walked - last_km_awarded
distance_needed = pokemon.buddy_distance_needed
if distance_walked >= distance_needed:
self.get_buddy_walked(pokemon)
# self.buddy["start_km_walked"] can be empty here
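            # Illustrative example: with distance_needed = 3 km and distance_walked = 7.5 km,
            # last_km_awarded below advances by 3 * int(7.5 / 3) = 6 km beyond start_km_walked.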
if 'start_km_walked' not in self.buddy:
self.buddy["start_km_walked"] = 0
self.buddy["last_km_awarded"] = self.buddy["start_km_walked"] + distance_needed * int(distance_walked / distance_needed)
self.lock_buddy = False
else:
now = time.time()
if self.no_log_until < now:
self.no_log_until = now + LOG_TIME_INTERVAL
self.emit_event("buddy_walked",
formatted="Buddy {pokemon} walking: {distance_walked:.2f} / {distance_needed:.2f} km",
data={"pokemon": pokemon.name,
"distance_walked": distance_walked,
"distance_needed": distance_needed})
def open_inventory(self):
for pokemon in inventory.pokemons().all():
setattr(pokemon, "ncp", pokemon.cp_percent)
setattr(pokemon, "max_cp", pokemon.static.max_cp)
setattr(pokemon, "dps", pokemon.moveset.dps)
setattr(pokemon, "dps1", pokemon.fast_attack.dps)
setattr(pokemon, "dps2", pokemon.charged_attack.dps)
setattr(pokemon, "dps_attack", pokemon.moveset.dps_attack)
setattr(pokemon, "dps_defense", pokemon.moveset.dps_defense)
setattr(pokemon, "attack_perfection", pokemon.moveset.attack_perfection)
setattr(pokemon, "defense_perfection", pokemon.moveset.defense_perfection)
setattr(pokemon, "candy", pokemon.candy_quantity)
candy_to_evolution = max(pokemon.evolution_cost - pokemon.candy_quantity, 0)
setattr(pokemon, "candy_to_evolution", candy_to_evolution)
self.ongoing_stardust_count = self.bot.stardust
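    # get_colorlist splits a rule's "names" list into (whitelist, blacklist).
    # Illustrative example: ["gym", "!Eevee"] expands the "gym" group from
    # self.config_groups into the whitelist and puts "Eevee" on the blacklist;
    # a leading "!" or "-" marks an entry (or a whole group) as excluded.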
def get_colorlist(self, names):
whitelist = []
blacklist = []
for name in names:
if not name:
continue
if name[0] not in ['!', '-']:
group = self.config_groups.get(name, [])
if not group:
name = self.get_closest_name(name)
if name:
whitelist.append(name)
whitelist_sub, blacklist_sub = self.get_colorlist(group)
whitelist += whitelist_sub
blacklist += blacklist_sub
else:
name = name[1:]
group = self.config_groups.get(name, [])
if not group:
name = self.get_closest_name(name)
if name:
blacklist.append(name)
blacklist_sub, whitelist_sub = self.get_colorlist(group)
blacklist += blacklist_sub
whitelist += whitelist_sub
return (whitelist, blacklist)
def get_family_names(self, family_id):
ids = [family_id]
ids += inventory.pokemons().data_for(family_id).next_evolutions_all[:]
return [inventory.pokemons().name_for(x) for x in ids]
def get_closest_name(self, name):
mapping = {ord(x): ord(y) for x, y in zip("\u2641\u2642.-", "fm ")}
clean_names = {n.lower().translate(mapping): n for n in self.pokemon_names}
closest_names = difflib.get_close_matches(name.lower().translate(mapping), clean_names.keys(), 1)
if closest_names:
closest_name = clean_names[closest_names[0]]
if name != closest_name:
self.logger.warning("Unknown Pokemon name [%s]. Assuming it is [%s]", name, closest_name)
return closest_name
else:
raise ConfigException("Unknown Pokemon name [%s]" % name)
def group_by_pokemon_id(self, pokemon_list):
sorted_list = sorted(pokemon_list, key=self.get_pokemon_id)
return itertools.groupby(sorted_list, self.get_pokemon_id)
def group_by_family_id(self, pokemon_list):
sorted_list = sorted(pokemon_list, key=self.get_family_id)
return itertools.groupby(sorted_list, self.get_family_id)
def get_pokemon_id(self, pokemon):
return pokemon.pokemon_id
def get_family_id(self, pokemon):
return pokemon.first_evolution_id
def score_and_sort(self, pokemon_list, rule):
pokemon_list = list(pokemon_list)
if self.debug:
self.log("Pokemon %s" % pokemon_list)
self.log("Rule %s" % rule)
for pokemon in pokemon_list:
setattr(pokemon, "__score__", self.get_score(pokemon, rule))
keep = [p for p in pokemon_list if p.__score__[1] is True]
keep.sort(key=lambda p: p.__score__[0], reverse=True)
return keep
def get_score(self, pokemon, rule):
score = []
for a in rule.get("sort", []):
if a[0] == "-":
value = -getattr(pokemon, a[1:], 0)
else:
value = getattr(pokemon, a, 0)
score.append(value)
rule_keep = rule.get("keep", True)
rule_evolve = rule.get("evolve", True)
rule_upgrade = rule.get("upgrade", False)
rule_buddy = rule.get("buddy", False)
rule_favor = rule.get("favorite", False)
keep = rule_keep not in [False, {}]
keep &= self.satisfy_requirements(pokemon, rule_keep)
may_try_evolve = (hasattr(pokemon, "has_next_evolution") and pokemon.has_next_evolution())
may_try_evolve &= rule_evolve not in [False, {}]
may_try_evolve &= self.satisfy_requirements(pokemon, rule_evolve)
may_try_upgrade = rule_upgrade not in [False, {}]
may_try_upgrade &= self.satisfy_requirements(pokemon, rule_upgrade)
may_buddy = rule_buddy not in [False, {}]
may_buddy &= pokemon.in_fort is False
        may_buddy &= self.satisfy_requirements(pokemon, rule_buddy)
may_favor = rule_favor not in [False, {}]
        may_favor &= self.satisfy_requirements(pokemon, rule_favor)
if self.debug:
self.log("P:%s S:%s K:%s E:%s U:%s B:%s F:%s" % (pokemon, tuple(score), keep, may_try_evolve, may_try_upgrade, may_buddy, may_favor))
return tuple(score), keep, may_try_evolve, may_try_upgrade, may_buddy, may_favor
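    # Requirement dicts in the rules are interpreted by satisfy_requirements below.
    # Illustrative examples (attribute names taken from the default rules above):
    #   {"iv": 0.9}                      -> keep only pokemon with iv >= 0.9
    #   {"candy": -124}                  -> a negative value means "at most", i.e. candy <= 124
    #   {"cp": [1000, 2000]}             -> a [min, max] range
    #   {"cp": [[0, 100], [900, 1000]]}  -> a list of ranges, any of which may match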
def satisfy_requirements(self, pokemon, req):
if type(req) is bool:
return req
satisfy = True
for a, v in req.items():
value = getattr(pokemon, a, 0)
if (type(v) is str) or (type(v) is unicode):
v = float(v)
if type(v) is list:
if type(v[0]) is list:
satisfy_range = False
for r in v:
satisfy_range |= (value >= r[0]) and (value <= r[1])
satisfy &= satisfy_range
else:
satisfy &= (value >= v[0]) and (value <= v[1])
elif v < 0:
satisfy &= (value <= abs(v))
else:
satisfy &= (value >= v)
return satisfy
def get_best_pokemon_for_rule(self, pokemon_list, rule):
pokemon_list = list(pokemon_list)
if len(pokemon_list) == 0:
            return ([], [], [], [], [])
top = max(rule.get("top", 0), 0)
index = int(math.ceil(top)) - 1
        if 0 < top < 1:
            # A bare object() cannot hold attributes, so use a minimal placeholder
            # class to carry the scaled-down "worst acceptable" attribute values.
            class _Threshold(object):
                pass
            worst = _Threshold()
            for a in rule.get("sort", []):
                best_attribute = getattr(pokemon_list[0], a)
                setattr(worst, a, best_attribute * (1 - top))
            setattr(worst, "__score__", self.get_score(worst, rule))
elif 0 <= index < len(pokemon_list):
worst = pokemon_list[index]
else:
worst = pokemon_list[-1]
return self.get_better_pokemon(pokemon_list, worst)
def get_multi_best_pokemon_for_rule(self, family_list, rule, nb_branch):
family_list = list(family_list)
if len(family_list) == 0:
            return ([], [], [], [], [])
# Handle each group of senior independently
senior_pokemon_list = [p for p in family_list if not p.has_next_evolution()]
other_family_list = [p for p in family_list if p.has_next_evolution()]
senior_pids = set(p.pokemon_id for p in senior_pokemon_list)
keep_all = []
try_evolve_all = []
try_upgrade_all = []
buddy_all = []
favor_all = []
if not self.config_evolve:
# Player handle evolution manually = Fall-back to per Pokemon behavior
for _, pokemon_list in self.group_by_pokemon_id(family_list):
keep, try_evolve, try_upgrade, buddy, favor = self.get_best_pokemon_for_rule(pokemon_list, rule)
keep_all += keep
try_evolve_all += try_evolve
try_upgrade_all += try_upgrade
buddy_all += buddy
favor_all += favor
else:
for _, pokemon_list in self.group_by_pokemon_id(senior_pokemon_list):
keep, try_evolve, try_upgrade, buddy, favor = self.get_best_pokemon_for_rule(pokemon_list, rule)
keep_all += keep
try_evolve_all += try_evolve
try_upgrade_all += try_upgrade
buddy_all += buddy
favor_all += favor
if len(other_family_list) > 0:
if len(senior_pids) < nb_branch:
# We did not get every combination yet = All other Pokemon are potentially good to keep
worst = other_family_list[-1]
else:
best = keep_all + try_evolve_all + try_upgrade_all
best.sort(key=lambda p: p.__score__[0], reverse=True)
worst = best[-1]
keep, try_evolve, try_upgrade, buddy, favor = self.get_better_pokemon(other_family_list, worst, 12)
keep_all += keep
try_evolve_all += try_evolve
try_upgrade_all += try_upgrade
buddy_all += buddy
favor_all += favor
return keep_all, try_evolve_all, try_upgrade_all, buddy_all, favor_all
def get_better_pokemon(self, pokemon_list, worst, limit=1000):
keep = [p for p in pokemon_list if p.__score__[0] >= worst.__score__[0]][:limit]
try_evolve = [p for p in keep if p.__score__[2] is True]
try_upgrade = [p for p in keep if (p.__score__[2] is False) and (p.__score__[3] is True)]
buddy = [p for p in keep if p.__score__[4] is True]
favor = [p for p in keep if p.__score__[5] is True]
return keep, try_evolve, try_upgrade, buddy, favor
def get_evolution_plan(self, family_id, family_list, keep, try_evolve, try_upgrade):
candies = inventory.candies().get(family_id).quantity
family_name = inventory.Pokemons().name_for(family_id)
# All the rest is crap, for now
crap = list(family_list)
crap = [p for p in crap if p not in keep]
crap = [p for p in crap if not p.in_fort and not p.is_favorite and not (p.unique_id == self.buddyid)]
crap.sort(key=lambda p: (p.iv, p.cp), reverse=True)
# We will gain a candy whether we choose to transfer or evolve these Pokemon
candies += len(crap)
evolve = []
for pokemon in try_evolve:
pokemon_id = pokemon.pokemon_id
needed_evolution_item = inventory.pokemons().evolution_item_for(pokemon_id)
if needed_evolution_item is not None:
if self.config_use_evolution_items:
# We need a special Item to evolve this Pokemon!
item = inventory.items().get(needed_evolution_item)
needed = inventory.pokemons().evolution_items_needed_for(pokemon_id)
if item.count < needed:
self.logger.info("To evolve a {} we need {} of {}. We have {}".format(pokemon.name, needed, item.name, item.count))
continue
else:
# pass for this Pokemon
continue
if self.config_evolve_to_final:
pokemon_id = pokemon.pokemon_id
while inventory.pokemons().has_next_evolution(pokemon_id):
candies -= inventory.pokemons().evolution_cost_for(pokemon_id)
pokemon_id = inventory.pokemons().next_evolution_ids_for(pokemon_id)[0]
else:
candies -= pokemon.evolution_cost
if candies < 0:
continue
if self.config_evolve_to_final:
pokemon_id = pokemon.pokemon_id
while inventory.pokemons().has_next_evolution(pokemon_id):
candies += 1
evolve.append(pokemon)
pokemon_id = inventory.pokemons().next_evolution_ids_for(pokemon_id)[0]
else:
candies += 1
evolve.append(pokemon)
upgrade = []
upgrade_level = min(self.config_upgrade_level, inventory.player().level + 1.5, 40)
# Highest CP on top.
if len(try_upgrade) > 0:
try_upgrade.sort(key=lambda p: (p.cp), reverse=True)
for pokemon in try_upgrade:
# self.log("Considering %s for upgrade" % pokemon.name)
if pokemon.level >= upgrade_level:
# self.log("Pokemon already at target level. %s" % pokemon.level)
continue
full_upgrade_candy_cost = 0
full_upgrade_stardust_cost = 0
for i in range(int(pokemon.level * 2), int(upgrade_level * 2)):
upgrade_cost = self.pokemon_upgrade_cost[i - 2]
full_upgrade_candy_cost += upgrade_cost[0]
full_upgrade_stardust_cost += upgrade_cost[1]
candies -= full_upgrade_candy_cost
self.ongoing_stardust_count -= full_upgrade_stardust_cost
if (candies < 0) or (self.ongoing_stardust_count < 0):
# self.log("Not enough candy: %s" % candies)
# self.log("or stardust %s" % self.ongoing_stardust_count)
                # We didn't use the stardust, so refund it...
self.ongoing_stardust_count += full_upgrade_stardust_cost
continue
# self.log("Pokemon can be upgraded!!")
upgrade.append(pokemon)
if (not self.config_evolve_for_xp) or (family_name in self.config_evolve_for_xp_blacklist):
xp = []
transfer = crap
elif self.config_evolve_for_xp_whitelist and (family_name not in self.config_evolve_for_xp_whitelist):
xp = []
transfer = crap
else:
# Compute how many crap we should keep if we want to batch evolve them for xp
lowest_evolution_cost = inventory.pokemons().evolution_cost_for(family_id)
# transfer + keep_for_xp = len(crap)
# leftover_candies = candies - len(crap) + transfer * 1
# keep_for_xp = (leftover_candies - 1) / (lowest_evolution_cost - 1)
# keep_for_xp = (candies - len(crap) + transfer - 1) / (lowest_evolution_cost - 1)
# keep_for_xp = (candies - keep_for_xp - 1) / (lowest_evolution_cost - 1)
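            # Worked example (illustrative): with 25 candies and a lowest evolution cost of 12,
            # keep_for_xp = int((25 - 1) / 12) = 2, so two of the cheapest-to-evolve pokemon
            # are kept for the xp evolution batch.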
if (candies > 0) and lowest_evolution_cost:
keep_for_xp = int((candies - 1) / lowest_evolution_cost)
else:
keep_for_xp = 0
xp = [p for p in crap if p.has_next_evolution() and p.evolution_cost == lowest_evolution_cost][:keep_for_xp]
transfer = [p for p in crap if p not in xp]
return (transfer, evolve, upgrade, xp)
def unique_pokemon_list(self, pokemon_list):
seen = set()
return [p for p in pokemon_list if not (p.unique_id in seen or seen.add(p.unique_id))]
def apply_optimization(self, transfer, evolve, upgrade, xp):
transfer_count = len(transfer)
evolve_count = len(evolve)
upgrade_count = len(upgrade)
xp_count = len(xp)
if self.config_transfer or self.bot.config.test:
if transfer_count > 0:
self.logger.info("Transferring %s Pokemon", transfer_count)
self.transfer_pokemon(transfer)
if self.config_upgrade or self.bot.config.test:
if upgrade_count > 0:
self.logger.info("Upgrading %s Pokemon [%s stardust]", upgrade_count, self.bot.stardust)
for pokemon in upgrade:
self.upgrade_pokemon(pokemon)
if self.config_evolve or self.bot.config.test:
evolve_xp_count = evolve_count + xp_count
if evolve_xp_count > 0:
skip_evolve = False
if self.config_evolve and self.config_may_use_lucky_egg and (not self.bot.config.test):
lucky_egg = inventory.items().get(Item.ITEM_LUCKY_EGG.value) # @UndefinedVariable
if lucky_egg.count == 0:
if self.config_evolve_only_with_lucky_egg:
skip_evolve = True
self.emit_event("skip_evolve",
formatted="Skipping evolution step. No lucky egg available")
elif evolve_xp_count < self.config_evolve_count_for_lucky_egg:
if self.config_evolve_only_with_lucky_egg:
skip_evolve = True
self.emit_event("skip_evolve",
formatted="Skipping evolution step. Not enough Pokemon to evolve with lucky egg: %s/%s" % (evolve_xp_count, self.config_evolve_count_for_lucky_egg))
elif self.get_pokemon_slot_left() > self.config_min_slots_left:
skip_evolve = True
self.emit_event("skip_evolve",
formatted="Waiting for more Pokemon to evolve with lucky egg: %s/%s" % (evolve_xp_count, self.config_evolve_count_for_lucky_egg))
else:
self.use_lucky_egg()
if not skip_evolve:
self.evolution_map = {}
if evolve_count > 0:
self.logger.info("Evolving %s Pokemon (the best)", evolve_count)
for pokemon in evolve:
self.evolve_pokemon(pokemon)
if xp_count > 0:
self.logger.info("Evolving %s Pokemon (for xp)", xp_count)
for pokemon in xp:
self.evolve_pokemon(pokemon, self.config_transfer_after_xp_evolve)
def transfer_pokemon(self, pokemons, skip_delay=False):
error_codes = {
0: 'UNSET',
1: 'SUCCESS',
2: 'POKEMON_DEPLOYED',
3: 'FAILED',
4: 'ERROR_POKEMON_IS_EGG',
5: 'ERROR_POKEMON_IS_BUDDY'
}
if self.config_bulktransfer_enabled and len(pokemons) > 1:
while len(pokemons) > 0:
action_delay(self.config_action_wait_min, self.config_action_wait_max)
pokemon_ids = []
count = 0
transfered = []
while len(pokemons) > 0 and count < self.config_max_bulktransfer:
pokemon = pokemons.pop()
transfered.append(pokemon)
pokemon_ids.append(pokemon.unique_id)
count = count + 1
try:
if self.config_transfer:
request = self.bot.api.create_request()
request.release_pokemon(pokemon_ids=pokemon_ids)
response_dict = request.call()
result = response_dict['responses']['RELEASE_POKEMON']['result']
if result != 1:
self.logger.error(u'Error while transfer pokemon: {}'.format(error_codes[result]))
return False
except Exception:
return False
for pokemon in transfered:
candy = inventory.candies().get(pokemon.pokemon_id)
if self.config_transfer and (not self.bot.config.test):
candy.add(1)
self.emit_event("pokemon_release",
formatted="Exchanged {pokemon} [IV {iv}] [CP {cp}] [{candy} candies]",
data={"pokemon": pokemon.name,
"iv": pokemon.iv,
"cp": pokemon.cp,
"candy": candy.quantity})
if self.config_transfer:
inventory.pokemons().remove(pokemon.unique_id)
with self.bot.database as db:
cursor = db.cursor()
cursor.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='transfer_log'")
db_result = cursor.fetchone()
if db_result[0] == 1:
db.execute("INSERT INTO transfer_log (pokemon, iv, cp) VALUES (?, ?, ?)", (pokemon.name, pokemon.iv, pokemon.cp))
else:
for pokemon in pokemons:
if self.config_transfer and (not self.bot.config.test):
request = self.bot.api.create_request()
request.release_pokemon(pokemon_id=pokemon.unique_id)
response_dict = request.call()
else:
response_dict = {"responses": {"RELEASE_POKEMON": {"candy_awarded": 0}}}
if not response_dict:
return False
candy_awarded = response_dict.get("responses", {}).get("RELEASE_POKEMON", {}).get("candy_awarded", 0)
candy = inventory.candies().get(pokemon.pokemon_id)
if self.config_transfer and (not self.bot.config.test):
candy.add(candy_awarded)
self.emit_event("pokemon_release",
formatted="Exchanged {pokemon} [IV {iv}] [CP {cp}] [{candy} candies]",
data={"pokemon": pokemon.name,
"iv": pokemon.iv,
"cp": pokemon.cp,
"candy": candy.quantity})
if self.config_transfer and (not self.bot.config.test):
inventory.pokemons().remove(pokemon.unique_id)
with self.bot.database as db:
cursor = db.cursor()
cursor.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='transfer_log'")
db_result = cursor.fetchone()
if db_result[0] == 1:
db.execute("INSERT INTO transfer_log (pokemon, iv, cp) VALUES (?, ?, ?)", (pokemon.name, pokemon.iv, pokemon.cp))
if not skip_delay:
action_delay(self.config_action_wait_min, self.config_action_wait_max)
return True
def use_lucky_egg(self):
lucky_egg = inventory.items().get(Item.ITEM_LUCKY_EGG.value) # @UndefinedVariable
if lucky_egg.count == 0:
return False
response_dict = self.bot.use_lucky_egg()
if not response_dict:
self.emit_event("lucky_egg_error",
level='error',
formatted="Failed to use lucky egg!")
return False
result = response_dict.get("responses", {}).get("USE_ITEM_XP_BOOST", {}).get("result", 0)
if result == SUCCESS:
lucky_egg.remove(1)
self.emit_event("used_lucky_egg",
formatted="Used lucky egg ({amount_left} left).",
data={"amount_left": lucky_egg.count})
self.used_lucky_egg = datetime.datetime.now()
return True
elif result == ERROR_XP_BOOST_ALREADY_ACTIVE:
self.emit_event("used_lucky_egg",
formatted="Lucky egg already active ({amount_left} left).",
data={"amount_left": lucky_egg.count})
return True
else:
self.emit_event("lucky_egg_error",
level='error',
formatted="Failed to use lucky egg!")
return False
def evolve_pokemon(self, pokemon, transfer=False):
while pokemon.unique_id in self.evolution_map:
pokemon = self.evolution_map[pokemon.unique_id]
if self.config_evolve and (not self.bot.config.test):
needed_evolution_item = inventory.pokemons().evolution_item_for(pokemon.pokemon_id)
if needed_evolution_item is not None:
if self.config_use_evolution_items:
                    # Pass the required evolution item along with the evolve request
request = self.bot.api.create_request()
request.evolve_pokemon(pokemon_id=pokemon.unique_id, evolution_item_requirement=needed_evolution_item)
response_dict = request.call()
else:
return False
else:
request = self.bot.api.create_request()
request.evolve_pokemon(pokemon_id=pokemon.unique_id)
response_dict = request.call()
else:
response_dict = {"responses": {"EVOLVE_POKEMON": {"result": SUCCESS}}}
if not response_dict:
return False
result = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("result", 0)
if result != SUCCESS:
self.logger.info("Can't evolve %s" % pokemon.name)
self.logger.info(response_dict)
self.logger.info(result)
return False
xp = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("experience_awarded", 0)
candy_awarded = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("candy_awarded", 0)
candy = inventory.candies().get(pokemon.pokemon_id)
evolution = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("evolved_pokemon_data", {})
if self.config_evolve and (not self.bot.config.test):
candy.consume(pokemon.evolution_cost - candy_awarded)
inventory.player().exp += xp
new_pokemon = inventory.Pokemon(evolution)
self.emit_event("pokemon_evolved",
formatted="Evolved {pokemon} [CP {old_cp}] into {new} [IV {iv}] [CP {cp}] [{candy} candies] [+{xp} xp]",
data={"pokemon": pokemon.name,
"new": new_pokemon.name,
"iv": pokemon.iv,
"old_cp": pokemon.cp,
"cp": new_pokemon.cp,
"candy": candy.quantity,
"xp": xp})
if self.config_evolve and (not self.bot.config.test):
new_pokemon = inventory.Pokemon(evolution)
self.evolution_map[pokemon.unique_id] = new_pokemon
inventory.pokemons().remove(pokemon.unique_id)
inventory.pokemons().add(new_pokemon)
with self.bot.database as db:
cursor = db.cursor()
cursor.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='evolve_log'")
db_result = cursor.fetchone()
if db_result[0] == 1:
db.execute("INSERT INTO evolve_log (pokemon, iv, cp) VALUES (?, ?, ?)", (pokemon.name, pokemon.iv, pokemon.cp))
sleep(self.config_evolve_time, 0.1)
if transfer and not self.used_lucky_egg:
            # Transfer the new Pokemon immediately!
self.transfer_pokemon([new_pokemon], True)
return True
def upgrade_pokemon(self, pokemon):
upgrade_level = min(self.config_upgrade_level, inventory.player().level + 1.5, 40)
candy = inventory.candies().get(pokemon.pokemon_id)
for i in range(int(pokemon.level * 2), int(upgrade_level * 2)):
upgrade_cost = self.pokemon_upgrade_cost[i - 2]
upgrade_candy_cost = upgrade_cost[0]
upgrade_stardust_cost = upgrade_cost[1]
if self.config_upgrade and (not self.bot.config.test):
request = self.bot.api.create_request()
request.upgrade_pokemon(pokemon_id=pokemon.unique_id)
response_dict = request.call()
else:
response_dict = {"responses": {"UPGRADE_POKEMON": {"result": SUCCESS}}}
if not response_dict:
return False
result = response_dict.get("responses", {}).get("UPGRADE_POKEMON", {}).get("result", 0)
if result != SUCCESS:
return False
upgrade = response_dict.get("responses", {}).get("UPGRADE_POKEMON", {}).get("upgraded_pokemon", {})
if self.config_upgrade and (not self.bot.config.test):
candy.consume(upgrade_candy_cost)
self.bot.stardust -= upgrade_stardust_cost
new_pokemon = inventory.Pokemon(upgrade)
self.emit_event("pokemon_upgraded",
formatted="Upgraded {pokemon} [IV {iv}] [CP {cp} -> {new_cp}] [{candy} candies] [{stardust} stardust]",
data={"pokemon": pokemon.name,
"iv": pokemon.iv,
"cp": pokemon.cp,
"new_cp": new_pokemon.cp,
"candy": candy.quantity,
"stardust": self.bot.stardust})
if self.config_upgrade and (not self.bot.config.test):
inventory.pokemons().remove(pokemon.unique_id)
new_pokemon = inventory.Pokemon(upgrade)
inventory.pokemons().add(new_pokemon)
pokemon = new_pokemon
action_delay(self.config_action_wait_min, self.config_action_wait_max)
return True
def set_buddy_pokemon(self, pokemon):
if not self.bot.config.test:
request = self.bot.api.create_request()
request.set_buddy_pokemon(pokemon_id=pokemon.unique_id)
response_dict = request.call()
else:
response_dict = {"responses": {"SET_BUDDY_POKEMON": {"result": SUCCESS, "updated_buddy": {"start_km_walked": 0, "last_km_awarded": 0, "id": 0}}}}
if not response_dict:
return False
result = response_dict.get("responses", {}).get("SET_BUDDY_POKEMON", {}).get("result", 0)
if result != SUCCESS:
return False
if not self.bot.config.test:
self.buddy = response_dict.get("responses", {}).get("SET_BUDDY_POKEMON", {}).get("updated_buddy", {})
self.buddyid = self._get_buddyid()
self.emit_event("buddy_pokemon",
formatted="Buddy {pokemon} [IV {iv}] [CP {cp}]",
data={"pokemon": pokemon.name,
"iv": pokemon.iv,
"cp": pokemon.cp})
self.lock_buddy = True
if not self.bot.config.test:
action_delay(self.config_action_wait_min, self.config_action_wait_max)
return True
def get_buddy_walked(self, pokemon):
if not self.bot.config.test:
request = self.bot.api.create_request()
request.get_buddy_walked()
response_dict = request.call()
else:
response_dict = {"responses": {"GET_BUDDY_WALKED": {"success": True, "family_candy_id": 0, "candy_earned_count": 0}}}
if not response_dict:
return False
success = response_dict.get("responses", {}).get("GET_BUDDY_WALKED", {}).get("success", False)
if not success:
return False
candy_earned_count = response_dict.get("responses", {}).get("GET_BUDDY_WALKED", {}).get("candy_earned_count", 0)
if candy_earned_count == 0:
return
family_candy_id = self.get_family_id(pokemon)
candy = inventory.candies().get(family_candy_id)
if not self.bot.config.test:
candy.add(candy_earned_count)
self.emit_event("buddy_reward",
formatted="Buddy {pokemon} rewards {family} candies [+{candy_earned} candies] [{candy} candies]",
data={"pokemon": pokemon.name,
"family": candy.type,
"candy_earned": candy_earned_count,
"candy": candy.quantity})
if not self.bot.config.test:
action_delay(self.config_action_wait_min, self.config_action_wait_max)
return True
def _get_buddyid(self):
        if self.buddy and 'id' in self.buddy:
return self.buddy['id']
return 0
def favor_pokemon(self, pokemon):
request = self.bot.api.create_request()
request.set_favorite_pokemon(pokemon_id=pokemon.unique_id, is_favorite=True)
response_dict = request.call()
sleep(1.2) # wait a bit after request
if response_dict:
result = response_dict.get('responses', {}).get('SET_FAVORITE_POKEMON', {}).get('result', 0)
            if result == 1: # Request success
action_delay(self.config_action_wait_min, self.config_action_wait_max)
# Mark Pokemon as favorite
pokemon.is_favorite = True
self.emit_event("pokemon_favored",
formatted="Favored {pokemon} [IV {iv}] [CP {cp}]",
data={"pokemon": pokemon.name,
"iv": pokemon.iv,
"cp": pokemon.cp})
else:
# Pokemon not found??
self.ignore_favorite.append(pokemon.unique_id)
pokemon.is_favorite = True
self.logger.info("Unable to set %s as favorite!" % pokemon.name)
def unfavor_pokemon(self, pokemon):
request = self.bot.api.create_request()
request.set_favorite_pokemon(pokemon_id=pokemon.unique_id, is_favorite=False)
response_dict = request.call()
sleep(1.2) # wait a bit after request
if response_dict:
result = response_dict.get('responses', {}).get('SET_FAVORITE_POKEMON', {}).get('result', 0)
            if result == 1: # Request success
# Mark Pokemon as no longer favorite
pokemon.is_favorite = False
self.emit_event("pokemon_unfavored",
formatted="Unfavored {pokemon} [IV {iv}] [CP {cp}]",
data={"pokemon": pokemon.name,
"iv": pokemon.iv,
"cp": pokemon.cp})
action_delay(self.config_action_wait_min, self.config_action_wait_max)
| Gobberwart/PokemonGo-Bot | pokemongo_bot/cell_workers/pokemon_optimizer.py | Python | mit | 50,727 |
import os
import pickle
import pandas
import logging
from indra.databases import hgnc_client
from indra.statements import Phosphorylation, Agent, Evidence
from indra.preassembler import Preassembler
from indra.ontology.bio import bio_ontology
from indra.preassembler.grounding_mapper import default_mapper
from indra.preassembler.sitemapper import SiteMapper, default_site_map
psite_fname = 'phosphosite_kin_sub_2016.csv'
stmts_fname = 'model.pkl'
logger = logging.getLogger('indra.benchmarks.phosphorylations')
def phosphosite_to_indra():
df = pandas.DataFrame.from_csv(psite_fname, index_col=None)
df = df[df['KIN_ORGANISM'] == 'human']
df = df[df['SUB_ORGANISM'] == 'human']
stmts = []
for _, row in df.iterrows():
enz_name = row['GENE']
enz_up = row['KIN_ACC_ID']
sub_name = row['SUB_GENE']
sub_up = row['SUB_ACC_ID']
if not enz_name or not sub_name or \
isinstance(enz_name, float) or isinstance(sub_name, float):
continue
enz = Agent(enz_name, db_refs={'UP': enz_up})
sub = Agent(sub_name, db_refs={'UP': sub_up})
site = row['SUB_MOD_RSD']
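        # Example (illustrative): a SUB_MOD_RSD value like 'T185' yields residue 'T' and
        # position '185'; site strings not starting with S/T/Y leave both unset.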
if site[0] in ('S', 'T', 'Y'):
residue = site[0]
position = site[1:]
else:
residue = None
position = None
ev = Evidence('phosphosite')
st = Phosphorylation(enz, sub, residue, position, ev)
stmts.append(st)
logger.info('%d human-human phosphorylations in Phosphosite' % len(stmts))
with open('phosphosite_indra.pkl', 'wb') as fh:
pickle.dump(stmts, fh)
return stmts
def extract_phos():
with open(stmts_fname, 'rb') as fh:
model = pickle.load(fh)
stmts = []
for pmid, pmid_stmts in model.items():
for stmt in pmid_stmts:
if isinstance(stmt, Phosphorylation):
stmts.append(stmt)
logger.info('%d phosphorylations in RAS Machine' % len(stmts))
stmts = [s for s in stmts if s.enz is not None]
logger.info('%d phosphorylations with enzyme in RAS Machine' % len(stmts))
stmts_grounded = filter_grounded(stmts)
logger.info('%d grounded phosphorylations in RAS Machine' % len(stmts_grounded))
stmts_enzkinase = filter_enzkinase(stmts_grounded)
logger.info('%d phosphorylations with kinase enzyme in RAS Machine' % len(stmts_enzkinase))
sm = SiteMapper(default_site_map)
stmts_valid, _ = sm.map_sites(stmts_enzkinase)
logger.info('%d valid-sequence phosphorylations in RAS Machine' % len(stmts_valid))
pa = Preassembler(bio_ontology, stmts_valid)
stmts_unique = pa.combine_duplicates()
logger.info('%d unique phosphorylations in RAS Machine' % len(stmts_unique))
stmts_unique = pa.combine_related()
logger.info('%d top-level phosphorylations in RAS Machine' % len(stmts_unique))
with open('mapped_unique_phos.pkl', 'wb') as fh:
pickle.dump(stmts_unique, fh)
# Filter RAS Machine statements for direct and not hypothesis
stmts = filter_direct(stmts_unique)
logger.info('%d direct phosphorylations in RAS Machine' % len(stmts))
stmts = filter_non_hypothesis(stmts)
logger.info('%d non-hypothesis phosphorylations in RAS Machine' % len(stmts))
with open('filtered_phos.pkl', 'wb') as fh:
pickle.dump(stmts, fh)
return stmts
def filter_belief(stmts):
# As a proxy here, we just look for > 1 evidence
believed_stmts = []
for stmt in stmts:
if len(stmt.evidence) > 1:
believed_stmts.append(stmt)
return believed_stmts
def filter_direct(stmts):
direct_stmts = []
for stmt in stmts:
if get_is_direct(stmt):
direct_stmts.append(stmt)
return direct_stmts
def filter_non_hypothesis(stmts):
non_hyp_stmts = []
for stmt in stmts:
if get_is_not_hypothesis(stmt):
non_hyp_stmts.append(stmt)
return non_hyp_stmts
def filter_grounded(stmts):
gm = default_mapper
stmts_mapped = gm.map_agents(stmts, do_rename=True)
stmts_grounded = []
for stmt in stmts_mapped:
all_grounded = True
for agent in stmt.agent_list():
if agent is not None:
if set(agent.db_refs.keys()) == set(['TEXT']):
all_grounded = False
break
if all_grounded:
stmts_grounded.append(stmt)
return stmts_grounded
def filter_enzkinase(stmts):
kinase_activities = get_kinase_activities()
stmts_enzkinase = []
for stmt in stmts:
is_kinase = False
for kin in kinase_activities:
if stmt.enz.entity_matches(kin.agent):
is_kinase = True
break
if kin.agent.refinement_of(stmt.enz, bio_ontology):
is_kinase = True
break
if is_kinase:
stmts_enzkinase.append(stmt)
return stmts_enzkinase
def compare_overlap(stmts_pred, stmts_ref):
# Ras Machine statements that are in Phosphosite
found_stmts = []
not_found_stmts = []
for i, stmt_pred in enumerate(stmts_pred):
found = False
for stmt_ref in stmts_ref:
if stmt_pred.matches(stmt_ref) or \
stmt_ref.refinement_of(stmt_pred, bio_ontology):
found = True
break
if found:
found_stmts.append(stmt_pred)
else:
not_found_stmts.append(stmt_pred)
return not_found_stmts, found_stmts
def get_kinase_activities():
kinase_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../resources/kinases.tsv')
kinases = []
with open(kinase_file, 'rt') as fh:
lines = [l.strip() for l in fh.readlines()]
for lin in lines[1:]:
up_id, hgnc_name, _, _ = lin.split('\t')
hgnc_id = hgnc_client.get_hgnc_id(hgnc_name)
agent = Agent(hgnc_name, db_refs={'UP': up_id, 'HGNC': hgnc_id})
kinases.append(agent)
kin_activities = []
from indra.statements import HasActivity
for kin in kinases:
stmt = HasActivity(kin, 'kinase', True)
kin_activities.append(stmt)
return kin_activities
def get_is_direct(stmt):
'''Returns true if there is evidence that the statement is a direct
interaction. If any of the evidences associated with the statement
    indicates a direct interaction then we assume the interaction
is direct. If there is no evidence for the interaction being indirect
then we default to direct.'''
any_indirect = False
for ev in stmt.evidence:
if ev.epistemics.get('direct') is True:
return True
elif ev.epistemics.get('direct') is False:
# This guarantees that we have seen at least
# some evidence that the statement is indirect
any_indirect = True
if any_indirect:
return False
return True
def get_is_not_hypothesis(stmt):
hyps = [ev.epistemics.get('hypothesis') for ev in stmt.evidence]
for hyp in hyps:
if hyp is not True:
return True
return False
if __name__ == '__main__':
use_pkl = False
if use_pkl:
stmts_file = 'filtered_phos.pkl'
with open(stmts_file, 'rb') as fh:
indra_stmts = pickle.load(fh)
ps_file = 'phosphosite_indra.pkl'
with open(ps_file, 'rb') as fh:
ps_stmts = pickle.load(fh)
else:
logger.info('Extract phosphorylations from Phosphosite')
ps_stmts = phosphosite_to_indra()
logger.info('Extract phosphorylations from the RAS Machine')
indra_stmts = extract_phos()
not_found_stmts, found_stmts = compare_overlap(indra_stmts, ps_stmts)
logger.info('%d phosphorylations found in Phosphosite' % len(found_stmts))
logger.info('%d phosphorylations not found in Phosphosite' %
len(not_found_stmts))
indra_stmts = filter_belief(indra_stmts)
logger.info('%d > 1 evidence phosphorylations in statements' %
len(indra_stmts))
not_found_stmts, found_stmts = compare_overlap(indra_stmts, ps_stmts)
logger.info('%d phosphorylations found in Phosphosite' % len(found_stmts))
logger.info('%d phosphorylations not found in Phosphosite' %
len(not_found_stmts))
with open('not_found.tsv', 'wt') as fh:
for i, st in enumerate(not_found_stmts):
for ev in st.evidence:
if ev.epistemics.get('direct'):
fh.write('%d\t%s\t \t%s\t%s\n' % \
(i, st, ev.text, ev.pmid))
| johnbachman/indra | indra/benchmarks/phosphorylations/__init__.py | Python | bsd-2-clause | 8,646 |
{
'name': "POS debranding",
'version': '1.0.0',
'author': 'IT-Projects LLC, Ivan Yelizariev',
'license': 'GPL-3',
'category': 'Debranding',
'website': 'https://twitter.com/yelizariev',
'depends': ['point_of_sale'],
'price': 30.00,
'currency': 'EUR',
'data': [
'views.xml',
],
'qweb': [
'static/src/xml/pos_debranding.xml',
],
'installable': True,
}
| bmya/pos-addons | pos_debranding/__openerp__.py | Python | lgpl-3.0 | 425 |
#!/usr/bin/env python
# Mass Static Analysis
import tornado.httpclient
import os
import urllib2
import argparse
import mimetypes
import re
import json
import hashlib
import urllib
from threading import Thread
def HTTP_GET_Request(url):
response = None
http_client = tornado.httpclient.HTTPClient()
try:
response = http_client.fetch(url)
except tornado.httpclient.HTTPError as e:
pass
except Exception as e:
print("[ERROR] HTTP GET Request Error: " + str(e))
http_client.close()
return response
def isServerUp(url):
try:
response = urllib2.urlopen(url, timeout=5)
return True
except urllib2.URLError:
pass
return False
def getCSRF(url):
resp = HTTP_GET_Request(url)
return resp.headers['Set-Cookie'].split(";")[0].split("=")[1]
def encode_multipart_formdata(fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append(
'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
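# Illustrative call (hypothetical values) showing the tuple shapes this helper expects:
#   fields = [("csrfmiddlewaretoken", "abc123")]
#   files = [("file", "app.apk", open("app.apk", "rb").read())]
#   content_type, body = encode_multipart_formdata(fields, files)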
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def genMD5(app):
BLOCKSIZE = 65536
hasher = hashlib.md5()
with open(app, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while buf:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return (hasher.hexdigest())
def doScan(app, server_url):
print "\nUploading : " + app
UPLOAD_URL = server_url + "/upload/"
CSRF = getCSRF(server_url)
APP_NAME = os.path.basename(app)
fields = [("csrfmiddlewaretoken", CSRF)]
files = [("file", APP_NAME, open(app, "rb").read())]
http_client = tornado.httpclient.HTTPClient()
content_type, body = encode_multipart_formdata(fields, files)
headers = {"Content-Type": content_type,
'content-length': str(len(body)), 'Cookie': 'csrftoken=' + CSRF}
request = tornado.httpclient.HTTPRequest(
UPLOAD_URL, "POST", headers=headers, body=body, validate_cert=False)
response = http_client.fetch(request)
if response.code == 200:
r = json.loads(response.body)
if r["status"] == "success":
MD5 = genMD5(app)
SCAN_DB[MD5] = APP_NAME
# Start Scan
START_SCAN_URL = server_url + "/" + \
r["url"].replace(APP_NAME, urllib.quote(APP_NAME))
SCAN_URLS.append(START_SCAN_URL)
elif r["description"]:
print r["description"]
return SCAN_DB, SCAN_URLS
def startScan(directory, server_url):
SCAN_URLS = []
SCAN_DB = {}
print "\nLooking for Android/iOS binaries or source code in : " + directory
for root, directories, filenames in os.walk(directory):
for filename in filenames:
scan_file = os.path.join(root, filename)
abs_filename, file_extension = os.path.splitext(scan_file)
if re.findall("apk|ipa|zip", file_extension):
SCAN_DB, SCAN_URLS = doScan(scan_file, server_url)
if len(SCAN_URLS) > 0:
print "\nFiles Uploaded "
print "======================================================================"
print "MD5 | App "
print "======================================================================"
for key, val in SCAN_DB.items():
print key + " | " + val
print "\nInvoking Scan Request. This takes time depending on the number of apps to be scanned."
for url in SCAN_URLS:
t = Thread(target=HTTP_GET_Request, args=(url,))
t.start()
print "Please wait while MobSF is performing Static Analysis. Once the scan is completed, you can get the report by searching for the MD5 checksum"
print "Exiting the Script..."
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory",
help="Path to the directory that contains mobile app binary/zipped source code")
parser.add_argument(
"-s", "--ipport", help="IP address and Port number of a running MobSF Server. (ex: 127.0.0.1:8000)")
args = parser.parse_args()
SCAN_DB = dict()
SCAN_URLS = list()
if args.directory and args.ipport:
SERVER = args.ipport
DIRECTORY = args.directory
SERVER_URL = "http://" + SERVER
if isServerUp(SERVER_URL) == False:
print "MobSF Server is not running at " + SERVER_URL
print "Exiting....."
exit(0)
# MobSF is running, start scan
startScan(DIRECTORY, SERVER_URL)
else:
parser.print_help()
| h4ng3r/YSO-Mobile-Security-Framework | mass_static_analysis.py | Python | gpl-3.0 | 5,413 |
#!/usr/bin/env python
#
# Author: helour
# Copyright: 2013-2015 helour
# Based on the cr33dog's script Export Layers as PNG (http://registry.gimp.org/node/18440)
# Modified by: jmunsch (11-25-2015)
# License: GPL v3+
#
# Version: 0.7
#
# GIMP plugin to export layers as a multiple pages PDF file
#
#
# Note for Windows users:
#
# You need to add the ImageMagick directory (which contains the 'convert.exe' executable)
# to the GIMP environment PATH variable in the file:
# C:\Program Files\GIMP 2\lib\gimp\2.0\environ\default.env
#
# like in the example here:
# PATH=${gimp_installation_dir}\bin;${gimp_installation_dir}\32\bin;C:\Program Files\ImageMagick-6.9.1-Q16
# PYTHONPATH=${gimp_installation_dir}\32\lib\gimp\2.0\python
#
#
# Note for Mac users:
# If ImageMagick was installed with brew then there is a good chance you will need to:
# brew update && brew upgrade && brew uninstall --force imagemagick && brew install -v imagemagick --build-from-source
#
# And given any errors on compiling update any of the linked libraries for convert/imageMagick
#
#
import os
import gtk
import subprocess
from tempfile import mkstemp
from gimpfu import *
def mktmpfile(suffix):
    filename = None
    try:
        fd, filename = mkstemp(suffix=suffix)
        os.close(fd)  # only the path is needed; close the descriptor so it is not leaked
    except Exception as e:
        pdb.gimp_message(e)
    return filename
def get_layers_to_export(layers, only_visible, gimp_version):
try:
result = []
for layer in layers:
if gimp_version >= 2.8 and pdb.gimp_item_is_group(layer):
result += get_layers_to_export(layer.children, only_visible, gimp_version)
else:
if only_visible:
if layer.visible:
result.append(layer)
else:
result.append(layer)
except Exception as e:
print('get_layers_to_export: ...',e)
pdb.gimp_message(e)
return result
def combine_images_into_pdf(img_files, pdf_file):
try: # Run on shell because of conflict with windows system command 'convert.exe'
print('Checking convert command:', ['convert', '-verbose'] + img_files + [pdf_file])
checked = subprocess.check_output(['convert' , '-verbose'] + img_files + [pdf_file], stderr=subprocess.STDOUT, universal_newlines=True,shell = True if os.name == 'nt' else False)
print('convert command checked: ....', checked)
except subprocess.CalledProcessError as e:
print('Debug: combine_images_into_pdf:\n\n')
print(dir(e), e.args, e.cmd, e.message, e.output, e.returncode)
pdb.gimp_message('If convert cannot find the PNG module. Try lowering the pdf quality to less than 100. This defaults convert to use jpeg instead of png.\n\n##########convert stdout#############\n' +
"Error while executing 'convert' command:\n\n" +
e.output.replace('\n', '\n\n'))
except Exception as e:
import traceback;print(traceback.format_exc())
pdb.gimp_message("Error while executing 'convert' command:\n" +
str(e) +
"\n\nHave you installed the ImageMagic package\nand/or\nset the GIMP environment PATH variable?\n"
"\n\nIf ImageMagick was installed with brew run:\n\n brew update && brew upgrade && brew uninstall --force imagemagick && brew install -v imagemagick --build-from-source")
def export_layers(image, only_visible, quality):
if not image.filename:
pdb.gimp_message("Please save your file first!")
return
try:
chooser = gtk.FileChooserDialog(title = None, action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_current_folder(os.path.dirname(image.filename))
chooser.set_current_name(os.path.splitext(image.filename)[0] + '.pdf')
if chooser.run() != gtk.RESPONSE_OK:
return
filename = chooser.get_filename()
chooser.destroy()
version = gimp.version[0:2]
gimp_version = float(version[0]) + float(version[1]) / 10.0
layers_to_export = get_layers_to_export(image.layers, only_visible, gimp_version)
img_files = []
except Exception as e:
print('export_layers: ...', e)
pdb.gimp_message(e)
try:
for layer in layers_to_export:
ext = '.jpg' if quality < 100 else '.png'
fullpath = mktmpfile(ext)
img_files.append(fullpath)
pic_filename = os.path.basename(fullpath)
if quality < 100:
pdb.file_jpeg_save(image, layer, fullpath, pic_filename, quality / 100.0, 0, 1, 0, "", 0, 1, 0, 2)
else:
pdb.file_png_save(image, layer, fullpath, pic_filename, 0, 9, 1, 1, 1, 1, 1)
combine_images_into_pdf(img_files, filename)
except Exception as e:
print(' ... ', e)
finally:
for img in img_files:
try:
os.remove(img)
print('DELETED:', img)
except:
pass
try:
register(
"export-layers-to-pdf", #name
"Export layers to a multiple pages PDF file", #description
"Export all layers to a single multiple pages PDF file", #help
"helour", #author
"helour", #copyright
"2015", #year
"Export layers to PDF", #menu label
"*", # image format
[ #input args. Format (type, name, description, default [, extra])
(PF_IMAGE, "image", "Image", None),
(PF_BOOL, "only_visible", "Only Visible Layers?", True),
(PF_SLIDER, "quality", "Image quality", 100, (10, 100, 1)),
],
[], #results. Format (type, name, description)
export_layers, #callback
menu=("<Image>/File/Export/"),
)
except Exception as e:
pdb.gimp_message(e)
print('running main()')
try:
main()
except Exception as e:
pdb.gimp_message(e)
| jmunsch/gimp_export_layers_to_pdf | export-layers-to-pdf.py | Python | gpl-3.0 | 6,324 |
from share1 import ShareAnalysis1
from share1_dot_1 import ShareAnalysis1Dot1
from share2 import ShareAnalysis2
from share3 import ShareAnalysis3
from share_matrix import ShareMatrix
class ShareAnalysisFactory(object):
def __init__(self, share_name):
self.share_name = share_name
def new_share_analysis(self, dataset):
if self.share_name == "Share01":
return ShareAnalysis1(dataset)
elif self.share_name == "Share01.1":
return ShareAnalysis1Dot1(dataset)
elif self.share_name == "Share02":
return ShareAnalysis2(dataset)
elif self.share_name == "Share03":
return ShareAnalysis3(dataset)
else:
raise Exception("Unexpected share: {0}".format(self.share_name))
| lcameron05/PCWG | pcwg/share/share_factory.py | Python | mit | 780 |
from setuptools import setup, find_packages
setup(name='BIOMD0000000275',
version=20140916,
description='BIOMD0000000275 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000275',
maintainer='Stanley Gu',
maintainer_url='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
) | biomodels/BIOMD0000000275 | setup.py | Python | cc0-1.0 | 377 |
#!/usr/bin/python
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for the namespaces.py module."""
from __future__ import print_function
import errno
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from chromite.lib import cros_test_lib
from chromite.lib import namespaces
class SetNSTests(cros_test_lib.TestCase):
"""Tests for SetNS()"""
def testBasic(self):
"""Simple functionality test."""
NS_PATH = '/proc/self/ns/mnt'
if not os.path.exists(NS_PATH):
raise unittest.SkipTest('kernel too old (missing %s)' % NS_PATH)
with open(NS_PATH) as f:
try:
namespaces.SetNS(f.fileno(), 0)
except OSError as e:
if e.errno != errno.EPERM:
# Running as non-root will fail, so ignore it. We ran most
# of the code in the process which is all we really wanted.
raise
class UnshareTests(cros_test_lib.TestCase):
"""Tests for Unshare()"""
def testBasic(self):
"""Simple functionality test."""
try:
namespaces.Unshare(namespaces.CLONE_NEWNS)
except OSError as e:
if e.errno != errno.EPERM:
# Running as non-root will fail, so ignore it. We ran most
# of the code in the process which is all we really wanted.
raise
if __name__ == '__main__':
cros_test_lib.main()
| mxOBS/deb-pkg_trusty_chromium-browser | third_party/chromite/lib/namespaces_unittest.py | Python | bsd-3-clause | 1,522 |
# Module to run tests on ampsec definition
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pytest
import os
import numpy as np
from pypeit.core import pixels
from pypeit.core import procimg
from pypeit.spectrographs.util import load_spectrograph
@pytest.fixture
def spectrograph():
return load_spectrograph('shane_kast_blue')
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
def test_ampsec(spectrograph):
""" Test sort_data
"""
datasec_img = spectrograph.get_datasec_img(data_path('b1.fits.gz'), det=1)
datasec_img = procimg.trim_frame(datasec_img, datasec_img < 1)
# Test
assert datasec_img.shape == (2048, 350)
#assert np.sum(np.isclose(datasec_img, 1)) == 2162688 # Data region
#assert np.sum(np.isclose(datasec_img, 2)) == 2162688 # second amp
assert np.sum(np.isclose(datasec_img, 1)) == 358400 # Data region
assert np.sum(np.isclose(datasec_img, 2)) == 358400 # second amp
#assert settings.spect[dnum]['oscansec01'] == [[0, 0], [2049, 2080]]
#assert settings.spect[dnum]['datasec01'] == [[0, 0], [0, 1024]]
| PYPIT/PYPIT | pypeit/tests/test_datasec.py | Python | gpl-3.0 | 1,269 |
from staticfiles.settings import StaticfilesSettings
settings = StaticfilesSettings("STATICFILES")
| zodman/django-staticfiles | staticfiles/conf.py | Python | bsd-3-clause | 100 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading, re, socket
import webbrowser
import requests
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum_bta.i18n import _
from electrum_bta import ELECTRUM_VERSION, print_error
class VersionGetter(threading.Thread):
def __init__(self, label):
threading.Thread.__init__(self)
self.label = label
self.daemon = True
def run(self):
try:
res = requests.request("GET", "https://electrum-bta.org/version")
except:
print_error("Could not retrieve version information")
return
if res.status_code == 200:
latest_version = res.text
latest_version = latest_version.replace("\n","")
if(re.match('^\d+(\.\d+)*$', latest_version)):
self.label.callback(latest_version)
class UpdateLabel(QLabel):
def __init__(self, config, sb):
QLabel.__init__(self)
self.new_version = False
self.sb = sb
self.config = config
self.current_version = ELECTRUM_VERSION
self.connect(self, QtCore.SIGNAL('new_electrum_version'), self.new_electrum_version)
# prevent HTTP leaks if a proxy is set
if self.config.get('proxy'):
return
VersionGetter(self).start()
def callback(self, version):
self.latest_version = version
if(self.compare_versions(self.latest_version, self.current_version) == 1):
latest_seen = self.config.get("last_seen_version",ELECTRUM_VERSION)
if(self.compare_versions(self.latest_version, latest_seen) == 1):
self.new_version = True
self.emit(QtCore.SIGNAL('new_electrum_version'))
def new_electrum_version(self):
if self.new_version:
self.setText(_("New version available") + ": " + self.latest_version)
self.sb.insertPermanentWidget(1, self)
def compare_versions(self, version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
try:
return cmp(normalize(version1), normalize(version2))
except:
return 0
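    # Example: compare_versions("2.0.1", "2.0") returns 1 because trailing ".0" parts are
    # stripped before comparison ([2, 0, 1] > [2]); "1.0" and "1" therefore compare equal.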
def ignore_this_version(self):
self.setText("")
self.config.set_key("last_seen_version", self.latest_version, True)
QMessageBox.information(self, _("Preference saved"), _("Notifications about this update will not be shown again."))
self.dialog.done(0)
def ignore_all_version(self):
self.setText("")
self.config.set_key("last_seen_version", "9.9.9", True)
QMessageBox.information(self, _("Preference saved"), _("No more notifications about version updates will be shown."))
self.dialog.done(0)
def open_website(self):
webbrowser.open("http://electrum-bta.org/")
self.dialog.done(0)
def mouseReleaseEvent(self, event):
dialog = QDialog(self)
dialog.setWindowTitle(_('Electrum update'))
dialog.setModal(1)
main_layout = QGridLayout()
main_layout.addWidget(QLabel(_("A new version of Electrum is available:")+" " + self.latest_version), 0,0,1,3)
ignore_version = QPushButton(_("Ignore this version"))
ignore_version.clicked.connect(self.ignore_this_version)
ignore_all_versions = QPushButton(_("Ignore all versions"))
ignore_all_versions.clicked.connect(self.ignore_all_version)
open_website = QPushButton(_("Goto download page"))
open_website.clicked.connect(self.open_website)
main_layout.addWidget(ignore_version, 1, 0)
main_layout.addWidget(ignore_all_versions, 1, 1)
main_layout.addWidget(open_website, 1, 2)
dialog.setLayout(main_layout)
self.dialog = dialog
if not dialog.exec_(): return
| BTA-BATA/electrum-bta-master | gui/qt/version_getter.py | Python | gpl-3.0 | 4,574 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Libraries to build Recurrent Neural Networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| ivano666/tensorflow | tensorflow/models/rnn/__init__.py | Python | apache-2.0 | 839 |
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User, Group
from optparse import make_option
from sys import stdout
from csv import writer
FORMATS = [
'address',
'emails',
'google',
'outlook',
'linkedin',
'vcard',
]
def full_name(first_name, last_name, username, **extra):
name = u" ".join(n for n in [first_name, last_name] if n)
if not name:
return username
return name
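# e.g. full_name(first_name=u"Ada", last_name=u"Lovelace", username="alove") -> u"Ada Lovelace";
# the username is only used as a fallback when both name fields are empty.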
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--group', '-g', action='store', dest='group', default=None,
help='Limit to users which are part of the supplied group name'),
make_option('--format', '-f', action='store', dest='format', default=FORMATS[0],
help="output format. May be one of '" + "', '".join(FORMATS) + "'."),
)
help = ("Export user email address list in one of a number of formats.")
args = "[output file]"
label = 'filename to save to'
requires_model_validation = True
can_import_settings = True
encoding = 'utf-8' # RED_FLAG: add as an option -DougN
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("extra arguments supplied")
group = options['group']
if group and not Group.objects.filter(name=group).count() == 1:
names = u"', '".join(g['name'] for g in Group.objects.values('name')).encode('utf-8')
if names:
names = "'" + names + "'."
raise CommandError("Unknown group '" + group + "'. Valid group names are: " + names)
if len(args) and args[0] != '-':
outfile = file(args[0], 'w')
else:
outfile = stdout
qs = User.objects.all().order_by('last_name', 'first_name', 'username', 'email')
if group:
            qs = qs.filter(groups__name=group).distinct()
qs = qs.values('last_name', 'first_name', 'username', 'email')
getattr(self, options['format'])(qs, outfile)
def address(self, qs, out):
"""simple single entry per line in the format of:
"full name" <my@address.com>;
"""
out.write(u"\n".join(u'"%s" <%s>;' % (full_name(**ent), ent['email'])
for ent in qs).encode(self.encoding))
out.write("\n")
def emails(self, qs, out):
"""simpler single entry with email only in the format of:
my@address.com,
"""
out.write(u",\n".join(u'%s' % (ent['email']) for ent in qs).encode(self.encoding))
out.write("\n")
def google(self, qs, out):
"""CSV format suitable for importing into google GMail
"""
csvf = writer(out)
csvf.writerow(['Name', 'Email'])
for ent in qs:
csvf.writerow([full_name(**ent).encode(self.encoding),
ent['email'].encode(self.encoding)])
def outlook(self, qs, out):
"""CSV format suitable for importing into outlook
"""
csvf = writer(out)
columns = ['Name', 'E-mail Address', 'Notes', 'E-mail 2 Address', 'E-mail 3 Address',
'Mobile Phone', 'Pager', 'Company', 'Job Title', 'Home Phone', 'Home Phone 2',
'Home Fax', 'Home Address', 'Business Phone', 'Business Phone 2',
'Business Fax', 'Business Address', 'Other Phone', 'Other Fax', 'Other Address']
csvf.writerow(columns)
empty = [''] * (len(columns) - 2)
for ent in qs:
csvf.writerow([full_name(**ent).encode(self.encoding),
ent['email'].encode(self.encoding)] + empty)
def linkedin(self, qs, out):
"""CSV format suitable for importing into linkedin Groups.
perfect for pre-approving members of a linkedin group.
"""
csvf = writer(out)
csvf.writerow(['First Name', 'Last Name', 'Email'])
for ent in qs:
csvf.writerow([ent['first_name'].encode(self.encoding),
ent['last_name'].encode(self.encoding),
ent['email'].encode(self.encoding)])
def vcard(self, qs, out):
try:
import vobject
except ImportError:
print self.style.ERROR("Please install python-vobject to use the vcard export format.")
import sys
sys.exit(1)
for ent in qs:
card = vobject.vCard()
card.add('fn').value = full_name(**ent)
if not ent['last_name'] and not ent['first_name']:
# fallback to fullname, if both first and lastname are not declared
card.add('n').value = vobject.vcard.Name(full_name(**ent))
else:
card.add('n').value = vobject.vcard.Name(ent['last_name'], ent['first_name'])
emailpart = card.add('email')
emailpart.value = ent['email']
emailpart.type_param = 'INTERNET'
out.write(card.serialize().encode(self.encoding))
| fusionbox/django-extensions | django_extensions/management/commands/export_emails.py | Python | mit | 5,084 |
# pylint: disable=missing-docstring, too-few-public-methods
# pylint: disable=too-many-ancestors, no-absolute-import, import-error, multiple-imports,wrong-import-position
from __future__ import print_function
import socket, binascii, abc, six
class MyException(object):
"""Custom 'exception'."""
class MySecondException(object):
"""Custom 'exception'."""
class MyGoodException(Exception):
"""Custom exception."""
class MySecondGoodException(MyGoodException):
"""Custom exception."""
class SkipException(socket.error):
"""Not an exception for Python 2, but one in 3."""
class SecondSkipException(SkipException):
"""Also a good exception."""
try:
1 + 1
except MyException: # [catching-non-exception]
print("caught")
try:
1 + 2
# +1:[catching-non-exception,catching-non-exception]
except (MyException, MySecondException):
print("caught")
try:
1 + 3
except MyGoodException:
print("caught")
try:
1 + 3
except (MyGoodException, MySecondGoodException):
print("caught")
try:
1 + 3
except (SkipException, SecondSkipException):
print("caught")
try:
1 + 42
# +1:[catching-non-exception,catching-non-exception]
except (None, list()):
print("caught")
try:
1 + 24
except None: # [catching-non-exception]
print("caught")
EXCEPTION = None
EXCEPTION = ZeroDivisionError
try:
1 + 46
except EXCEPTION:
print("caught")
try:
1 + 42
# +1:[catching-non-exception,catching-non-exception,catching-non-exception]
except (list([4, 5, 6]), None, ZeroDivisionError, 4):
print("caught")
EXCEPTION_TUPLE = (ZeroDivisionError, OSError)
NON_EXCEPTION_TUPLE = (ZeroDivisionError, OSError, 4)
try:
1 + 42
except EXCEPTION_TUPLE:
print("caught")
try:
1 + 42
except NON_EXCEPTION_TUPLE: # [catching-non-exception]
print("caught")
from missing_import import UnknownError
UNKNOWN_COMPONENTS = (ZeroDivisionError, UnknownError)
try:
1 + 42
except UNKNOWN_COMPONENTS:
print("caught")
try:
1 + 42
except binascii.Error:
print('builtin and detected')
try:
1 + 45
except object: # [catching-non-exception]
print('caught')
try:
1 + 42
except range: # [catching-non-exception]
print('caught')
class HasErrorInMRO(six.with_metaclass(abc.ABCMeta, Exception)):
pass
class Second(HasErrorInMRO):
pass
try:
raise Second
except Second:
pass
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/pylint/test/functional/invalid_exceptions_caught.py | Python | apache-2.0 | 2,380 |
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
"""
Raspymc is a multimedia centre exposed via a http server built with bottlepy
Copyright (C) 2013 Giancarlo Fringuello
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os, sys, inspect, ConfigParser, pickle
from utils import *
from logger import *
from track_obj import *
CNF_SERVER_PATH = sys.path[0]
CNF_FOLDER_PATH = ""
CNF_PLAYLIST_PATH = CNF_SERVER_PATH + "/config/playlist.pkl"
CNF_FOLDER_PATH = CNF_SERVER_PATH + "/config/"
CNF_CONFIG_FILE = CNF_FOLDER_PATH + "config.ini"
#
# Loads the saved playlist from file
def get_playlist():
log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::load_playlist()")
l_playlist = []
try:
with open(CNF_PLAYLIST_PATH, 'rb') as l_input:
l_playlist = pickle.load(l_input)
except:
        log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_playlist()", "missing playlist file: " + CNF_PLAYLIST_PATH)
return l_playlist
def store_playlist(p_list):
log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::store_playlist()")
try:
with open(CNF_PLAYLIST_PATH, 'wb') as l_output:
pickle.dump(p_list, l_output, pickle.HIGHEST_PROTOCOL)
except:
        log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::store_playlist()", "unable to write playlist file: " + CNF_PLAYLIST_PATH)
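# Round-trip sketch (track objects come from track_obj.py; the list contents are illustrative):
#   l_playlist = get_playlist()
#   l_playlist.append(l_new_track)
#   store_playlist(l_playlist)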
#
# Loads the configuration from file
def get_folder_path():
log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()")
global CNF_FOLDER_PATH
global CNF_PLAYLIST_PATH
    global CNF_SERVER_PATH
l_config_parser = ConfigParser.ConfigParser()
l_clean_configuration = False
if not os.path.isdir(CNF_FOLDER_PATH): # if config directory does not exist, create it
os.makedirs(CNF_FOLDER_PATH)
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", CNF_FOLDER_PATH + " did not exist, it has been created")
if os.path.isfile(CNF_CONFIG_FILE):
try:
l_config_parser.read(CNF_CONFIG_FILE)
if l_config_parser.has_section("PATH"):
if l_config_parser.has_option("PATH", "CNF_FOLDER_PATH"):
CNF_FOLDER_PATH = l_config_parser.get("PATH","CNF_FOLDER_PATH")
else:
l_clean_configuration = True
else:
# if section does not exist
l_clean_configuration = True
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "unable to load CNF_FOLDER_PATH, using home as default, new config.ini will be generated.")
except:
# if unable to read file (e.g. file damaged)
l_clean_configuration = True
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "exception: unable to load CNF_FOLDER_PATH from " + CNF_CONFIG_FILE + ", using home path as default, new config.ini will be generated.")
else:
l_clean_configuration = True
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "no configuration file found, new config.ini will be generated.")
if l_clean_configuration:
# cleanup config file
for l_section in l_config_parser.sections():
l_config_parser.remove_section(l_section)
l_config_parser.add_section("PATH")
l_config_parser.set("PATH", "CNF_FOLDER_PATH", os.path.expanduser("~"))
l_config_parser.write(file(CNF_CONFIG_FILE, 'w'))
if "" == CNF_FOLDER_PATH:
CNF_FOLDER_PATH = os.path.expanduser("~")
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_FOLDER_PATH = " + CNF_FOLDER_PATH)
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_PLAYLIST_PATH = " + CNF_PLAYLIST_PATH)
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_SERVER_PATH = " + CNF_SERVER_PATH)
return CNF_FOLDER_PATH
def get_server_path():
    return CNF_SERVER_PATH
def get_playlist_path():
return CNF_PLAYLIST_PATH | GiancarloF/raspymc_server | core/conf_manager.py | Python | gpl-3.0 | 4,528 |
import pandas as pd
import json
import os
from lda_pipeline import Ranker
from top_words import top_words_by_key
def task_1(lda_model, topic_df,args):
'''
Get a list of the n most important words
:param lda_model: Scikit pipeline that vectorizes and does lda
:param topic_df: A dataframe of documents/topics
:param args:
:return: the top num_words that are most important to our script
'''
top_words = top_words_by_key(df=topic_df, key="script", model=lda_model, no_top_words=args.num_words)
save_path = os.path.join(args.save_path,'top_words_list.json')
with open(save_path,'w') as f:
json.dump(top_words,f)
return top_words
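# Hypothetical chaining of the three tasks in this module (documents, lda_model,
# document_topic_dict and args are assumed to come from the surrounding pipeline):
#   topic_df = task_2(document_topic_dict, args)
#   top_words = task_1(lda_model, topic_df, args)
#   ranked_words = task_3(documents, top_words, args)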
def task_2(document_topic_dict,args):
'''
Compare the words in our list to all words in the corpus. Here we just create output, the topic breakdowns of
each document
:param document_topic_dict: A dictionary mapping documents to topic vectors
:param args:
:return: a dataframe whose columns are documents and rows are vectors
'''
topic_df = pd.DataFrame(document_topic_dict)
ax = topic_df.T.plot.bar(figsize=(20,10),colormap='jet',title="Most important topics per document")
fig = ax.get_figure()
save_path = os.path.join(args.save_path,'task_2_top_topics.png')
fig.savefig(save_path)
return topic_df
def task_3(documents, top_words,args):
'''
Generate a score/rank for each word in our top n_words vs the transcriptions
    :param documents: The dictionary of all documents for the entire task
:param top_words: A list of the top words we found in task 1
:param args:
:return: a pandas series. The most important words to our script ranked by how much more we should be using them
'''
transcript_corpus = [val for key, val in documents.items() if key.startswith('transcript')]
ranker = Ranker(documents=transcript_corpus)
ranked_words_series, score_word_series = ranker.rank_words_in_doc(documents["script"], top_words)
save_path = os.path.join(args.save_path,'ranked_words_list.json')
ranked_words_series.to_json(save_path)
    ax = score_word_series.fillna(-1).sort_values(ascending=False).plot.bar(figsize=(20,10),title="Scoring of the words we should use more (higher means more important)")
fig = ax.get_figure()
save_path = os.path.join(args.save_path,'task_3_ranked_words.png')
fig.savefig(save_path)
return ranked_words_series | talolard/Interview | tasks.py | Python | gpl-3.0 | 2,431 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import os
import socorro
from nose.plugins.attrib import attr
from nose.tools import eq_
from socorro.external.postgresql import server_status
from socorro.lib import datetimeutil
from unittestbase import PostgreSQLTestCase
@attr(integration='postgres') # for nosetests
class IntegrationTestServerStatus(PostgreSQLTestCase):
"""Test socorro.external.postgresql.server_status.ServerStatus class. """
def setUp(self):
"""Set up this test class by populating the database with fake data.
"""
super(IntegrationTestServerStatus, self).setUp()
# Create fake revision files
self.basedir = os.path.dirname(socorro.__file__)
open(os.path.join(
self.basedir, 'socorro_revision.txt'
), 'w').write('42')
open(os.path.join(
self.basedir, 'breakpad_revision.txt'
), 'w').write('43')
cursor = self.connection.cursor()
# Insert data
self.now = datetimeutil.utc_now()
date1 = datetime.datetime(
self.now.year, self.now.month, self.now.day, 12, 00, 00,
tzinfo=self.now.tzinfo
)
date2 = date1 - datetime.timedelta(minutes=15)
date3 = date2 - datetime.timedelta(minutes=15)
date4 = date3 - datetime.timedelta(minutes=15)
cursor.execute("""
INSERT INTO server_status
(id, date_recently_completed, date_oldest_job_queued,
avg_process_sec, avg_wait_sec, waiting_job_count,
processors_count, date_created)
VALUES
(
1,
'%(date1)s',
'%(date1)s',
2,
5,
3,
2,
'%(date1)s'
),
(
2,
'%(date2)s',
'%(date2)s',
3,
3.12,
2,
2,
'%(date2)s'
),
(
3,
'%(date3)s',
'%(date3)s',
1,
2,
4,
1,
'%(date3)s'
),
(
4,
NULL,
NULL,
1,
2,
4,
1,
'%(date4)s'
);
""" % {"date1": date1, "date2": date2, "date3": date3, "date4": date4})
# Prepare data for the schema revision
# Clean up from init routine
cursor.execute("TRUNCATE alembic_version CASCADE;")
cursor.execute("""
INSERT INTO alembic_version
(version_num)
VALUES
(
'aaaaaaaaaaaa'
)
""")
self.connection.commit()
def tearDown(self):
"""Clean up the database. """
# Delete fake revision files
os.remove(os.path.join(self.basedir, 'socorro_revision.txt'))
os.remove(os.path.join(self.basedir, 'breakpad_revision.txt'))
cursor = self.connection.cursor()
cursor.execute("TRUNCATE server_status, alembic_version CASCADE;")
self.connection.commit()
super(IntegrationTestServerStatus, self).tearDown()
def test_get(self):
status = server_status.ServerStatus(config=self.config)
date1 = datetime.datetime(
self.now.year, self.now.month, self.now.day, 12, 00, 00,
tzinfo=self.now.tzinfo
)
date2 = date1 - datetime.timedelta(minutes=15)
date3 = date2 - datetime.timedelta(minutes=15)
date4 = date3 - datetime.timedelta(minutes=15)
date1 = datetimeutil.date_to_string(date1)
date2 = datetimeutil.date_to_string(date2)
date3 = datetimeutil.date_to_string(date3)
date4 = datetimeutil.date_to_string(date4)
#......................................................................
# Test 1: default behavior
res = status.get()
res_expected = {
"hits": [
{
"id": 1,
"date_recently_completed": date1,
"date_oldest_job_queued": date1,
"avg_process_sec": 2,
"avg_wait_sec": 5,
"waiting_job_count": 3,
"processors_count": 2,
"date_created": date1
},
{
"id": 2,
"date_recently_completed": date2,
"date_oldest_job_queued": date2,
"avg_process_sec": 3,
"avg_wait_sec": 3.12,
"waiting_job_count": 2,
"processors_count": 2,
"date_created": date2
},
{
"id": 3,
"date_recently_completed": date3,
"date_oldest_job_queued": date3,
"avg_process_sec": 1,
"avg_wait_sec": 2,
"waiting_job_count": 4,
"processors_count": 1,
"date_created": date3
},
{
"id": 4,
"date_recently_completed": None,
"date_oldest_job_queued": None,
"avg_process_sec": 1,
"avg_wait_sec": 2,
"waiting_job_count": 4,
"processors_count": 1,
"date_created": date4
}
],
"socorro_revision": "42",
"breakpad_revision": "43",
"schema_revision": "aaaaaaaaaaaa",
"total": 4
}
eq_(res, res_expected)
#......................................................................
# Test 2: with duration
params = {
"duration": 1
}
res = status.get(**params)
res_expected = {
"hits": [
{
"id": 1,
"date_recently_completed": date1,
"date_oldest_job_queued": date1,
"avg_process_sec": 2,
"avg_wait_sec": 5,
"waiting_job_count": 3,
"processors_count": 2,
"date_created": date1
}
],
"socorro_revision": "42",
"breakpad_revision": "43",
"schema_revision": "aaaaaaaaaaaa",
"total": 1
}
eq_(res, res_expected)
| cliqz/socorro | socorro/unittest/external/postgresql/test_server_status.py | Python | mpl-2.0 | 6,873 |
from django.conf import settings
from django.test.simple import run_tests as django_test_runner
def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=None,
**kwargs):
""" Test runner that only runs tests for the apps
listed in ``settings.TEST_APPS``.
"""
extra_tests = extra_tests or []
app_labels = getattr(settings, "TEST_APPS", test_labels)
# Seems to be deleting the test database file twice :(
from celery.utils import noop
from django.db import connection
connection.creation.destroy_test_db = noop
return django_test_runner(app_labels,
verbosity=verbosity, interactive=interactive,
extra_tests=extra_tests, **kwargs)
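# Illustrative settings for this runner (app labels are made up; TEST_RUNNER assumes the
# old-style function-based Django test runner setting that this module provides):
#   TEST_APPS = ["myapp", "otherapp"]
#   TEST_RUNNER = "djcelery.tests.runners.run_tests"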
| kumar303/rockit | vendor-local/djcelery/tests/runners.py | Python | bsd-3-clause | 751 |
# orm/interfaces.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines the now deprecated ORM extension classes as well
as ORM internals.
Other than the deprecated extensions, this module and the
classes within should be considered mostly private.
"""
from .. import exc as sa_exc, util, inspect
from ..sql import operators
from collections import deque
orm_util = util.importlater('sqlalchemy.orm', 'util')
collections = util.importlater('sqlalchemy.orm', 'collections')
__all__ = (
'AttributeExtension',
'EXT_CONTINUE',
'EXT_STOP',
'ExtensionOption',
'InstrumentationManager',
'LoaderStrategy',
'MapperExtension',
'MapperOption',
'MapperProperty',
'PropComparator',
'PropertyOption',
'SessionExtension',
'StrategizedOption',
'StrategizedProperty',
)
EXT_CONTINUE = util.symbol('EXT_CONTINUE')
EXT_STOP = util.symbol('EXT_STOP')
ONETOMANY = util.symbol('ONETOMANY')
MANYTOONE = util.symbol('MANYTOONE')
MANYTOMANY = util.symbol('MANYTOMANY')
from .deprecated_interfaces import AttributeExtension, \
SessionExtension, \
MapperExtension
NOT_EXTENSION = util.symbol('NOT_EXTENSION')
"""Symbol indicating an :class:`_InspectionAttr` that's
not part of sqlalchemy.ext.
Is assigned to the :attr:`._InspectionAttr.extension_type`
attribute.
"""
class _InspectionAttr(object):
"""A base class applied to all ORM objects that can be returned
by the :func:`.inspect` function.
The attributes defined here allow the usage of simple boolean
checks to test basic facts about the object returned.
While the boolean checks here are basically the same as using
the Python isinstance() function, the flags here can be used without
the need to import all of these classes, and also such that
the SQLAlchemy class system can change while leaving the flags
here intact for forwards-compatibility.
"""
is_selectable = False
"""Return True if this object is an instance of :class:`.Selectable`."""
is_aliased_class = False
"""True if this object is an instance of :class:`.AliasedClass`."""
is_instance = False
"""True if this object is an instance of :class:`.InstanceState`."""
is_mapper = False
"""True if this object is an instance of :class:`.Mapper`."""
is_property = False
"""True if this object is an instance of :class:`.MapperProperty`."""
is_attribute = False
"""True if this object is a Python :term:`descriptor`.
This can refer to one of many types. Usually a
:class:`.QueryableAttribute` which handles attributes events on behalf
of a :class:`.MapperProperty`. But can also be an extension type
such as :class:`.AssociationProxy` or :class:`.hybrid_property`.
The :attr:`._InspectionAttr.extension_type` will refer to a constant
identifying the specific subtype.
.. seealso::
:attr:`.Mapper.all_orm_descriptors`
"""
is_clause_element = False
"""True if this object is an instance of :class:`.ClauseElement`."""
extension_type = NOT_EXTENSION
"""The extension type, if any.
Defaults to :data:`.interfaces.NOT_EXTENSION`
.. versionadded:: 0.8.0
.. seealso::
:data:`.HYBRID_METHOD`
:data:`.HYBRID_PROPERTY`
:data:`.ASSOCIATION_PROXY`
"""
class _MappedAttribute(object):
"""Mixin for attributes which should be replaced by mapper-assigned
attributes.
"""
class MapperProperty(_MappedAttribute, _InspectionAttr):
"""Manage the relationship of a ``Mapper`` to a single class
attribute, as well as that attribute as it appears on individual
instances of the class, including attribute instrumentation,
attribute access, loading behavior, and dependency calculations.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
"""
is_property = True
def setup(self, context, entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
pass
def create_row_processor(self, context, path,
mapper, row, adapter):
"""Return a 3-tuple consisting of three row processing functions.
"""
return None, None, None
def cascade_iterator(self, type_, state, visited_instances=None,
halt_on=None):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
        Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
See PropertyLoader for the related instance implementation.
"""
return iter(())
def set_parent(self, parent, init):
self.parent = parent
def instrument_class(self, mapper): # pragma: no-coverage
raise NotImplementedError()
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.MapperProperty`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`.relationship`, or :func:`.composite`
functions.
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
_configure_started = False
_configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
pass
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
"""
pass
def is_primary(self):
"""Return True if this ``MapperProperty``'s mapper is the
primary mapper for its class.
This flag is used to indicate that the ``MapperProperty`` can
define attribute instrumentation for the class at the class
level (as opposed to the individual instance level).
"""
return not self.parent.non_primary
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object"""
pass
def compare(self, operator, value, **kw):
"""Return a compare operation for the columns represented by
this ``MapperProperty`` to the given value, which may be a
column value or an instance. 'operator' is an operator from
the operators module, or from sql.Comparator.
By default uses the PropComparator attached to this MapperProperty
under the attribute name "comparator".
"""
return operator(self.comparator, value)
def __repr__(self):
return '<%s at 0x%x; %s>' % (
self.__class__.__name__,
id(self), getattr(self, 'key', 'no key'))
class PropComparator(operators.ColumnOperators):
"""Defines boolean, comparison, and other operators for
:class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \\
ColumnProperty,\\
CompositeProperty,\\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
See also:
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
def __init__(self, prop, parentmapper, adapter=None):
self.prop = self.property = prop
self._parentmapper = parentmapper
self.adapter = adapter
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def adapted(self, adapter):
"""Return a copy of this PropComparator which will use the given
adaption function on the local side of generated expressions.
"""
return self.__class__(self.prop, self._parentmapper, adapter)
@util.memoized_property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
"""Redefine this object in terms of a polymorphic subclass.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
"""
return self.operate(PropComparator.of_type_op, class_)
def any(self, criterion=None, **kwargs):
"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
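    # Hypothetical mapping example: for a many-to-one User.address relationship,
    # session.query(User).filter(User.address.has(Address.email == 'ed@example.com'))
    # renders an EXISTS subquery against the Address table.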
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
"""
strategy_wildcard_key = None
@util.memoized_property
def _wildcard_path(self):
if self.strategy_wildcard_key:
return ('loaderstrategy', (self.strategy_wildcard_key,))
else:
return None
def _get_context_strategy(self, context, path):
strategy_cls = path._inlined_get_for(self, context, 'loaderstrategy')
if not strategy_cls:
wc_key = self._wildcard_path
if wc_key and wc_key in context.attributes:
strategy_cls = context.attributes[wc_key]
if strategy_cls:
try:
return self._strategies[strategy_cls]
except KeyError:
return self.__init_strategy(strategy_cls)
return self.strategy
def _get_strategy(self, cls):
try:
return self._strategies[cls]
except KeyError:
return self.__init_strategy(cls)
def __init_strategy(self, cls):
self._strategies[cls] = strategy = cls(self)
return strategy
def setup(self, context, entity, path, adapter, **kwargs):
self._get_context_strategy(context, path).\
setup_query(context, entity, path,
adapter, **kwargs)
def create_row_processor(self, context, path, mapper, row, adapter):
return self._get_context_strategy(context, path).\
create_row_processor(context, path,
mapper, row, adapter)
def do_init(self):
self._strategies = {}
self.strategy = self.__init_strategy(self.strategy_class)
def post_instrument_class(self, mapper):
if self.is_primary() and \
not mapper.class_manager._attr_has_impl(self.key):
self.strategy.init_class_attribute(mapper)
class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
Query object generated by scalar or object lazy loaders.
"""
def process_query(self, query):
pass
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
Used when secondary loaders resend existing options to a new
Query."""
self.process_query(query)
class PropertyOption(MapperOption):
"""A MapperOption that is applied to a property off the mapper or
one of its child mappers, identified by a dot-separated key
or list of class-bound attributes. """
def __init__(self, key, mapper=None):
self.key = key
self.mapper = mapper
def process_query(self, query):
self._process(query, True)
def process_query_conditionally(self, query):
self._process(query, False)
def _process(self, query, raiseerr):
paths = self._process_paths(query, raiseerr)
if paths:
self.process_query_property(query, paths)
def process_query_property(self, query, paths):
pass
def __getstate__(self):
d = self.__dict__.copy()
d['key'] = ret = []
for token in util.to_list(self.key):
if isinstance(token, PropComparator):
ret.append((token._parentmapper.class_, token.key))
else:
ret.append(token)
return d
def __setstate__(self, state):
ret = []
for key in state['key']:
if isinstance(key, tuple):
cls, propkey = key
ret.append(getattr(cls, propkey))
else:
ret.append(key)
state['key'] = tuple(ret)
self.__dict__ = state
def _find_entity_prop_comparator(self, query, token, mapper, raiseerr):
if orm_util._is_aliased_class(mapper):
searchfor = mapper
else:
searchfor = orm_util._class_to_mapper(mapper)
for ent in query._mapper_entities:
if ent.corresponds_to(searchfor):
return ent
else:
if raiseerr:
if not list(query._mapper_entities):
raise sa_exc.ArgumentError(
"Query has only expression-based entities - "
"can't find property named '%s'."
% (token, )
)
else:
raise sa_exc.ArgumentError(
"Can't find property '%s' on any entity "
"specified in this Query. Note the full path "
"from root (%s) to target entity must be specified."
% (token, ",".join(str(x) for
x in query._mapper_entities))
)
else:
return None
def _find_entity_basestring(self, query, token, raiseerr):
for ent in query._mapper_entities:
# return only the first _MapperEntity when searching
# based on string prop name. Ideally object
# attributes are used to specify more exactly.
return ent
else:
if raiseerr:
raise sa_exc.ArgumentError(
"Query has only expression-based entities - "
"can't find property named '%s'."
% (token, )
)
else:
return None
def _process_paths(self, query, raiseerr):
"""reconcile the 'key' for this PropertyOption with
the current path and entities of the query.
Return a list of affected paths.
"""
path = orm_util.PathRegistry.root
entity = None
paths = []
no_result = []
# _current_path implies we're in a
# secondary load with an existing path
current_path = list(query._current_path.path)
tokens = deque(self.key)
while tokens:
token = tokens.popleft()
if isinstance(token, str):
# wildcard token
if token.endswith(':*'):
return [path.token(token)]
sub_tokens = token.split(".", 1)
token = sub_tokens[0]
tokens.extendleft(sub_tokens[1:])
# exhaust current_path before
# matching tokens to entities
if current_path:
if current_path[1].key == token:
current_path = current_path[2:]
continue
else:
return no_result
if not entity:
entity = self._find_entity_basestring(
query,
token,
raiseerr)
if entity is None:
return no_result
path_element = entity.entity_zero
mapper = entity.mapper
if hasattr(mapper.class_, token):
prop = getattr(mapper.class_, token).property
else:
if raiseerr:
raise sa_exc.ArgumentError(
"Can't find property named '%s' on the "
"mapped entity %s in this Query. " % (
token, mapper)
)
else:
return no_result
elif isinstance(token, PropComparator):
prop = token.property
# exhaust current_path before
# matching tokens to entities
if current_path:
if current_path[0:2] == \
[token._parententity, prop]:
current_path = current_path[2:]
continue
else:
return no_result
if not entity:
entity = self._find_entity_prop_comparator(
query,
prop.key,
token._parententity,
raiseerr)
if not entity:
return no_result
path_element = entity.entity_zero
mapper = entity.mapper
else:
raise sa_exc.ArgumentError(
"mapper option expects "
"string key or list of attributes")
assert prop is not None
if raiseerr and not prop.parent.common_parent(mapper):
raise sa_exc.ArgumentError("Attribute '%s' does not "
"link from element '%s'" % (token, path_element))
path = path[path_element][prop]
paths.append(path)
if getattr(token, '_of_type', None):
ac = token._of_type
ext_info = inspect(ac)
path_element = mapper = ext_info.mapper
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper, aliased=True,
_use_mapper_path=True)
ext_info = inspect(ac)
path.set(query, "path_with_polymorphic", ext_info)
else:
path_element = mapper = getattr(prop, 'mapper', None)
if mapper is None and tokens:
raise sa_exc.ArgumentError(
"Attribute '%s' of entity '%s' does not "
"refer to a mapped entity" %
(token, entity)
)
if current_path:
# ran out of tokens before
# current_path was exhausted.
assert not tokens
return no_result
return paths
class StrategizedOption(PropertyOption):
"""A MapperOption that affects which LoaderStrategy will be used
for an operation by a StrategizedProperty.
"""
chained = False
def process_query_property(self, query, paths):
strategy = self.get_strategy_class()
if self.chained:
for path in paths:
path.set(
query,
"loaderstrategy",
strategy
)
else:
paths[-1].set(
query,
"loaderstrategy",
strategy
)
def get_strategy_class(self):
raise NotImplementedError()
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
Simple column attributes may add their represented column to the
list of selected columns, *eager loading* properties may add
``LEFT OUTER JOIN`` clauses to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
def __init__(self, parent):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
def init_class_attribute(self, mapper):
pass
def setup_query(self, context, entity, path, adapter, **kwargs):
pass
def create_row_processor(self, context, path, mapper,
row, adapter):
"""Return row processing functions which fulfill the contract
specified by MapperProperty.create_row_processor.
StrategizedProperty delegates its create_row_processor method
directly to this method. """
return None, None, None
def __str__(self):
return str(self.parent_property)
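# --- hedged illustration, not part of the original module ---
# PropertyOption keys are what the public loader options produce: either a
# dot-separated string such as "orders.items" or class-bound attributes, both
# of which are reconciled against the Query's entities by _process_paths()
# above.  A minimal sketch, assuming a typical User/Order/Item mapping (the
# mapped classes and session are hypothetical):
def _example_property_option_keys(session, User, Order):
    from sqlalchemy.orm import joinedload, joinedload_all
    # string form: each dotted token is popped off the deque in _process_paths()
    q1 = session.query(User).options(joinedload("orders.items"))
    # attribute form: PropComparator tokens carry their parent entity with them
    q2 = session.query(User).options(joinedload_all(User.orders, Order.items))
    return q1, q2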
| Drvanon/Game | venv/lib/python3.3/site-packages/sqlalchemy/orm/interfaces.py | Python | apache-2.0 | 28,514 |
import random
import numpy as np
import pandas as pd
import pdb
import django_standalone
from gui.models import Vectors, Experiment, Results, FullResults, Expansions, Clusters
from matplotlib import pylab as plt
import seaborn as sns
from gui.output_utils import get_cv_scores_many_experiment
from gui.user_code import get_demsar_params, pretty_names
from gui.constants import CLASSIFIER, METRIC_DB, BOOTSTRAP_REPS, SIGNIFICANCE_LEVEL
sns.set_style("whitegrid")
rc = {'xtick.labelsize': 16,
'ytick.labelsize': 16,
'axes.labelsize': 18,
'axes.labelweight': '900',
'legend.fontsize': 20,
'font.family': 'cursive',
'font.monospace': 'Nimbus Mono L',
'lines.linewidth': 2,
'lines.markersize': 9,
'xtick.major.pad': 20}
sns.set_context(rc=rc)
plt.rcParams['font.family'] = 'serif'
plt.rcParams['axes.labelsize'] = 22
from IPython import get_ipython
try:
get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = 12, 9 # that's default image size for this
plt.rcParams['savefig.dpi'] = 2 * plt.rcParams['savefig.dpi']
except AttributeError:
# when not running in IPython
pass
def diff_plot(list1, list2, labels, sort_by_magnitude=True, rotation=90):
"""
Compare the scores of pairs of experiment ids and plot a bar chart of the difference in performance.
Can be a bit hard to read though.
:param list1, list2: [1,2,3], [4,5,6] means exp 1 is compared to exp 4, etc ...
:param labels: labels for the x-axis, one per pair of experiments
    :param sort_by_magnitude: if true, pairs on the x-axis are sorted by magnitude of
    difference, otherwise they keep the order in which they were passed in
"""
assert len(list1) == len(list2) == len(labels)
diffs = []
for i, (a, b) in enumerate(zip(list1, list2)):
significance_df, names, mean_scores = get_demsar_params([a, b],
name_format=['id',
'expansions__vectors__id',
'expansions__vectors__composer',
'expansions__vectors__algorithm',
'expansions__vectors__dimensionality'])
diffs.append(mean_scores[0] - mean_scores[1])
if significance_df is None:
continue
if significance_df.significant[0]:
labels[i] += '*'
df = pd.DataFrame(dict(Model=labels, Delta=diffs))
order = df.Model[df.Delta.argsort()].tolist() # seaborn doesn't like DataFrame-s here
print(order)
g = sns.factorplot('Model', 'Delta', data=df, kind='bar',
x_order=order if sort_by_magnitude else None,
aspect=1.5)
g.set_xticklabels(rotation=rotation)
# remove axis labels
for ax in g.axes.flat:
ax.set(xlabel='', ylabel='')
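# --- hedged usage sketch: the experiment ids and labels below are hypothetical ---
# Compare three pairs of experiments (1 vs 4, 2 vs 5, 3 vs 6); a '*' is
# appended to a bar's label when the difference within the pair is significant.
def _example_diff_plot():
    diff_plot([1, 2, 3], [4, 5, 6], ['Add', 'Mult', 'Left'], sort_by_magnitude=False)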
def diff_plot_bar(lists, list_ids, xticks,
rotation=0, xlabel='', ylabel='Accuracy',
hline_at=None, legend_title='Method', **kwargs):
"""
    Compare the scores of groups of experiment ids and plot a bar chart of their accuracies.
    :param lists: groups of experiment ids, e.g. [1,2,3], [4,5,6] means exp 1 is compared to exp 4, etc ...
    :param list_ids: a name for each group of experiments, will appear in the legend
    :param xticks: labels for the x-axis, one per group of compared experiments, e.g.
    list('abc') will label the first pair 'a', etc. Will appear as ticks on x axis.
    If only two lists are provided, a significance test is run for each pair and a * is added
    to its label if the pair is significantly different
:param rotation: angle of x axis ticks
:param hline_at: draw a horizontal line at y=hline_at. Useful for baselines, etc
:param kwargs: extra arguments for sns.factorplot
"""
assert len(set(map(len, lists))) == 1
assert len(list_ids) == len(lists)
df_scores, df_reps, df_groups, df_labels = [], [], [], []
if len(lists) == 2:
for i, (a, b) in enumerate(zip(*lists)):
significance_df, names, mean_scores = get_demsar_params([a, b],
name_format=['id',
'expansions__vectors__id',
'expansions__vectors__composer',
'expansions__vectors__algorithm',
'expansions__vectors__dimensionality'])
if significance_df is None:
continue
if significance_df.significant[0]:
xticks[i] += '*'
for i, exp_ids in enumerate(zip(*lists)):
data, folds = get_cv_scores_many_experiment(exp_ids)
df_scores.extend(data)
df_reps.extend(folds)
df_labels.extend(len(folds) * [xticks[i]])
for list_id in list_ids:
df_groups.extend(len(folds) // len(lists) * [list_id])
df = pd.DataFrame(dict(Accuracy=df_scores, reps=df_reps, Method=df_groups, labels=df_labels))
df.rename(columns={'Method': legend_title}, inplace=True)
g = sns.factorplot(y='Accuracy', hue=legend_title, x='labels', data=df, kind='bar', aspect=1.5, **kwargs);
g.set_xticklabels(rotation=rotation);
# remove axis labels
for ax in g.axes.flat:
ax.set(xlabel=xlabel, ylabel=ylabel)
if hline_at is not None:
plt.axhline(hline_at, color='black')
def dataframe_from_exp_ids(ids, fields_to_include, abbreviate=True):
"""
Extracts performance results for given experiments into a long-form
DataFrame suitable for seaborn.
:param ids: list of ids to extract
:param fields_to_include: dict column_name_in_df -> django_query_to_get, e.g.
{'algo':'expansions__vectors__algorithm', 'comp':'expansions__vectors__composer'}. The DF
in this example will have 4 columns, [score, folds, comp, algo]
    :param abbreviate: whether to run method names through the abbreviation map at constants.ABBREVIATIONS
:return:
"""
data = {}
scores, folds = get_cv_scores_many_experiment(ids)
data['Accuracy'] = scores
data['folds'] = folds
for col_name, long_name in fields_to_include.items():
param_values = pretty_names(ids, [long_name], abbreviate=abbreviate)
data[col_name] = np.repeat(param_values, len(folds) // len(param_values))
for col_name, values in data.items():
print('%s has %d values' % (col_name, len(values)))
df = pd.DataFrame(data)
return df[df.Accuracy > -1] # remove experiments where results are missing
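# --- hedged usage sketch: the experiment ids and query fields are hypothetical ---
# Pull the per-fold accuracies of a few experiments into a long-form frame with
# one column per requested setting, ready for sns.factorplot / sort_df_by.
def _example_dataframe_from_exp_ids():
    df = dataframe_from_exp_ids([1, 2, 3],
                                {'algo': 'expansions__vectors__algorithm',
                                 'comp': 'expansions__vectors__composer'})
    return df, sort_df_by(df, 'comp')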
def sort_df_by(df, by):
"""
Returns the order of items in column `by` in long-form DF that would sort
the DF by mean accuracy across folds. Useful for seaborn's x_order, hue_order, etc
:param df:
:param by:
:return:
"""
mean_scores = df.groupby(by).Accuracy.mean()
return list(mean_scores.index[mean_scores.argsort()])
def random_vect_baseline(corpus='amazon_grouped-tagged'):
r_id = Experiment.objects.get(expansions__vectors__algorithm='random_vect',
labelled=corpus).id
return Results.objects.get(id=r_id, classifier=CLASSIFIER).accuracy_mean
def nondistributional_baseline(corpus='amazon_grouped-tagged',
document_features_tr='J+N+AN+NN',
document_features_ev='AN+NN', **kwargs):
res = Experiment.objects.get(labelled=corpus,
document_features_tr=document_features_tr,
document_features_ev=document_features_ev,
expansions__decode_handler='BaseFeatureHandler')
return Results.objects.get(id=res.id, classifier=CLASSIFIER).accuracy_mean
def settings_of(eid, exclude=[]):
"""
Returns a dict of the settings needed to query for an experiment, e.g.
>>> s = settings_of(21)
>>> Experiment.objects.get(**s).id == 21
Useful in two cases:
1) get very similar experiment
>>> s['clusters__num_clusters'] = 200
>>> Experiment.objects.get(**s)
2) use GUI to find ID of an interesting experiment, then dump settings into a program
:param eid: experiment ID
:param exclude: fields to drop
:return:
"""
from copy import deepcopy
e = Experiment.objects.get(id=eid)
settings = deepcopy(Experiment.objects.filter(id=eid).values()[0])
def _add_vectors_settings(nested_keys, keyword):
settings.update({'%s__vectors__%s' % (keyword, k): v for k, v in nested_keys.items()})
del settings['%s__vectors__id' % keyword]
try:
del settings['%s__vectors_id' % keyword]
except KeyError:
pass
del settings['%s__vectors__path' % keyword]
del settings['%s__vectors__size' % keyword]
del settings['%s__vectors__modified' % keyword]
if e.expansions:
nested_keys = Expansions.objects.filter(id=e.expansions.id).values()[0]
settings.update({'expansions__%s' % k: v for k, v in nested_keys.items()})
del settings['expansions__id']
if e.expansions.vectors:
nested_keys = Vectors.objects.filter(id=e.expansions.vectors.id).values()[0]
_add_vectors_settings(nested_keys, 'expansions')
if e.clusters:
nested_keys = Clusters.objects.filter(id=e.clusters.id).values()[0]
settings.update({'clusters__%s' % k: v for k, v in nested_keys.items()})
del settings['clusters__id']
del settings['clusters__vectors_id']
del settings['clusters__path']
if e.clusters.vectors:
nested_keys = Vectors.objects.filter(id=e.clusters.vectors.id).values()[0]
_add_vectors_settings(nested_keys, 'clusters')
del settings['expansions_id']
del settings['clusters_id']
del settings['id']
del settings['git_hash']
del settings['date_ran']
del settings['minutes_taken']
for key in exclude:
try:
del settings[key]
except KeyError:
pass
return settings
def compare_settings(*ids):
"""
    Compares the settings of several experiments and prints the differences.
Useful for when too many experiments are showing up in plots, because the query
isn't narrow enough
Example:
>>> compare_settings(1, 2)
expansions__vectors__dimensionality exp 1 exp 2
0 expansions__vectors__composer random_neigh random_vect
1 expansions__vectors__algorithm random_neigh random_vect
"""
dicts = [settings_of(i) for i in ids]
data = []
for key in set().union(*[d.keys() for d in dicts]):
in_all = all(key in d for d in dicts)
all_equal = len(set(d.get(key, 'N/A') for d in dicts)) == 1
if not (in_all and all_equal):
data.append([key] + [d.get(key, 'N/A') for d in dicts])
return pd.DataFrame(data, columns=['key'] + ['exp %d' % i for i in ids]).set_index('key')
def sparsify_axis_labels(ax, n=2):
"""
Sparsify tick labels on the given matplotlib axis, keeping only those whose index is divisible by n
"""
for idx, label in enumerate(ax.xaxis.get_ticklabels()):
if idx % n != 0:
label.set_visible(False)
def compare_neighbours(vectors, names, words=[], n_neighbours=4):
"""
Compare the neighbours of several entries in several thesauri
:param vectors: list of vectors to look up entries in
:param names: pretty (human-readable) names for the vectors
:param words: entries to compare. If none are specified, a random sample of
10 unigrams is selected
:return:
"""
if not words:
words = random.sample([x for x in vectors[0].keys() if not x.count('_')], 10)
data = []
for w in words:
this_row = []
for v in vectors:
neigh = v.get_nearest_neighbours(w)
if neigh:
this_row.append(', '.join(n[0] for n in neigh[:n_neighbours]))
else:
this_row.append(None)
data.append(this_row)
return pd.DataFrame(data, index=words, columns=names)
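# --- hedged usage sketch: the vector objects and entries are hypothetical ---
# Side-by-side nearest neighbours of a couple of entries across two thesauri.
def _example_compare_neighbours(vectors_a, vectors_b):
    return compare_neighbours([vectors_a, vectors_b],
                              ['word2vec', 'count windows'],
                              words=['dog/N', 'red/J'],
                              n_neighbours=3)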
def my_bootstrap(*args, **kwargs):
return np.vstack(args)
def tsplot_for_facetgrid(*args, **kwargs):
"""
    sns.tsplot does not work with sns.FacetGrid.map (all conditions in a subplot are drawn in the same color).
    This is because either tsplot misinterprets the color parameter, or because FacetGrid incorrectly
    decides to pass in a color parameter to tsplot. Not sure which it is, but removing that parameter
fixes the problem
"""
if 'color' in kwargs:
kwargs.pop('color')
sns.tsplot(*args, **kwargs)
def performance_table(df):
def ci_width(scores):
low = np.percentile(scores, 100 * (SIGNIFICANCE_LEVEL / 2))
high = np.percentile(scores, 100 - 100 * (SIGNIFICANCE_LEVEL / 2))
return (high - low) / 2
cols = set(df.columns) - set('id Accuracy folds'.split())
print('keeping', cols)
return df.groupby(sorted(list(cols))).agg([np.mean, ci_width]).Accuracy * 100
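# --- hedged usage sketch: experiment ids are hypothetical ---
# Mean accuracy (in %) and half-width of the bootstrap confidence interval,
# grouped by every column that is not id/Accuracy/folds.
def _example_performance_table():
    df = dataframe_from_exp_ids([1, 2], {'comp': 'expansions__vectors__composer'})
    return performance_table(df)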
| mbatchkarov/ExpLosion | notebooks/common_imports.py | Python | bsd-3-clause | 13,446 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import division, with_statement
import re
import os
import shutil
from glob import glob
import subprocess
import tempfile
from urllib import quote
import ox
from ox.django.fields import TupleField
from django.conf import settings
from django.db import models, transaction
from django.db.models import Max
from django.contrib.auth.models import User
from annotation.models import Annotation
from item.models import Item
from archive import extract
import managers
class Edit(models.Model):
class Meta:
unique_together = ("user", "name")
objects = managers.EditManager()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, related_name='edits')
name = models.CharField(max_length=255)
status = models.CharField(max_length=20, default='private')
_status = ['private', 'public', 'featured']
description = models.TextField(default='')
rightslevel = models.IntegerField(db_index=True, default=0)
icon = models.ImageField(default=None, blank=True, null=True,
upload_to=lambda i, x: i.path("icon.jpg"))
poster_frames = TupleField(default=[], editable=False)
subscribed_users = models.ManyToManyField(User, related_name='subscribed_edits')
def __unicode__(self):
return u'%s (%s)' % (self.name, self.user)
def get_id(self):
return u'%s:%s' % (self.user.username, self.name)
def get_absolute_url(self):
return ('/edits/%s' % quote(self.get_id())).replace('%3A', ':')
def add_clip(self, data, index):
ids = [i['id'] for i in self.clips.order_by('index').values('id')]
clip = Clip(edit=self)
if 'annotation' in data and data['annotation']:
clip.annotation = Annotation.objects.get(public_id=data['annotation'])
clip.item = clip.annotation.item
else:
clip.item = Item.objects.get(itemId=data['item'])
clip.start = data['in']
clip.end = data['out']
clip.index = index
        # don't add a clip if in/out are invalid
if not clip.annotation:
duration = clip.item.sort.duration
if clip.start >= clip.end or clip.start >= duration or clip.end > duration:
return False
clip.save()
ids.insert(index, clip.id)
index = 0
with transaction.commit_on_success():
for i in ids:
Clip.objects.filter(id=i).update(index=index)
index += 1
return clip
def accessible(self, user):
return self.user == user or self.status in ('public', 'featured')
def editable(self, user):
if not user or user.is_anonymous():
return False
if self.user == user or \
user.is_staff or \
user.get_profile().capability('canEditFeaturedEdits') == True:
return True
return False
def edit(self, data, user):
for key in data:
if key == 'status':
value = data[key]
if value not in self._status:
value = self._status[0]
if value == 'private':
for user in self.subscribed_users.all():
self.subscribed_users.remove(user)
qs = Position.objects.filter(user=user,
section='section', edit=self)
if qs.count() > 1:
pos = qs[0]
pos.section = 'personal'
pos.save()
elif value == 'featured':
if user.get_profile().capability('canEditFeaturedEdits'):
pos, created = Position.objects.get_or_create(edit=self, user=user,
section='featured')
if created:
qs = Position.objects.filter(user=user, section='featured')
pos.position = qs.aggregate(Max('position'))['position__max'] + 1
pos.save()
Position.objects.filter(edit=self).exclude(id=pos.id).delete()
else:
value = self.status
elif self.status == 'featured' and value == 'public':
Position.objects.filter(edit=self).delete()
pos, created = Position.objects.get_or_create(edit=self,
user=self.user,section='personal')
qs = Position.objects.filter(user=self.user,
section='personal')
pos.position = qs.aggregate(Max('position'))['position__max'] + 1
pos.save()
for u in self.subscribed_users.all():
pos, created = Position.objects.get_or_create(edit=self, user=u,
section='public')
qs = Position.objects.filter(user=u, section='public')
pos.position = qs.aggregate(Max('position'))['position__max'] + 1
pos.save()
self.status = value
elif key == 'name':
data['name'] = re.sub(' \[\d+\]$', '', data['name']).strip()
if not data['name']:
data['name'] = "Untitled"
name = data['name']
num = 1
while Edit.objects.filter(name=name, user=self.user).exclude(id=self.id).count()>0:
num += 1
name = data['name'] + ' [%d]' % num
self.name = name
elif key == 'description':
self.description = ox.sanitize_html(data['description'])
elif key == 'rightslevel':
self.rightslevel = int(data['rightslevel'])
if 'position' in data:
pos, created = Position.objects.get_or_create(edit=self, user=user)
pos.position = data['position']
pos.section = 'featured'
if self.status == 'private':
pos.section = 'personal'
pos.save()
if 'type' in data:
self.type = data['type'] == 'pdf' and 'pdf' or 'html'
if 'posterFrames' in data:
self.poster_frames = tuple(data['posterFrames'])
self.save()
if 'posterFrames' in data:
self.update_icon()
def path(self, name=''):
h = "%07d" % self.id
return os.path.join('edits', h[:2], h[2:4], h[4:6], h[6:], name)
def get_items(self, user=None):
return Item.objects.filter(editclips__id__in=self.clips.all()).distinct()
def update_icon(self):
frames = []
if not self.poster_frames:
items = self.get_items(self.user).filter(rendered=True)
if items.count():
poster_frames = []
for i in range(0, items.count(), max(1, int(items.count()/4))):
poster_frames.append({
'item': items[int(i)].itemId,
'position': items[int(i)].poster_frame
})
self.poster_frames = tuple(poster_frames)
self.save()
for i in self.poster_frames:
qs = Item.objects.filter(itemId=i['item'])
if qs.count() > 0:
frame = qs[0].frame(i['position'])
if frame:
frames.append(frame)
self.icon.name = self.path('icon.jpg')
icon = self.icon.path
if frames:
while len(frames) < 4:
frames += frames
folder = os.path.dirname(icon)
ox.makedirs(folder)
for f in glob("%s/icon*.jpg" % folder):
os.unlink(f)
cmd = [
settings.LIST_ICON,
'-f', ','.join(frames),
'-o', icon
]
p = subprocess.Popen(cmd)
p.wait()
self.save()
def get_icon(self, size=16):
path = self.path('icon%d.jpg' % size)
path = os.path.join(settings.MEDIA_ROOT, path)
if not os.path.exists(path):
folder = os.path.dirname(path)
ox.makedirs(folder)
if self.icon and os.path.exists(self.icon.path):
source = self.icon.path
max_size = min(self.icon.width, self.icon.height)
else:
source = os.path.join(settings.STATIC_ROOT, 'jpg/list256.jpg')
max_size = 256
if size < max_size:
extract.resize_image(source, path, size=size)
else:
path = source
return path
def json(self, keys=None, user=None):
if not keys:
keys=[
'description',
'editable',
'rightslevel',
'id',
'items',
'clips',
'duration',
'name',
'posterFrames',
'status',
'subscribed',
'user'
]
response = {
'type': 'static'
}
_map = {
'posterFrames': 'poster_frames'
}
for key in keys:
if key == 'id':
response[key] = self.get_id()
elif key == 'items':
response[key] = self.clips.all().count()
elif key == 'clips':
response[key] = [c.json(user) for c in self.clips.all().order_by('index')]
elif key == 'duration':
if 'clips' in response:
clips = response['clips']
else:
clips = [c.json(user) for c in self.clips.all().order_by('index')]
response[key] = sum([c['duration'] for c in clips])
elif key == 'editable':
response[key] = self.editable(user)
elif key == 'user':
response[key] = self.user.username
elif key == 'subscribers':
response[key] = self.subscribed_users.all().count()
elif key == 'subscribed':
if user and not user.is_anonymous():
response[key] = self.subscribed_users.filter(id=user.id).exists()
elif hasattr(self, _map.get(key, key)):
response[key] = getattr(self, _map.get(key,key))
return response
def render(self):
        # creating a new file from clips
        # NOTE: this is an unfinished stub - `path` (the source video file of
        # each clip) is never resolved here and the subprocess calls below are
        # left disabled; avconv's -t expects a duration, so the clip duration
        # is passed rather than the raw out point.
        tmp = tempfile.mkdtemp()
        clips = []
        for clip in self.clips.all().order_by('index'):
            data = clip.json()
            clips.append(os.path.join(tmp, '%06d.webm' % data['index']))
            cmd = ['avconv', '-i', path,
                   '-ss', str(data['in']), '-t', str(data['duration']),
                   '-vcodec', 'copy', '-acodec', 'copy',
                   clips[-1]]
#p = subprocess.Popen(cmd)
#p.wait()
cmd = ['mkvmerge', clips[0]] \
+ ['+'+c for c in clips[1:]] \
+ [os.path.join(tmp, 'render.webm')]
#p = subprocess.Popen(cmd)
#p.wait()
shutil.rmtree(tmp)
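# --- hedged usage sketch, not part of the original module ---
# Appending a clip to an edit from a plain request payload; the item id and
# in/out points below are illustrative only.
def _example_add_clip(edit):
    return edit.add_clip({'item': '0123456', 'in': 12.5, 'out': 30.0}, index=0)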
class Clip(models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
edit = models.ForeignKey(Edit, related_name='clips')
index = models.IntegerField(default=0)
item = models.ForeignKey(Item, null=True, default=None, related_name='editclip')
annotation = models.ForeignKey(Annotation, null=True, default=None, related_name='editclip')
start = models.FloatField(default=0)
end = models.FloatField(default=0)
duration = models.FloatField(default=0)
hue = models.FloatField(default=0)
saturation= models.FloatField(default=0)
lightness= models.FloatField(default=0)
volume = models.FloatField(default=0)
def __unicode__(self):
if self.annotation:
return u'%s' % self.annotation.public_id
return u'%s/%0.3f-%0.3f' % (self.item.itemId, self.start, self.end)
def get_id(self):
return ox.toAZ(self.id)
def save(self, *args, **kwargs):
if self.duration != self.end - self.start:
self.update_calculated_values()
super(Clip, self).save(*args, **kwargs)
def update_calculated_values(self):
start = self.start
end = self.end
self.duration = end - start
if int(end*25) - int(start*25) > 0:
self.hue, self.saturation, self.lightness = extract.average_color(
self.item.timeline_prefix, self.start, self.end)
self.volume = extract.average_volume(self.item.timeline_prefix, self.start, self.end)
else:
self.hue = self.saturation = self.lightness = 0
self.volume = 0
def json(self, user=None):
data = {
'id': self.get_id(),
'index': self.index
}
if self.annotation:
data['annotation'] = self.annotation.public_id
data['item'] = self.item.itemId
data['in'] = self.annotation.start
data['out'] = self.annotation.end
data['parts'] = self.annotation.item.json['parts']
data['durations'] = self.annotation.item.json['durations']
else:
data['item'] = self.item.itemId
data['in'] = self.start
data['out'] = self.end
data['parts'] = self.item.json['parts']
data['durations'] = self.item.json['durations']
for key in ('title', 'director', 'year', 'videoRatio'):
value = self.item.json.get(key)
if value:
data[key] = value
data['duration'] = data['out'] - data['in']
data['cuts'] = tuple([c for c in self.item.get('cuts') if c > self.start and c < self.end])
return data
class Position(models.Model):
class Meta:
unique_together = ("user", "edit", "section")
edit = models.ForeignKey(Edit, related_name='position')
user = models.ForeignKey(User, related_name='edit_position')
    section = models.CharField(max_length=255)
position = models.IntegerField(default=0)
def __unicode__(self):
return u'%s/%s/%s' % (self.section, self.position, self.edit)
| maysara/pandora_image | pandora/edit/models.py | Python | gpl-3.0 | 14,651 |
from starlette.datastructures import URL
from starlette.responses import RedirectResponse
from starlette.types import ASGIApp, Receive, Scope, Send
class HTTPSRedirectMiddleware:
def __init__(self, app: ASGIApp) -> None:
self.app = app
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] in ("http", "websocket") and scope["scheme"] in ("http", "ws"):
url = URL(scope=scope)
redirect_scheme = {"http": "https", "ws": "wss"}[url.scheme]
netloc = url.hostname if url.port in (80, 443) else url.netloc
url = url.replace(scheme=redirect_scheme, netloc=netloc)
response = RedirectResponse(url, status_code=307)
await response(scope, receive, send)
else:
await self.app(scope, receive, send)
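# --- hedged usage sketch, not part of the original module ---
# Wiring the middleware into an application: any plain http/ws request is
# answered with a 307 redirect to its https/wss equivalent before it reaches
# the routes.  The app and route below are illustrative only.
if __name__ == "__main__":  # pragma: no cover
    from starlette.applications import Starlette
    from starlette.responses import PlainTextResponse
    from starlette.routing import Route
    async def homepage(request):
        return PlainTextResponse("hello over https")
    app = Starlette(routes=[Route("/", homepage)])
    app.add_middleware(HTTPSRedirectMiddleware)
    # typically served with an ASGI server, e.g. `uvicorn module:app`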
| encode/starlette | starlette/middleware/httpsredirect.py | Python | bsd-3-clause | 848 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dnsaction64(base_resource) :
""" Configuration for dns64 action resource. """
def __init__(self) :
self._actionname = ""
self._prefix = ""
self._mappedrule = ""
self._excluderule = ""
self._builtin = []
self.___count = 0
@property
def actionname(self) :
ur"""Name of the dns64 action.
"""
try :
return self._actionname
except Exception as e:
raise e
@actionname.setter
def actionname(self, actionname) :
ur"""Name of the dns64 action.
"""
try :
self._actionname = actionname
except Exception as e:
raise e
@property
def prefix(self) :
ur"""The dns64 prefix to be used if the after evaluating the rules.
"""
try :
return self._prefix
except Exception as e:
raise e
@prefix.setter
def prefix(self, prefix) :
ur"""The dns64 prefix to be used if the after evaluating the rules.
"""
try :
self._prefix = prefix
except Exception as e:
raise e
@property
def mappedrule(self) :
ur"""The expression to select the criteria for ipv4 addresses to be used for synthesis.
Only if the mappedrule is evaluated to true the corresponding ipv4 address is used for synthesis using respective prefix,
otherwise the A RR is discarded.
"""
try :
return self._mappedrule
except Exception as e:
raise e
@mappedrule.setter
def mappedrule(self, mappedrule) :
ur"""The expression to select the criteria for ipv4 addresses to be used for synthesis.
Only if the mappedrule is evaluated to true the corresponding ipv4 address is used for synthesis using respective prefix,
otherwise the A RR is discarded.
"""
try :
self._mappedrule = mappedrule
except Exception as e:
raise e
@property
def excluderule(self) :
ur"""The expression to select the criteria for eliminating the corresponding ipv6 addresses from the response.
"""
try :
return self._excluderule
except Exception as e:
raise e
@excluderule.setter
def excluderule(self, excluderule) :
ur"""The expression to select the criteria for eliminating the corresponding ipv6 addresses from the response.
"""
try :
self._excluderule = excluderule
except Exception as e:
raise e
@property
def builtin(self) :
ur"""Flag to determine whether dna64action is default or not.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE.
"""
try :
return self._builtin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(dnsaction64_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.dnsaction64
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.actionname is not None :
return str(self.actionname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add dnsaction64.
"""
try :
if type(resource) is not list :
addresource = dnsaction64()
addresource.actionname = resource.actionname
addresource.prefix = resource.prefix
addresource.mappedrule = resource.mappedrule
addresource.excluderule = resource.excluderule
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ dnsaction64() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].actionname = resource[i].actionname
addresources[i].prefix = resource[i].prefix
addresources[i].mappedrule = resource[i].mappedrule
addresources[i].excluderule = resource[i].excluderule
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete dnsaction64.
"""
try :
if type(resource) is not list :
deleteresource = dnsaction64()
if type(resource) != type(deleteresource):
deleteresource.actionname = resource
else :
deleteresource.actionname = resource.actionname
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ dnsaction64() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].actionname = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ dnsaction64() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].actionname = resource[i].actionname
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update dnsaction64.
"""
try :
if type(resource) is not list :
updateresource = dnsaction64()
updateresource.actionname = resource.actionname
updateresource.prefix = resource.prefix
updateresource.mappedrule = resource.mappedrule
updateresource.excluderule = resource.excluderule
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ dnsaction64() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].actionname = resource[i].actionname
updateresources[i].prefix = resource[i].prefix
updateresources[i].mappedrule = resource[i].mappedrule
updateresources[i].excluderule = resource[i].excluderule
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of dnsaction64 resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = dnsaction64()
if type(resource) != type(unsetresource):
unsetresource.actionname = resource
else :
unsetresource.actionname = resource.actionname
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ dnsaction64() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].actionname = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ dnsaction64() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].actionname = resource[i].actionname
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the dnsaction64 resources that are configured on netscaler.
"""
try :
if not name :
obj = dnsaction64()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = dnsaction64()
obj.actionname = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [dnsaction64() for _ in range(len(name))]
obj = [dnsaction64() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = dnsaction64()
obj[i].actionname = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of dnsaction64 resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = dnsaction64()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the dnsaction64 resources configured on NetScaler.
"""
try :
obj = dnsaction64()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of dnsaction64 resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = dnsaction64()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
class dnsaction64_response(base_response) :
def __init__(self, length=1) :
self.dnsaction64 = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.dnsaction64 = [dnsaction64() for _ in range(length)]
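# --- hedged usage sketch, not part of the original SDK file ---
# A typical add/get/delete round trip through the NITRO client.  The NSIP and
# credentials are placeholders, and nitro_service's constructor signature is
# assumed from the SDK's standard samples.
def _example_dnsaction64_roundtrip():
	from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
	client = nitro_service("192.0.2.10", "http")
	client.login("nsroot", "nsroot")
	action = dnsaction64()
	action.actionname = "map64_action"
	action.prefix = "64:ff9b::/96"
	dnsaction64.add(client, action)
	fetched = dnsaction64.get(client, "map64_action")
	dnsaction64.delete(client, "map64_action")
	client.logout()
	return fetched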
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/dns/dnsaction64.py | Python | apache-2.0 | 10,595 |