column         dtype          value range / classes
-------------  -------------  ------------------------------------------------------
repo_name      stringlengths  6 to 100
path           stringlengths  4 to 294
copies         stringlengths  1 to 5
size           stringlengths  4 to 6
content        stringlengths  606 to 896k
license        stringclasses  15 values
var_hash       int64          -9,223,186,179,200,150,000 to 9,223,291,175B
doc_hash       int64          -9,223,304,365,658,930,000 to 9,223,309,051B
line_mean      float64        3.5 to 99.8
line_max       int64          13 to 999
alpha_frac     float64        0.25 to 0.97
autogenerated  bool           1 class
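
For illustration only, a minimal sketch of how rows with this schema might be filtered once loaded as plain Python dictionaries. The loading step, the helper name, and the reading of alpha_frac as a 0-1 alphabetic-character fraction are assumptions, not part of the dump; the field names and sample values are taken from the rows below.

from typing import Dict, Iterable, Iterator


def permissive_python_rows(rows: Iterable[Dict]) -> Iterator[Dict]:
    """Yield non-autogenerated .py rows released under a permissive license."""
    permissive = {"mit", "bsd-3-clause", "apache-2.0"}  # license values seen in the rows below
    for row in rows:
        if row["autogenerated"]:
            continue  # bool column: skip files flagged as generated
        if not row["path"].endswith(".py"):
            continue  # keep Python sources only
        if row["license"] in permissive and row["alpha_frac"] >= 0.5:
            yield row  # the 0.5 cut on alpha_frac is an arbitrary illustrative threshold


# One row shaped like the dump (content shortened for brevity):
example_row = {
    "repo_name": "janssen/kivy", "path": "kivy/garden/__init__.py",
    "copies": "59", "size": "3061", "content": "...", "license": "mit",
    "var_hash": 1220862463078084900, "doc_hash": 4460549505339439600,
    "line_mean": 27.082569, "line_max": 79, "alpha_frac": 0.67821,
    "autogenerated": False,
}
print(next(permissive_python_rows([example_row]))["path"])  # kivy/garden/__init__.py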

repo_name: doduytrung/odoo-8.0
path: addons/purchase/stock.py
copies: 6
size: 15030
content:
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import SUPERUSER_ID from openerp.osv import fields, osv from openerp.tools.translate import _ class stock_move(osv.osv): _inherit = 'stock.move' _columns = { 'purchase_line_id': fields.many2one('purchase.order.line', 'Purchase Order Line', ondelete='set null', select=True, readonly=True), } def write(self, cr, uid, ids, vals, context=None): if isinstance(ids, (int, long)): ids = [ids] res = super(stock_move, self).write(cr, uid, ids, vals, context=context) from openerp import workflow if vals.get('state') in ['done', 'cancel']: for move in self.browse(cr, uid, ids, context=context): if move.purchase_line_id and move.purchase_line_id.order_id: order_id = move.purchase_line_id.order_id.id # update linked purchase order as superuser as the warehouse # user may not have rights to access purchase.order if self.pool.get('purchase.order').test_moves_done(cr, uid, [order_id], context=context): workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_done', cr) if self.pool.get('purchase.order').test_moves_except(cr, uid, [order_id], context=context): workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_cancel', cr) return res def copy(self, cr, uid, id, default=None, context=None): default = default or {} context = context or {} if not default.get('split_from'): #we don't want to propagate the link to the purchase order line except in case of move split default['purchase_line_id'] = False return super(stock_move, self).copy(cr, uid, id, default, context) def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None): if move.purchase_line_id: invoice_line_vals['purchase_line_id'] = move.purchase_line_id.id invoice_line_vals['account_analytic_id'] = move.purchase_line_id.account_analytic_id.id or False invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context) if move.purchase_line_id: purchase_line = move.purchase_line_id self.pool.get('purchase.order.line').write(cr, uid, [purchase_line.id], { 'invoice_lines': [(4, invoice_line_id)] }, context=context) self.pool.get('purchase.order').write(cr, uid, [purchase_line.order_id.id], { 'invoice_ids': [(4, invoice_line_vals['invoice_id'])], }) purchase_line_obj = self.pool.get('purchase.order.line') purchase_obj = self.pool.get('purchase.order') invoice_line_obj = self.pool.get('account.invoice.line') purchase_id = move.purchase_line_id.order_id.id purchase_line_ids = purchase_line_obj.search(cr, uid, [('order_id', '=', purchase_id), ('invoice_lines', '=', False), '|', ('product_id', '=', 
False), ('product_id.type', '=', 'service')], context=context) if purchase_line_ids: inv_lines = [] for po_line in purchase_line_obj.browse(cr, uid, purchase_line_ids, context=context): acc_id = purchase_obj._choose_account_from_po_line(cr, uid, po_line, context=context) inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, po_line, context=context) inv_line_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context) inv_lines.append(inv_line_id) po_line.write({'invoice_lines': [(4, inv_line_id)]}) invoice_line_obj.write(cr, uid, inv_lines, {'invoice_id': invoice_line_vals['invoice_id']}, context=context) return invoice_line_id def _get_master_data(self, cr, uid, move, company, context=None): if move.purchase_line_id: purchase_order = move.purchase_line_id.order_id return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id elif move.picking_id: # In case of an extra move, it is better to use the data from the original moves for purchase_move in move.picking_id.move_lines: if purchase_move.purchase_line_id: purchase_order = purchase_move.purchase_line_id.order_id return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id partner = move.picking_id and move.picking_id.partner_id or False code = self.get_code_from_locs(cr, uid, move, context=context) if partner and partner.property_product_pricelist_purchase and code == 'incoming': currency = partner.property_product_pricelist_purchase.currency_id.id return partner, uid, currency return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context) def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None): res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context) if move.purchase_line_id: purchase_line = move.purchase_line_id res['invoice_line_tax_id'] = [(6, 0, [x.id for x in purchase_line.taxes_id])] res['price_unit'] = purchase_line.price_unit return res def attribute_price(self, cr, uid, move, context=None): """ Attribute price to move, important in inter-company moves or receipts with only one partner """ code = self.get_code_from_locs(cr, uid, move, context=context) if not move.purchase_line_id and code == 'incoming' and not move.price_unit: partner = move.picking_id and move.picking_id.partner_id or False price = False # If partner given, search price in its purchase pricelist if partner and partner.property_product_pricelist_purchase: pricelist_obj = self.pool.get("product.pricelist") pricelist = partner.property_product_pricelist_purchase.id price = pricelist_obj.price_get(cr, uid, [pricelist], move.product_id.id, move.product_uom_qty, partner, { 'uom': move.product_uom.id, 'date': move.date, })[pricelist] if price: return self.write(cr, uid, [move.id], {'price_unit': price}, context=context) super(stock_move, self).attribute_price(cr, uid, move, context=context) class stock_picking(osv.osv): _inherit = 'stock.picking' def _get_to_invoice(self, cr, uid, ids, name, args, context=None): res = {} for picking in self.browse(cr, uid, ids, context=context): res[picking.id] = False for move in picking.move_lines: if move.purchase_line_id and move.purchase_line_id.order_id.invoice_method == 'picking': if not move.move_orig_ids: res[picking.id] = True return res def _get_picking_to_recompute(self, cr, uid, ids, context=None): picking_ids = set() for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context): if move.picking_id and move.purchase_line_id: 
picking_ids.add(move.picking_id.id) return list(picking_ids) _columns = { 'reception_to_invoice': fields.function(_get_to_invoice, type='boolean', string='Invoiceable on incoming shipment?', help='Does the picking contains some moves related to a purchase order invoiceable on the receipt?', store={ 'stock.move': (_get_picking_to_recompute, ['purchase_line_id', 'picking_id'], 10), }), } def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None): purchase_obj = self.pool.get("purchase.order") purchase_line_obj = self.pool.get('purchase.order.line') invoice_line_obj = self.pool.get('account.invoice.line') invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context) return invoice_id def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None): inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context) if move.purchase_line_id and move.purchase_line_id.order_id: purchase = move.purchase_line_id.order_id inv_vals.update({ 'fiscal_position': purchase.fiscal_position.id, 'payment_term': purchase.payment_term_id.id, }) return inv_vals class stock_warehouse(osv.osv): _inherit = 'stock.warehouse' _columns = { 'buy_to_resupply': fields.boolean('Purchase to resupply this warehouse', help="When products are bought, they can be delivered to this warehouse"), 'buy_pull_id': fields.many2one('procurement.rule', 'BUY rule'), } _defaults = { 'buy_to_resupply': True, } def _get_buy_pull_rule(self, cr, uid, warehouse, context=None): route_obj = self.pool.get('stock.location.route') data_obj = self.pool.get('ir.model.data') try: buy_route_id = data_obj.get_object_reference(cr, uid, 'purchase', 'route_warehouse0_buy')[1] except: buy_route_id = route_obj.search(cr, uid, [('name', 'like', _('Buy'))], context=context) buy_route_id = buy_route_id and buy_route_id[0] or False if not buy_route_id: raise osv.except_osv(_('Error!'), _('Can\'t find any generic Buy route.')) return { 'name': self._format_routename(cr, uid, warehouse, _(' Buy'), context=context), 'location_id': warehouse.in_type_id.default_location_dest_id.id, 'route_id': buy_route_id, 'action': 'buy', 'picking_type_id': warehouse.in_type_id.id, 'warehouse_id': warehouse.id, } def create_routes(self, cr, uid, ids, warehouse, context=None): pull_obj = self.pool.get('procurement.rule') res = super(stock_warehouse, self).create_routes(cr, uid, ids, warehouse, context=context) if warehouse.buy_to_resupply: buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context) buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context) res['buy_pull_id'] = buy_pull_id return res def write(self, cr, uid, ids, vals, context=None): pull_obj = self.pool.get('procurement.rule') if isinstance(ids, (int, long)): ids = [ids] if 'buy_to_resupply' in vals: if vals.get("buy_to_resupply"): for warehouse in self.browse(cr, uid, ids, context=context): if not warehouse.buy_pull_id: buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context) buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context) vals['buy_pull_id'] = buy_pull_id else: for warehouse in self.browse(cr, uid, ids, context=context): if warehouse.buy_pull_id: buy_pull_id = pull_obj.unlink(cr, uid, warehouse.buy_pull_id.id, context=context) return super(stock_warehouse, self).write(cr, uid, ids, vals, context=None) def get_all_routes_for_wh(self, cr, uid, warehouse, context=None): all_routes = super(stock_warehouse, 
self).get_all_routes_for_wh(cr, uid, warehouse, context=context) if warehouse.buy_to_resupply and warehouse.buy_pull_id and warehouse.buy_pull_id.route_id: all_routes += [warehouse.buy_pull_id.route_id.id] return all_routes def _get_all_products_to_resupply(self, cr, uid, warehouse, context=None): res = super(stock_warehouse, self)._get_all_products_to_resupply(cr, uid, warehouse, context=context) if warehouse.buy_pull_id and warehouse.buy_pull_id.route_id: for product_id in res: for route in self.pool.get('product.product').browse(cr, uid, product_id, context=context).route_ids: if route.id == warehouse.buy_pull_id.route_id.id: res.remove(product_id) break return res def _handle_renaming(self, cr, uid, warehouse, name, code, context=None): res = super(stock_warehouse, self)._handle_renaming(cr, uid, warehouse, name, code, context=context) pull_obj = self.pool.get('procurement.rule') #change the buy pull rule name if warehouse.buy_pull_id: pull_obj.write(cr, uid, warehouse.buy_pull_id.id, {'name': warehouse.buy_pull_id.name.replace(warehouse.name, name, 1)}, context=context) return res def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None): res = super(stock_warehouse, self).change_route(cr, uid, ids, warehouse, new_reception_step=new_reception_step, new_delivery_step=new_delivery_step, context=context) if warehouse.in_type_id.default_location_dest_id != warehouse.buy_pull_id.location_id: self.pool.get('procurement.rule').write(cr, uid, warehouse.buy_pull_id.id, {'location_id': warehouse.in_type_id.default_location_dest_id.id}, context=context) return res
license: agpl-3.0
var_hash: -985,875,889,960,986,100
doc_hash: -280,679,034,323,323,170
line_mean: 54.666667
line_max: 217
alpha_frac: 0.597006
autogenerated: false

repo_name: janssen/kivy
path: kivy/garden/__init__.py
copies: 59
size: 3061
content:
'''
Garden
======

.. versionadded:: 1.7.0

.. versionchanged:: 1.8.0

Garden is a project to centralize addons for Kivy maintained by users. You can
find more information at `Kivy Garden <http://kivy-garden.github.io/>`_.

All the garden packages are centralized on the `kivy-garden Github
<https://github.com/kivy-garden>`_ repository.

Garden is now distributed as a separate Python module, kivy-garden. You can
install it with pip::

    pip install kivy-garden

The garden module does not initially include any packages. You can download
them with the garden tool installed by the pip package::

    # Installing a garden package
    garden install graph

    # Upgrade a garden package
    garden install --upgrade graph

    # Uninstall a garden package
    garden uninstall graph

    # List all the garden packages installed
    garden list

    # Search new packages
    garden search

    # Search all the packages that contain "graph"
    garden search graph

    # Show the help
    garden --help

All the garden packages are installed by default in `~/.kivy/garden`.

.. Note:: In previous versions of Kivy, garden was a tool at
   kivy/tools/garden. This no longer exists, but the kivy-garden module
   provides exactly the same functionality.

Packaging
---------

If you want to include garden packages in your application, you can add `--app`
to the `install` command. This will create a `libs/garden` directory in your
current directory which will be used by `kivy.garden`.

For example::

    cd myapp
    garden install --app graph

'''

__path__ = 'kivy.garden'

import sys
import imp
from os.path import dirname, join, realpath, exists, abspath
from kivy import kivy_home_dir
import kivy

#: system path where garden modules can be installed
garden_system_dir = join(kivy_home_dir, 'garden')
garden_kivy_dir = abspath(join(dirname(kivy.__file__), 'garden'))

#: application path where garden modules can be installed
if getattr(sys, 'frozen', False) and getattr(sys, '_MEIPASS', False):
    garden_app_dir = join(realpath(sys._MEIPASS), 'libs', 'garden')
else:
    garden_app_dir = join(realpath(dirname(sys.argv[0])), 'libs', 'garden')


class GardenImporter(object):

    def find_module(self, fullname, path):
        if path == 'kivy.garden':
            return self

    def load_module(self, fullname):
        assert(fullname.startswith('kivy.garden'))
        moddir = join(garden_kivy_dir, fullname.split('.', 2)[-1])
        if exists(moddir):
            return self._load_module(fullname, moddir)
        modname = fullname.split('.', 1)[-1]
        for directory in (garden_app_dir, garden_system_dir):
            moddir = join(directory, modname)
            if exists(moddir):
                return self._load_module(fullname, moddir)

    def _load_module(self, fullname, moddir):
        mod = imp.load_module(fullname, None, moddir,
                              ('', '', imp.PKG_DIRECTORY))
        return mod


# insert the garden importer as ultimate importer
sys.meta_path.append(GardenImporter())
license: mit
var_hash: 1,220,862,463,078,084,900
doc_hash: 4,460,549,505,339,439,600
line_mean: 27.082569
line_max: 79
alpha_frac: 0.67821
autogenerated: false

repo_name: ltilve/ChromiumGStreamerBackend
path: chromecast/tools/build/generate_test_lists.py
copies: 32
size: 4698
content:
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Helper script to generate unit test lists for the Chromecast build scripts. """ import glob import optparse import sys def CombineList(test_files_dir, list_output_file, include_filters, additional_runtime_options): """Writes a unit test file in a format compatible for Chromecast scripts. If include_filters is True, uses filters to create a test runner list and also include additional options, if any. Otherwise, creates a list only of the tests to build. Args: test_files_dir: Path to the intermediate directory containing tests/filters. list_output_file: Path to write the unit test file out to. include_filters: Whether or not to include the filters when generating the test list. """ # GYP targets may provide a numbered priority for the filename. Sort to # use that priority. test_files = sorted(glob.glob(test_files_dir + "/*.tests")) filter_files = sorted(glob.glob(test_files_dir + "/*.filters")) test_bin_set = set() for test_filename in test_files: with open(test_filename, "r") as test_file: for test_file_line in test_file: # Binary name may be a simple test target (cast_net_unittests) or be a # qualified gyp path (../base.gyp:base_unittests). test_binary_name = test_file_line.split(":")[-1].strip() test_bin_set.add(test_binary_name) test_filters = {} if include_filters: for filter_filename in filter_files: with open(filter_filename, "r") as filter_file: for filter_line in filter_file: filter = filter_line.strip() test_binary_name = filter.split(" ", 1)[0] if test_binary_name not in test_bin_set: raise Exception("Filter found for unknown target: " + test_binary_name) # Note: This may overwrite a previous rule. This is okay, since higher # priority files are evaluated after lower priority files. test_filters[test_binary_name] = filter test_binaries = ( list(test_bin_set - set(test_filters.keys())) + test_filters.values()) if additional_runtime_options: lines = [ binary + " " + additional_runtime_options for binary in test_binaries ] else: lines = test_binaries with open(list_output_file, "w") as f: f.write("\n".join(sorted(lines))) def CreateList(inputs, list_output_file): with open(list_output_file, "w") as f: f.write("\n".join(inputs)) def DoMain(argv): """Main method. 
Runs helper commands for generating unit test lists.""" parser = optparse.OptionParser( """usage: %prog [<options>] <command> [<test names>] Valid commands: create_list prints all given test names/args to a file, one line per string pack_build packs all test files from the given output directory into a single test list file pack_run packs all test and filter files from the given output directory into a single test list file """) parser.add_option("-o", action="store", dest="list_output_file", help="Output path in which to write the test list.") parser.add_option("-t", action="store", dest="test_files_dir", help="Intermediate test list directory.") parser.add_option("-a", action="store", dest="additional_runtime_options", help="Additional options applied to all tests.") options, inputs = parser.parse_args(argv) list_output_file = options.list_output_file test_files_dir = options.test_files_dir additional_runtime_options = options.additional_runtime_options if len(inputs) < 1: parser.error("No command given.\n") command = inputs[0] test_names = inputs[1:] if not list_output_file: parser.error("Output path (-o) is required.\n") if command == "create_list": return CreateList(test_names, list_output_file) if command == "pack_build": if not test_files_dir: parser.error("pack_build require a test files directory (-t).\n") return CombineList(test_files_dir, list_output_file, False, None) if command == "pack_run": if not test_files_dir: parser.error("pack_run require a test files directory (-t).\n") return CombineList(test_files_dir, list_output_file, True, additional_runtime_options) parser.error("Invalid command specified.") if __name__ == "__main__": DoMain(sys.argv[1:])
license: bsd-3-clause
var_hash: -2,746,610,009,723,808,000
doc_hash: -1,976,887,254,407,021,300
line_mean: 35.138462
line_max: 80
alpha_frac: 0.649425
autogenerated: false

repo_name: ojii/sandlib
path: lib/lib_pypy/_ctypes/primitive.py
copies: 1
size: 11496
content:
import _ffi import _rawffi import weakref import sys SIMPLE_TYPE_CHARS = "cbBhHiIlLdfguzZqQPXOv?" from _ctypes.basics import _CData, _CDataMeta, cdata_from_address,\ CArgObject from _ctypes.builtin import ConvMode from _ctypes.array import Array from _ctypes.pointer import _Pointer, as_ffi_pointer #from _ctypes.function import CFuncPtr # this import is moved at the bottom # because else it's circular class NULL(object): pass NULL = NULL() TP_TO_DEFAULT = { 'c': 0, 'u': 0, 'b': 0, 'B': 0, 'h': 0, 'H': 0, 'i': 0, 'I': 0, 'l': 0, 'L': 0, 'q': 0, 'Q': 0, 'f': 0.0, 'd': 0.0, 'g': 0.0, 'P': None, # not part of struct 'O': NULL, 'z': None, 'Z': None, '?': False, } if sys.platform == 'win32': TP_TO_DEFAULT['X'] = NULL TP_TO_DEFAULT['v'] = 0 DEFAULT_VALUE = object() class GlobalPyobjContainer(object): def __init__(self): self.objs = [] def add(self, obj): num = len(self.objs) self.objs.append(weakref.ref(obj)) return num def get(self, num): return self.objs[num]() pyobj_container = GlobalPyobjContainer() def generic_xxx_p_from_param(cls, value): if value is None: return cls(None) if isinstance(value, basestring): return cls(value) if isinstance(value, _SimpleCData) and \ type(value)._type_ in 'zZP': return value return None # eventually raise def from_param_char_p(cls, value): "used by c_char_p and c_wchar_p subclasses" res = generic_xxx_p_from_param(cls, value) if res is not None: return res if isinstance(value, (Array, _Pointer)): from ctypes import c_char, c_byte, c_wchar if type(value)._type_ in [c_char, c_byte, c_wchar]: return value def from_param_void_p(cls, value): "used by c_void_p subclasses" res = generic_xxx_p_from_param(cls, value) if res is not None: return res if isinstance(value, Array): return value if isinstance(value, (_Pointer, CFuncPtr)): return cls.from_address(value._buffer.buffer) if isinstance(value, (int, long)): return cls(value) FROM_PARAM_BY_TYPE = { 'z': from_param_char_p, 'Z': from_param_char_p, 'P': from_param_void_p, } class SimpleType(_CDataMeta): def __new__(self, name, bases, dct): try: tp = dct['_type_'] except KeyError: for base in bases: if hasattr(base, '_type_'): tp = base._type_ break else: raise AttributeError("cannot find _type_ attribute") if (not isinstance(tp, str) or not len(tp) == 1 or tp not in SIMPLE_TYPE_CHARS): raise ValueError('%s is not a type character' % (tp)) default = TP_TO_DEFAULT[tp] ffiarray = _rawffi.Array(tp) result = type.__new__(self, name, bases, dct) result._ffiargshape = tp result._ffishape = tp result._fficompositesize = None result._ffiarray = ffiarray if tp == 'z': # c_char_p def _getvalue(self): addr = self._buffer[0] if addr == 0: return None else: return _rawffi.charp2string(addr) def _setvalue(self, value): if isinstance(value, basestring): if isinstance(value, unicode): value = value.encode(ConvMode.encoding, ConvMode.errors) #self._objects = value array = _rawffi.Array('c')(len(value)+1, value) self._objects = CArgObject(value, array) value = array.buffer elif value is None: value = 0 self._buffer[0] = value result.value = property(_getvalue, _setvalue) result._ffiargtype = _ffi.types.Pointer(_ffi.types.char) elif tp == 'Z': # c_wchar_p def _getvalue(self): addr = self._buffer[0] if addr == 0: return None else: return _rawffi.wcharp2unicode(addr) def _setvalue(self, value): if isinstance(value, basestring): if isinstance(value, str): value = value.decode(ConvMode.encoding, ConvMode.errors) #self._objects = value array = _rawffi.Array('u')(len(value)+1, value) self._objects = CArgObject(value, array) value = array.buffer 
elif value is None: value = 0 self._buffer[0] = value result.value = property(_getvalue, _setvalue) result._ffiargtype = _ffi.types.Pointer(_ffi.types.unichar) elif tp == 'P': # c_void_p def _getvalue(self): addr = self._buffer[0] if addr == 0: return None return addr def _setvalue(self, value): if isinstance(value, str): array = _rawffi.Array('c')(len(value)+1, value) self._objects = CArgObject(value, array) value = array.buffer elif value is None: value = 0 self._buffer[0] = value result.value = property(_getvalue, _setvalue) elif tp == 'u': def _setvalue(self, val): if isinstance(val, str): val = val.decode(ConvMode.encoding, ConvMode.errors) # possible if we use 'ignore' if val: self._buffer[0] = val def _getvalue(self): return self._buffer[0] result.value = property(_getvalue, _setvalue) elif tp == 'c': def _setvalue(self, val): if isinstance(val, unicode): val = val.encode(ConvMode.encoding, ConvMode.errors) if val: self._buffer[0] = val def _getvalue(self): return self._buffer[0] result.value = property(_getvalue, _setvalue) elif tp == 'O': def _setvalue(self, val): num = pyobj_container.add(val) self._buffer[0] = num def _getvalue(self): return pyobj_container.get(self._buffer[0]) result.value = property(_getvalue, _setvalue) elif tp == 'X': from ctypes import WinDLL # Use WinDLL("oleaut32") instead of windll.oleaut32 # because the latter is a shared (cached) object; and # other code may set their own restypes. We need out own # restype here. oleaut32 = WinDLL("oleaut32") SysAllocStringLen = oleaut32.SysAllocStringLen SysStringLen = oleaut32.SysStringLen SysFreeString = oleaut32.SysFreeString def _getvalue(self): addr = self._buffer[0] if addr == 0: return None else: size = SysStringLen(addr) return _rawffi.wcharp2rawunicode(addr, size) def _setvalue(self, value): if isinstance(value, basestring): if isinstance(value, str): value = value.decode(ConvMode.encoding, ConvMode.errors) array = _rawffi.Array('u')(len(value)+1, value) value = SysAllocStringLen(array.buffer, len(value)) elif value is None: value = 0 if self._buffer[0]: SysFreeString(self._buffer[0]) self._buffer[0] = value result.value = property(_getvalue, _setvalue) elif tp == '?': # regular bool def _getvalue(self): return bool(self._buffer[0]) def _setvalue(self, value): self._buffer[0] = bool(value) result.value = property(_getvalue, _setvalue) elif tp == 'v': # VARIANT_BOOL type def _getvalue(self): return bool(self._buffer[0]) def _setvalue(self, value): if value: self._buffer[0] = -1 # VARIANT_TRUE else: self._buffer[0] = 0 # VARIANT_FALSE result.value = property(_getvalue, _setvalue) # make pointer-types compatible with the _ffi fast path if result._is_pointer_like(): def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) result._as_ffi_pointer_ = _as_ffi_pointer_ return result from_address = cdata_from_address def from_param(self, value): if isinstance(value, self): return value from_param_f = FROM_PARAM_BY_TYPE.get(self._type_) if from_param_f: res = from_param_f(self, value) if res is not None: return res else: try: return self(value) except (TypeError, ValueError): pass return super(SimpleType, self).from_param(value) def _CData_output(self, resbuffer, base=None, index=-1): output = super(SimpleType, self)._CData_output(resbuffer, base, index) if self.__bases__[0] is _SimpleCData: return output.value return output def _sizeofinstances(self): return _rawffi.sizeof(self._type_) def _alignmentofinstances(self): return _rawffi.alignment(self._type_) def _is_pointer_like(self): return self._type_ 
in "sPzUZXO" class _SimpleCData(_CData): __metaclass__ = SimpleType _type_ = 'i' def __init__(self, value=DEFAULT_VALUE): if not hasattr(self, '_buffer'): self._buffer = self._ffiarray(1, autofree=True) if value is not DEFAULT_VALUE: self.value = value def _ensure_objects(self): if self._type_ not in 'zZP': assert self._objects is None return self._objects def _getvalue(self): return self._buffer[0] def _setvalue(self, value): self._buffer[0] = value value = property(_getvalue, _setvalue) del _getvalue, _setvalue def __ctypes_from_outparam__(self): meta = type(type(self)) if issubclass(meta, SimpleType) and meta != SimpleType: return self return self.value def __repr__(self): if type(self).__bases__[0] is _SimpleCData: return "%s(%r)" % (type(self).__name__, self.value) else: return "<%s object at 0x%x>" % (type(self).__name__, id(self)) def __nonzero__(self): return self._buffer[0] not in (0, '\x00') from _ctypes.function import CFuncPtr
license: bsd-3-clause
var_hash: 22,541,687,826,028,130
doc_hash: -3,483,967,005,938,578,000
line_mean: 31.752137
line_max: 78
alpha_frac: 0.501044
autogenerated: false

repo_name: LoHChina/nova
path: nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py
copies: 83
size: 1101
content:
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import test
from nova.virt.hyperv import rdpconsoleutils


class RDPConsoleUtilsTestCase(test.NoDBTestCase):
    def setUp(self):
        self._rdpconsoleutils = rdpconsoleutils.RDPConsoleUtils()
        super(RDPConsoleUtilsTestCase, self).setUp()

    def test_get_rdp_console_port(self):
        listener_port = self._rdpconsoleutils.get_rdp_console_port()

        self.assertEqual(self._rdpconsoleutils._DEFAULT_HYPERV_RDP_PORT,
                         listener_port)
license: apache-2.0
var_hash: -7,373,237,732,213,457,000
doc_hash: 7,598,986,144,390,359,000
line_mean: 38.321429
line_max: 78
alpha_frac: 0.718438
autogenerated: false

repo_name: skearnes/pylearn2
path: pylearn2/sandbox/cuda_convnet/tests/test_response_norm.py
copies: 4
size: 1500
content:
import numpy
import theano
from nose.plugins.skip import SkipTest
from theano.tests.unittest_tools import verify_grad

try:
    from pylearn2.sandbox.cuda_convnet.response_norm import (
        CrossMapNorm,
        CrossMapNormUndo
    )
    from theano.sandbox.cuda import CudaNdarrayType, CudaNdarray
    from theano.sandbox.cuda import gpu_from_host
except ImportError:
    raise SkipTest('cuda not available')


def test_cross_map_norm_simple():
    op = CrossMapNorm(16, 15. / 16., 1., True)
    x = CudaNdarray(numpy.ones((16, 2, 2, 2), dtype='float32'))
    x_ = theano.tensor.TensorVariable(CudaNdarrayType([False] * 4))
    f = theano.function([x_], op(x_)[0])
    numpy.testing.assert_allclose(f(x), 0.0625)


def test_cross_map_norm_grad_simple():
    rng = numpy.random.RandomState([2013, 02, 10])
    op = CrossMapNorm(16, 15/16., 1, True)
    make_graph = lambda inp: op(gpu_from_host(inp))[0]
    verify = lambda array: verify_grad(make_graph, [array])
    inputs = [numpy.ones((16, 1, 1, 1), dtype='float32'),
              rng.normal(size=(32, 5, 5, 10)).astype('float32')]
    for arr in inputs:
        yield verify, arr


def test_optimization():
    op = CrossMapNorm(16, 15./16., 1, True)
    x_ = theano.tensor.TensorVariable(CudaNdarrayType([False] * 4))
    f = theano.function([x_], theano.grad(op(x_)[0].sum(), x_))
    nodes = [x for x in f.maker.fgraph.apply_nodes
             if type(x.op) == CrossMapNormUndo]
    assert len(nodes) == 1
    assert nodes[0].op.inplace
license: bsd-3-clause
var_hash: -8,278,326,404,630,218,000
doc_hash: -993,893,717,700,322,800
line_mean: 34.714286
line_max: 67
alpha_frac: 0.654
autogenerated: false

repo_name: bnsgeyer/Copter3_4
path: mk/VRBRAIN/Tools/genmsg/src/genmsg/gentools.py
copies: 51
size: 6819
content:
#! /usr/bin/env python # Software License Agreement (BSD License) # # Copyright (c) 2008, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ Library for supporting message and service generation for all ROS client libraries. This is mainly responsible for calculating the md5sums and message definitions of classes. """ # NOTE: this should not contain any rospy-specific code. The rospy # generator library is rospy.genpy. import sys import hashlib try: from cStringIO import StringIO # Python 2.x except ImportError: from io import StringIO # Python 3.x from . import msgs from .msgs import InvalidMsgSpec, MsgSpec, bare_msg_type, is_builtin from .msg_loader import load_depends from .srvs import SrvSpec from . import names from . import base def compute_md5_text(msg_context, spec): """ Compute the text used for md5 calculation. MD5 spec states that we removes comments and non-meaningful whitespace. We also strip packages names from type names. For convenience sake, constants are reordered ahead of other declarations, in the order that they were originally defined. :returns: text for ROS MD5-processing, ``str`` """ package = spec.package buff = StringIO() for c in spec.constants: buff.write("%s %s=%s\n"%(c.type, c.name, c.val_text)) for type_, name in zip(spec.types, spec.names): msg_type = bare_msg_type(type_) # md5 spec strips package names if is_builtin(msg_type): buff.write("%s %s\n"%(type_, name)) else: # recursively generate md5 for subtype. have to build up # dependency representation for subtype in order to # generate md5 sub_pkg, _ = names.package_resource_name(msg_type) sub_pkg = sub_pkg or package sub_spec = msg_context.get_registered(msg_type) sub_md5 = compute_md5(msg_context, sub_spec) buff.write("%s %s\n"%(sub_md5, name)) return buff.getvalue().strip() # remove trailing new line def _compute_hash(msg_context, spec, hash): """ subroutine of compute_md5() :param msg_context: :class:`MsgContext` instance to load dependencies into/from. :param spec: :class:`MsgSpec` to compute hash for. 
:param hash: hash instance """ # accumulate the hash # - root file if isinstance(spec, MsgSpec): hash.update(compute_md5_text(msg_context, spec).encode()) elif isinstance(spec, SrvSpec): hash.update(compute_md5_text(msg_context, spec.request).encode()) hash.update(compute_md5_text(msg_context, spec.response).encode()) else: raise Exception("[%s] is not a message or service"%spec) return hash.hexdigest() def compute_md5(msg_context, spec): """ Compute md5 hash for message/service :param msg_context: :class:`MsgContext` instance to load dependencies into/from. :param spec: :class:`MsgSpec` to compute md5 for. :returns: md5 hash, ``str`` """ return _compute_hash(msg_context, spec, hashlib.md5()) ## alias compute_md5_v2 = compute_md5 def _unique_deps(dep_list): uniques = [] for d in dep_list: if d not in uniques: uniques.append(d) return uniques def compute_full_text(msg_context, spec): """ Compute full text of message/service, including text of embedded types. The text of the main msg/srv is listed first. Embedded msg/srv files are denoted first by an 80-character '=' separator, followed by a type declaration line,'MSG: pkg/type', followed by the text of the embedded type. :param msg_context: :class:`MsgContext` instance to load dependencies into/from. :param spec: :class:`MsgSpec` to compute full text for. :returns: concatenated text for msg/srv file and embedded msg/srv types, ``str`` """ buff = StringIO() sep = '='*80+'\n' # write the text of the top-level type buff.write(spec.text) buff.write('\n') # append the text of the dependencies (embedded types). Can't use set() as we have to preserve order. for d in _unique_deps(msg_context.get_all_depends(spec.full_name)): buff.write(sep) buff.write("MSG: %s\n"%d) buff.write(msg_context.get_registered(d).text) buff.write('\n') # #1168: remove the trailing \n separator that is added by the concatenation logic return buff.getvalue()[:-1] def compute_full_type_name(package_name, file_name): """ Compute the full type name of message/service 'pkg/type'. :param package_name: name of package file is in, ``str`` :file_name: name of the msg og srv file, ``str`` :returns: typename in format 'pkg/type' :raises: :exc:`MsgGenerationException` if file_name ends with an unknown file extension """ # strip extension for ext in (base.EXT_MSG, base.EXT_SRV): if file_name.endswith(ext): short_name = file_name[:-len(ext)] break else: raise base.MsgGenerationException("Processing file: '%s' - unknown file extension"% (file_name)) return "%s/%s"%(package_name, short_name)
license: gpl-3.0
var_hash: 7,246,196,304,176,484,000
doc_hash: -8,184,328,536,077,829,000
line_mean: 36.965714
line_max: 106
alpha_frac: 0.668426
autogenerated: false

repo_name: jss-emr/openerp-7-src
path: openerp/addons/hr_timesheet_invoice/wizard/hr_timesheet_analytic_profit.py
copies: 52
size: 3163
content:
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import datetime from openerp.osv import fields, osv from openerp.tools.translate import _ class account_analytic_profit(osv.osv_memory): _name = 'hr.timesheet.analytic.profit' _description = 'Print Timesheet Profit' _columns = { 'date_from': fields.date('From', required=True), 'date_to': fields.date('To', required=True), 'journal_ids': fields.many2many('account.analytic.journal', 'analytic_profit_journal_rel', 'analytic_id', 'journal_id', 'Journal', required=True), 'employee_ids': fields.many2many('res.users', 'analytic_profit_emp_rel', 'analytic_id', 'emp_id', 'User', required=True), } def _date_from(*a): return datetime.date.today().replace(day=1).strftime('%Y-%m-%d') def _date_to(*a): return datetime.date.today().strftime('%Y-%m-%d') _defaults = { 'date_from': _date_from, 'date_to': _date_to } def print_report(self, cr, uid, ids, context=None): line_obj = self.pool.get('account.analytic.line') data = {} data['form'] = self.read(cr, uid , ids, [], context=context)[0] ids_chk = line_obj.search(cr, uid, [ ('date', '>=', data['form']['date_from']), ('date', '<=', data['form']['date_to']), ('journal_id', 'in', data['form']['journal_ids']), ('user_id', 'in', data['form']['employee_ids']), ], context=context) if not ids_chk: raise osv.except_osv(_('Insufficient Data!'), _('No record(s) found for this report.')) data['form']['journal_ids'] = [(6, 0, data['form']['journal_ids'])] # Improve me => Change the rml/sxw so that it can support withou [0][2] data['form']['employee_ids'] = [(6, 0, data['form']['employee_ids'])] datas = { 'ids': [], 'model': 'account.analytic.line', 'form': data['form'] } return { 'type': 'ir.actions.report.xml', 'report_name': 'account.analytic.profit', 'datas': datas, } account_analytic_profit() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
license: agpl-3.0
var_hash: 4,155,865,166,164,077,600
doc_hash: -1,831,665,960,886,007,600
line_mean: 41.173333
line_max: 154
alpha_frac: 0.567499
autogenerated: false

repo_name: kubeflow/kubeflow
path: py/kubeflow/kubeflow/ci/common_ui_tests.py
copies: 1
size: 4563
content:
""""Argo Workflow for running frontend unit tests""" from kubeflow.kubeflow.ci import workflow_utils from kubeflow.testing import argo_build_util class Builder(workflow_utils.ArgoTestBuilder): def __init__(self, name=None, namespace=None, bucket=None, test_target_name=None, **kwargs): super().__init__(name=name, namespace=namespace, bucket=bucket, test_target_name=test_target_name, **kwargs) def _create_install_modules_task(self, task_template): install = argo_build_util.deep_copy(task_template) install["name"] = "npm-modules-install" install["container"]["image"] = "node:12.20.1-stretch-slim" install["container"]["command"] = ["npm"] install["container"]["args"] = ["ci"] ui_dir = ("%s/components/crud-web-apps/common/" "frontend/kubeflow-common-lib/") % self.src_dir install["container"]["workingDir"] = ui_dir return install def _create_ui_tests_task(self, task_template): ui_tests = argo_build_util.deep_copy(task_template) img = "browserless/chrome:1.44-chrome-stable" ui_tests["name"] = "common-ui-tests" ui_tests["container"]["image"] = img ui_tests["container"]["command"] = ["npm"] ui_tests["container"]["args"] = ["run", "test-ci"] ui_dir = ("%s/components/crud-web-apps/common/" "frontend/kubeflow-common-lib/") % self.src_dir ui_tests["container"]["workingDir"] = ui_dir return ui_tests def _create_ui_build_task(self, task_template): ui_build = argo_build_util.deep_copy(task_template) ui_build["name"] = "build-common-ui-library" ui_build["container"]["image"] = "node:12.20.1-stretch-slim" ui_build["container"]["command"] = ["npm"] ui_build["container"]["args"] = ["run", "build"] ui_dir = ("%s/components/crud-web-apps/common/" "frontend/kubeflow-common-lib/") % self.src_dir ui_build["container"]["workingDir"] = ui_dir return ui_build def _create_exit_handler(self, task_template): ui_build = argo_build_util.deep_copy(task_template) ui_build["name"] = "rm-node-modules" ui_build["container"]["image"] = "node:12.20.1-stretch-slim" ui_build["container"]["command"] = ["rm"] ui_build["container"]["args"] = ["-r", "node_modules"] ui_dir = ("%s/components/crud-web-apps/common/" "frontend/kubeflow-common-lib/") % self.src_dir ui_build["container"]["workingDir"] = ui_dir return ui_build def build(self): """Build the Argo workflow graph""" workflow = self.build_init_workflow() task_template = self.build_task_template() # install npm modules modules_install_task = self._create_install_modules_task(task_template) argo_build_util.add_task_to_dag(workflow, workflow_utils.E2E_DAG_NAME, modules_install_task, [self.mkdir_task_name]) # run common ui frontend tests ui_tests_task = self._create_ui_tests_task(task_template) argo_build_util.add_task_to_dag(workflow, workflow_utils.E2E_DAG_NAME, ui_tests_task, [modules_install_task["name"]]) # build the node module from the lib source code build_step = self._create_ui_build_task(task_template) argo_build_util.add_task_to_dag(workflow, workflow_utils.E2E_DAG_NAME, build_step, [modules_install_task["name"]]) # EXIT-HANDLER: remove node_modules folder as exit handler rm_node_modules = self._create_exit_handler(task_template) argo_build_util.add_task_to_dag(workflow, workflow_utils.EXIT_DAG_NAME, rm_node_modules, []) # Set the labels on all templates workflow = argo_build_util.set_task_template_labels(workflow) return workflow def create_workflow(name=None, namespace=None, bucket=None, **kwargs): """Create workflow returns an Argo workflow to test kfctl upgrades. Args: name: Name to give to the workflow. This can also be used to name things associated with the workflow. 
""" builder = Builder(name=name, namespace=namespace, bucket=bucket, **kwargs) return builder.build()
license: apache-2.0
var_hash: -735,335,186,224,116,500
doc_hash: -7,301,301,136,198,185,000
line_mean: 39.380531
line_max: 79
alpha_frac: 0.586895
autogenerated: false

repo_name: mongolab/mongoctl
path: mongoctl/tests/sharded_test.py
copies: 1
size: 2582
content:
# The MIT License
# Copyright (c) 2012 ObjectLabs Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import unittest
import time

from mongoctl.tests.test_base import MongoctlTestBase, append_user_arg

########################################################################################################################
# Servers
SHARD_TEST_SERVERS = [
    "ConfigServer1",
    "ConfigServer2",
    "ConfigServer3",
    "Mongos1",
    "Mongos2",
    "ShardServer1",
    "ShardServer2",
    "ShardServer3",
    "ShardServer4",
    "ShardServer5",
    "ShardServer6",
    "ShardArbiter"
]

########################################################################################################################
### Sharded Servers
class ShardedTest(MongoctlTestBase):

    ########################################################################################################################
    def test_sharded(self):
        # Start all sharded servers
        for s_id in SHARD_TEST_SERVERS:
            self.assert_start_server(s_id, start_options=["--rs-add"])

        print "Sleeping for 10 seconds..."
        # sleep for 10 of seconds
        time.sleep(10)

        conf_cmd = ["configure-shard-cluster", "ShardedCluster"]
        append_user_arg(conf_cmd)
        # Configure the sharded cluster
        self.mongoctl_assert_cmd(conf_cmd)

    ###########################################################################
    def get_my_test_servers(self):
        return SHARD_TEST_SERVERS

# booty
if __name__ == '__main__':
    unittest.main()
license: mit
var_hash: -4,162,164,186,536,588,000
doc_hash: 8,751,565,741,417,235,000
line_mean: 33.891892
line_max: 124
alpha_frac: 0.585593
autogenerated: false

repo_name: jamasi/Xtal-xplore-R
path: gui/doublespinslider.py
copies: 1
size: 3682
content:
# -*- coding: utf-8 -*- """DoubleSpinSlider - a custom widget combining a slider with a spinbox Copyright (C) 2014 Jan M. Simons <marten@xtal.rwth-aachen.de> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from __future__ import division, print_function, absolute_import from decimal import Decimal from PyQt4 import QtGui, QtCore from PyQt4.QtCore import pyqtSlot class DoubleSpinSlider(QtGui.QWidget): """This is a QWidget containing a QSlider and a QDoubleSpinBox""" def __init__(self, parent=None, width=50, height=100, dpi=100): #super(DoubleSpinSlider, self).__init__(parent) QtGui.QWidget.__init__(self, parent) self._vLayout = QtGui.QVBoxLayout() self._label = QtGui.QLabel(parent) self._label.setAlignment(QtCore.Qt.AlignCenter) self._vLayout.addWidget(self._label) self._dSBox = QtGui.QDoubleSpinBox(parent) self._dSBox.setWrapping(True) self._dSBox.setDecimals(4) self._dSBox.setMaximum(1.00000000) self._dSBox.setSingleStep(0.1000000000) self._vLayout.addWidget(self._dSBox) self._hLayout = QtGui.QHBoxLayout() self._vSlider = QtGui.QSlider(parent) self._vSlider.setMinimum(0) self._vSlider.setMaximum(10000) self._vSlider.setPageStep(1000) self._vSlider.setOrientation(QtCore.Qt.Vertical) self._vSlider.setTickPosition(QtGui.QSlider.TicksBothSides) self._vSlider.setTickInterval(0) self._hLayout.addWidget(self._vSlider) self._vLayout.addLayout(self._hLayout) self.setLayout(self._vLayout) self.setParent(parent) # map functions self.setText = self._label.setText self.text = self._label.text self.setValue = self._dSBox.setValue self.value = self._dSBox.value self._vSlider.valueChanged.connect(self.ChangeSpinBox) self._dSBox.valueChanged.connect(self.ChangeSlider) def _multiplier(self): return 10.000000 ** self._dSBox.decimals() @pyqtSlot(int) def ChangeSpinBox(self, slidervalue): #print("sv: {}".format(slidervalue)) newvalue = round(slidervalue / (self._multiplier()),4) #print("nv: {}".format(newvalue)) if newvalue != self._dSBox.value(): self._dSBox.setValue(newvalue) @pyqtSlot('double') def ChangeSlider(self, spinboxvalue): newvalue = spinboxvalue * self._multiplier() #print("sb: {sb} mult: {mult} prod: {prod}".format( # sb=spinboxvalue, # mult=int(10.00000000 ** self._dSBox.decimals()), # prod=newvalue)) self._vSlider.setValue(newvalue) @pyqtSlot('double') def setMaximum(self, maximum): self._dSBox.setMaximum(maximum) self._vSlider.setMaximum(maximum * self._multiplier()) @pyqtSlot('double') def setMinimum(self, minimum): self._dSBox.setMinimum(minimum) self._vSlider.setMinimum(minimum * self._multiplier())
license: agpl-3.0
var_hash: -1,353,027,551,254,140,000
doc_hash: 607,908,066,510,852,500
line_mean: 38.591398
line_max: 77
alpha_frac: 0.655894
autogenerated: false

repo_name: rbharath/deepchem
path: examples/qm9/qm9_tf_model.py
copies: 2
size: 1512
content:
""" Script that trains Tensorflow multitask models on QM9 dataset. """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals import os import deepchem as dc import numpy as np from qm9_datasets import load_qm9 np.random.seed(123) qm9_tasks, datasets, transformers = load_qm9() train_dataset, valid_dataset, test_dataset = datasets fit_transformers = [dc.trans.CoulombFitTransformer(train_dataset)] regression_metric = [ dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"), dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression") ] model = dc.models.TensorflowMultiTaskFitTransformRegressor( n_tasks=len(qm9_tasks), n_features=[29, 29], learning_rate=0.001, momentum=.8, batch_size=32, weight_init_stddevs=[1 / np.sqrt(400), 1 / np.sqrt(100), 1 / np.sqrt(100)], bias_init_consts=[0., 0., 0.], layer_sizes=[400, 100, 100], dropouts=[0.01, 0.01, 0.01], fit_transformers=fit_transformers, n_evals=10, seed=123) # Fit trained model model.fit(train_dataset, nb_epoch=50) model.save() train_scores = model.evaluate(train_dataset, regression_metric, transformers) print("Train scores [kcal/mol]") print(train_scores) valid_scores = model.evaluate(valid_dataset, regression_metric, transformers) print("Valid scores [kcal/mol]") print(valid_scores) test_scores = model.evaluate(test_dataset, regression_metric, transformers) print("Test scores [kcal/mol]") print(test_scores)
license: mit
var_hash: -7,137,707,019,939,646,000
doc_hash: 6,807,422,836,124,726,000
line_mean: 29.857143
line_max: 79
alpha_frac: 0.726852
autogenerated: false

repo_name: TheTimmy/spack
path: lib/spack/spack/cmd/configure.py
copies: 2
size: 3509
content:
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import argparse import llnl.util.tty as tty import spack.cmd import spack.cmd.install as inst from spack import * description = 'stage and configure a package but do not install' section = "build" level = "long" build_system_to_phase = { AutotoolsPackage: 'configure', CMakePackage: 'cmake', QMakePackage: 'qmake', WafPackage: 'configure', PerlPackage: 'configure', IntelPackage: 'configure', } def setup_parser(subparser): subparser.add_argument( 'package', nargs=argparse.REMAINDER, help="spec of the package to install" ) subparser.add_argument( '-v', '--verbose', action='store_true', help="print additional output during builds" ) def _stop_at_phase_during_install(args, calling_fn, phase_mapping): if not args.package: tty.die("configure requires at least one package argument") # TODO: to be refactored with code in install specs = spack.cmd.parse_specs(args.package, concretize=True) if len(specs) != 1: tty.error('only one spec can be installed at a time.') spec = specs.pop() pkg = spec.package try: key = [cls for cls in phase_mapping if isinstance(pkg, cls)].pop() phase = phase_mapping[key] # Install package dependencies if needed parser = argparse.ArgumentParser() inst.setup_parser(parser) tty.msg('Checking dependencies for {0}'.format(args.package)) cli_args = ['-v'] if args.verbose else [] install_args = parser.parse_args(cli_args + ['--only=dependencies']) install_args.package = args.package inst.install(parser, install_args) # Install package and stop at the given phase cli_args = ['-v'] if args.verbose else [] install_args = parser.parse_args(cli_args + ['--only=package']) install_args.package = args.package inst.install(parser, install_args, stop_at=phase) except IndexError: tty.error( 'Package {0} has no {1} phase, or its {1} phase is not separated from install'.format( # NOQA: ignore=E501 spec.name, calling_fn.__name__) ) def configure(parser, args): _stop_at_phase_during_install(args, configure, build_system_to_phase)
license: lgpl-2.1
var_hash: 6,441,498,331,050,817,000
doc_hash: -7,128,192,473,181,085,000
line_mean: 35.936842
line_max: 119
alpha_frac: 0.652038
autogenerated: false

repo_name: agileblaze/OpenStackTwoFactorAuthentication
path: horizon/openstack_dashboard/dashboards/project/instances/audit_tables.py
copies: 59
size: 2391
content:
# Copyright 2013 Metacloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _

from horizon import tables
from horizon.utils import filters


class AuditTable(tables.DataTable):
    ACTION_DISPLAY_CHOICES = (
        ("create", pgettext_lazy("Action log of an instance", u"Create")),
        ("pause", pgettext_lazy("Action log of an instance", u"Pause")),
        ("unpause", pgettext_lazy("Action log of an instance", u"Unpause")),
        ("rebuild", pgettext_lazy("Action log of an instance", u"Rebuild")),
        ("resize", pgettext_lazy("Action log of an instance", u"Resize")),
        ("confirmresize", pgettext_lazy("Action log of an instance",
                                        u"Confirm Resize")),
        ("suspend", pgettext_lazy("Action log of an instance", u"Suspend")),
        ("resume", pgettext_lazy("Action log of an instance", u"Resume")),
        ("reboot", pgettext_lazy("Action log of an instance", u"Reboot")),
        ("stop", pgettext_lazy("Action log of an instance", u"Stop")),
        ("start", pgettext_lazy("Action log of an instance", u"Start")),
    )

    request_id = tables.Column('request_id', verbose_name=_('Request ID'))
    action = tables.Column('action', verbose_name=_('Action'),
                           display_choices=ACTION_DISPLAY_CHOICES)
    start_time = tables.Column('start_time', verbose_name=_('Start Time'),
                               filters=[filters.parse_isotime])
    user_id = tables.Column('user_id', verbose_name=_('User ID'))
    message = tables.Column('message', verbose_name=_('Message'))

    class Meta(object):
        name = 'audit'
        verbose_name = _('Instance Action List')

    def get_object_id(self, datum):
        return datum.request_id
license: apache-2.0
var_hash: -3,543,399,113,414,046,700
doc_hash: 472,943,229,649,835,840
line_mean: 44.113208
line_max: 77
alpha_frac: 0.641154
autogenerated: false

repo_name: pepetreshere/odoo
path: addons/account/tests/test_reconciliation_matching_rules.py
copies: 1
size: 42618
content:
# -*- coding: utf-8 -*- from freezegun import freeze_time from odoo.addons.account.tests.common import AccountTestInvoicingCommon from odoo.tests.common import Form from odoo.tests import tagged @tagged('post_install', '-at_install') class TestReconciliationMatchingRules(AccountTestInvoicingCommon): @classmethod def setUpClass(cls, chart_template_ref=None): super().setUpClass(chart_template_ref=chart_template_ref) ################# # Company setup # ################# cls.currency_data_2 = cls.setup_multi_currency_data({ 'name': 'Dark Chocolate Coin', 'symbol': '🍫', 'currency_unit_label': 'Dark Choco', 'currency_subunit_label': 'Dark Cacao Powder', }, rate2016=10.0, rate2017=20.0) cls.company = cls.company_data['company'] cls.account_pay = cls.company_data['default_account_payable'] cls.current_assets_account = cls.env['account.account'].search([ ('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id), ('company_id', '=', cls.company.id)], limit=1) cls.bank_journal = cls.env['account.journal'].search([('type', '=', 'bank'), ('company_id', '=', cls.company.id)], limit=1) cls.cash_journal = cls.env['account.journal'].search([('type', '=', 'cash'), ('company_id', '=', cls.company.id)], limit=1) cls.tax21 = cls.env['account.tax'].create({ 'name': '21%', 'type_tax_use': 'purchase', 'amount': 21, }) cls.tax12 = cls.env['account.tax'].create({ 'name': '12%', 'type_tax_use': 'purchase', 'amount': 12, }) cls.partner_1 = cls.env['res.partner'].create({'name': 'partner_1', 'company_id': cls.company.id}) cls.partner_2 = cls.env['res.partner'].create({'name': 'partner_2', 'company_id': cls.company.id}) cls.partner_3 = cls.env['res.partner'].create({'name': 'partner_3', 'company_id': cls.company.id}) ############### # Rules setup # ############### cls.rule_1 = cls.env['account.reconcile.model'].create({ 'name': 'Invoices Matching Rule', 'sequence': '1', 'rule_type': 'invoice_matching', 'auto_reconcile': False, 'match_nature': 'both', 'match_same_currency': True, 'match_total_amount': True, 'match_total_amount_param': 100, 'match_partner': True, 'match_partner_ids': [(6, 0, (cls.partner_1 + cls.partner_2 + cls.partner_3).ids)], 'company_id': cls.company.id, 'line_ids': [(0, 0, {'account_id': cls.current_assets_account.id})], }) cls.rule_2 = cls.env['account.reconcile.model'].create({ 'name': 'write-off model', 'rule_type': 'writeoff_suggestion', 'match_partner': True, 'match_partner_ids': [], 'line_ids': [(0, 0, {'account_id': cls.current_assets_account.id})], }) ################## # Invoices setup # ################## cls.invoice_line_1 = cls._create_invoice_line(100, cls.partner_1, 'out_invoice') cls.invoice_line_2 = cls._create_invoice_line(200, cls.partner_1, 'out_invoice') cls.invoice_line_3 = cls._create_invoice_line(300, cls.partner_1, 'in_refund', name="RBILL/2019/09/0013") cls.invoice_line_4 = cls._create_invoice_line(1000, cls.partner_2, 'in_invoice') cls.invoice_line_5 = cls._create_invoice_line(600, cls.partner_3, 'out_invoice') cls.invoice_line_6 = cls._create_invoice_line(600, cls.partner_3, 'out_invoice', ref="RF12 3456") cls.invoice_line_7 = cls._create_invoice_line(200, cls.partner_3, 'out_invoice', pay_reference="RF12 3456") #################### # Statements setup # #################### # TODO : account_number, partner_name, transaction_type, narration invoice_number = cls.invoice_line_1.move_id.name cls.bank_st, cls.bank_st_2, cls.cash_st = cls.env['account.bank.statement'].create([ { 'name': 'test bank journal', 'journal_id': cls.bank_journal.id, 'line_ids': [ 
(0, 0, { 'payment_ref': 'invoice %s-%s-%s' % tuple(invoice_number.split('/')[1:]), 'partner_id': cls.partner_1.id, 'amount': 100, 'sequence': 1, }), (0, 0, { 'payment_ref': 'xxxxx', 'partner_id': cls.partner_1.id, 'amount': 600, 'sequence': 2, }), ], }, { 'name': 'second test bank journal', 'journal_id': cls.bank_journal.id, 'line_ids': [ (0, 0, { 'payment_ref': 'nawak', 'narration': 'Communication: RF12 3456', 'partner_id': cls.partner_3.id, 'amount': 600, 'sequence': 1, }), (0, 0, { 'payment_ref': 'RF12 3456', 'partner_id': cls.partner_3.id, 'amount': 600, 'sequence': 2, }), (0, 0, { 'payment_ref': 'baaaaah', 'ref': 'RF12 3456', 'partner_id': cls.partner_3.id, 'amount': 600, 'sequence': 2, }), ], }, { 'name': 'test cash journal', 'journal_id': cls.cash_journal.id, 'line_ids': [ (0, 0, { 'payment_ref': 'yyyyy', 'partner_id': cls.partner_2.id, 'amount': -1000, 'sequence': 1, }), ], } ]) cls.bank_line_1, cls.bank_line_2 = cls.bank_st.line_ids cls.bank_line_3, cls.bank_line_4, cls.bank_line_5 = cls.bank_st_2.line_ids cls.cash_line_1 = cls.cash_st.line_ids cls._post_statements(cls) @classmethod def _create_invoice_line(cls, amount, partner, type, currency=None, pay_reference=None, ref=None, name=None): ''' Create an invoice on the fly.''' invoice_form = Form(cls.env['account.move'].with_context(default_move_type=type, default_invoice_date='2019-09-01', default_date='2019-09-01')) invoice_form.partner_id = partner if currency: invoice_form.currency_id = currency if pay_reference: invoice_form.payment_reference = pay_reference if ref: invoice_form.ref = ref if name: invoice_form.name = name with invoice_form.invoice_line_ids.new() as invoice_line_form: invoice_line_form.name = 'xxxx' invoice_line_form.quantity = 1 invoice_line_form.price_unit = amount invoice_line_form.tax_ids.clear() invoice = invoice_form.save() invoice.action_post() lines = invoice.line_ids return lines.filtered(lambda l: l.account_id.user_type_id.type in ('receivable', 'payable')) def _post_statements(self): self.bank_st.balance_end_real = self.bank_st.balance_end self.bank_st_2.balance_end_real = self.bank_st_2.balance_end self.cash_st.balance_end_real = self.cash_st.balance_end (self.bank_st + self.bank_st_2 + self.cash_st).button_post() def _check_statement_matching(self, rules, expected_values, statements=None): if statements is None: statements = self.bank_st + self.cash_st statement_lines = statements.mapped('line_ids').sorted() matching_values = rules._apply_rules(statement_lines, None) for st_line_id, values in matching_values.items(): values.pop('reconciled_lines', None) values.pop('write_off_vals', None) self.assertDictEqual(values, expected_values[st_line_id]) def test_matching_fields(self): # Check without restriction. 
self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': [ self.invoice_line_2.id, self.invoice_line_3.id, self.invoice_line_1.id, ], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) def test_matching_fields_match_text_location(self): self.rule_1.match_text_location_label = True self.rule_1.match_text_location_reference = False self.rule_1.match_text_location_note = False self._check_statement_matching(self.rule_1, { self.bank_line_3.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id}, self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id}, self.bank_line_5.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id}, }, statements=self.bank_st_2) self.rule_1.match_text_location_label = True self.rule_1.match_text_location_reference = False self.rule_1.match_text_location_note = True self._check_statement_matching(self.rule_1, { self.bank_line_3.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id}, self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id}, self.bank_line_5.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id}, }, statements=self.bank_st_2) self.rule_1.match_text_location_label = True self.rule_1.match_text_location_reference = True self.rule_1.match_text_location_note = False self._check_statement_matching(self.rule_1, { self.bank_line_3.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id}, self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id}, self.bank_line_5.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id}, }, statements=self.bank_st_2) self.rule_1.match_text_location_label = True self.rule_1.match_text_location_reference = True self.rule_1.match_text_location_note = True self._check_statement_matching(self.rule_1, { self.bank_line_3.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id}, self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id}, self.bank_line_5.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id}, }, statements=self.bank_st_2) self.rule_1.match_text_location_label = False self.rule_1.match_text_location_reference = False self.rule_1.match_text_location_note = False self._check_statement_matching(self.rule_1, { self.bank_line_3.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id}, self.bank_line_4.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id}, self.bank_line_5.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id}, }, statements=self.bank_st_2) def test_matching_fields_match_journal_ids(self): self.rule_1.match_journal_ids |= self.cash_st.journal_id 
self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': []}, self.bank_line_2.id: {'aml_ids': []}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) self.rule_1.match_journal_ids |= self.bank_st.journal_id + self.cash_st.journal_id def test_matching_fields_match_nature(self): self.rule_1.match_nature = 'amount_received' self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': [ self.invoice_line_2.id, self.invoice_line_3.id, self.invoice_line_1.id, ], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id}, self.cash_line_1.id: {'aml_ids': []}, }) self.rule_1.match_nature = 'amount_paid' self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': []}, self.bank_line_2.id: {'aml_ids': []}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) self.rule_1.match_nature = 'both' def test_matching_fields_match_amount(self): self.rule_1.match_amount = 'lower' self.rule_1.match_amount_max = 150 self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': []}, self.cash_line_1.id: {'aml_ids': []}, }) self.rule_1.match_amount = 'greater' self.rule_1.match_amount_min = 200 self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': []}, self.bank_line_2.id: {'aml_ids': [ self.invoice_line_1.id, self.invoice_line_2.id, self.invoice_line_3.id, ], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) self.rule_1.match_amount = 'between' self.rule_1.match_amount_min = 200 self.rule_1.match_amount_max = 800 self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': []}, self.bank_line_2.id: {'aml_ids': [ self.invoice_line_1.id, self.invoice_line_2.id, self.invoice_line_3.id, ], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id}, self.cash_line_1.id: {'aml_ids': []}, }) self.rule_1.match_amount = False def test_matching_fields_match_label(self): self.rule_1.match_label = 'contains' self.rule_1.match_label_param = 'yyyyy' self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': []}, self.bank_line_2.id: {'aml_ids': []}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) self.rule_1.match_label = 'not_contains' self.rule_1.match_label_param = 'xxxxx' self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': []}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) self.rule_1.match_label = 'match_regex' self.rule_1.match_label_param = 'xxxxx|yyyyy' self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': []}, self.bank_line_2.id: {'aml_ids': [ self.invoice_line_1.id, self.invoice_line_2.id, self.invoice_line_3.id, ], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id}, self.cash_line_1.id: {'aml_ids': 
[self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) self.rule_1.match_label = False def test_matching_fields_match_total_amount(self): # Check match_total_amount: line amount >= total residual amount. self.rule_1.match_total_amount_param = 90.0 self.bank_line_1.amount += 5 self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'write_off', 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': [ self.invoice_line_2.id, self.invoice_line_3.id, self.invoice_line_1.id, ], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) self.rule_1.match_total_amount_param = 100.0 self.bank_line_1.amount -= 5 # Check match_total_amount: line amount <= total residual amount. self.rule_1.match_total_amount_param = 90.0 self.bank_line_1.amount -= 5 self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'write_off', 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': [ self.invoice_line_2.id, self.invoice_line_3.id, self.invoice_line_1.id, ], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) self.rule_1.match_total_amount_param = 100.0 self.bank_line_1.amount += 5 def test_matching_fields_match_partner_category_ids(self): test_category = self.env['res.partner.category'].create({'name': 'Consulting Services'}) self.partner_2.category_id = test_category self.rule_1.match_partner_category_ids |= test_category self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': []}, self.bank_line_2.id: {'aml_ids': []}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) self.rule_1.match_partner_category_ids = False def test_mixin_rules(self): ''' Test usage of rules together.''' # rule_1 is used before rule_2. self.rule_1.sequence = 1 self.rule_2.sequence = 2 self._check_statement_matching(self.rule_1 + self.rule_2, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': [ self.invoice_line_2.id, self.invoice_line_3.id, self.invoice_line_1.id, ], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) # rule_2 is used before rule_1. self.rule_1.sequence = 2 self.rule_2.sequence = 1 self._check_statement_matching(self.rule_1 + self.rule_2, { self.bank_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_2.partner_id}, self.cash_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.cash_line_1.partner_id}, }) # rule_2 is used before rule_1 but only on partner_1. 
self.rule_2.match_partner_ids |= self.partner_1 self._check_statement_matching(self.rule_1 + self.rule_2, { self.bank_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_2.partner_id}, self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id}, }) def test_auto_reconcile(self): ''' Test auto reconciliation.''' self.bank_line_1.amount += 5 self.rule_1.sequence = 2 self.rule_1.auto_reconcile = True self.rule_1.match_total_amount_param = 90 self.rule_2.sequence = 1 self.rule_2.match_partner_ids |= self.partner_2 self.rule_2.auto_reconcile = True self._check_statement_matching(self.rule_1 + self.rule_2, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': []}, self.cash_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'reconciled', 'partner': self.cash_line_1.partner_id}, }) # Check first line has been well reconciled. self.assertRecordValues(self.bank_line_1.line_ids, [ {'partner_id': self.partner_1.id, 'debit': 105.0, 'credit': 0.0}, {'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 5.0}, {'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 100.0}, ]) # Check second line has been well reconciled. self.assertRecordValues(self.cash_line_1.line_ids, [ {'partner_id': self.partner_2.id, 'debit': 0.0, 'credit': 1000.0}, {'partner_id': self.partner_2.id, 'debit': 1000.0, 'credit': 0.0}, ]) def test_larger_invoice_auto_reconcile(self): ''' Test auto reconciliation with an invoice with larger amount than the statement line's, for rules without write-offs.''' self.bank_line_1.amount = 40 self.invoice_line_1.move_id.payment_reference = self.bank_line_1.payment_ref self.rule_1.sequence = 2 self.rule_1.auto_reconcile = True self.rule_1.line_ids = [(5, 0, 0)] self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': []}, }, statements=self.bank_st) # Check first line has been well reconciled. self.assertRecordValues(self.bank_line_1.line_ids, [ {'partner_id': self.partner_1.id, 'debit': 40.0, 'credit': 0.0}, {'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 40.0}, ]) self.assertEqual(self.invoice_line_1.amount_residual, 60.0, "The invoice should have been partially reconciled") def test_auto_reconcile_with_tax(self): ''' Test auto reconciliation with a tax amount included in the bank statement line''' self.rule_1.write({ 'auto_reconcile': True, 'rule_type': 'writeoff_suggestion', 'line_ids': [(1, self.rule_1.line_ids.id, { 'amount': 50, 'force_tax_included': True, 'tax_ids': [(6, 0, self.tax21.ids)], }), (0, 0, { 'amount': 100, 'force_tax_included': False, 'tax_ids': [(6, 0, self.tax12.ids)], 'account_id': self.current_assets_account.id, })] }) self.bank_line_1.amount = -121 self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': [], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_2.partner_id}, }, statements=self.bank_st) # Check first line has been well reconciled. 
self.assertRecordValues(self.bank_line_1.line_ids, [ {'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 121.0, 'tax_ids': [], 'tax_line_id': False}, {'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 7.26, 'tax_ids': [], 'tax_line_id': False}, {'partner_id': self.partner_1.id, 'debit': 50.0, 'credit': 0.0, 'tax_ids': [self.tax21.id], 'tax_line_id': False}, {'partner_id': self.partner_1.id, 'debit': 10.5, 'credit': 0.0, 'tax_ids': [], 'tax_line_id': self.tax21.id}, {'partner_id': self.partner_1.id, 'debit': 60.5, 'credit': 0.0, 'tax_ids': [self.tax12.id], 'tax_line_id': False}, {'partner_id': self.partner_1.id, 'debit': 7.26, 'credit': 0.0, 'tax_ids': [], 'tax_line_id': self.tax12.id}, ]) def test_reverted_move_matching(self): partner = self.partner_1 AccountMove = self.env['account.move'] move = AccountMove.create({ 'journal_id': self.bank_journal.id, 'line_ids': [ (0, 0, { 'account_id': self.account_pay.id, 'partner_id': partner.id, 'name': 'One of these days', 'debit': 10, }), (0, 0, { 'account_id': self.bank_journal.payment_credit_account_id.id, 'partner_id': partner.id, 'name': 'I\'m gonna cut you into little pieces', 'credit': 10, }) ], }) payment_bnk_line = move.line_ids.filtered(lambda l: l.account_id == self.bank_journal.payment_credit_account_id) move.action_post() move_reversed = move._reverse_moves() self.assertTrue(move_reversed.exists()) self.bank_line_1.write({ 'payment_ref': '8', 'partner_id': partner.id, 'amount': -10, }) self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [payment_bnk_line.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': []}, }, statements=self.bank_st) def test_match_different_currencies(self): partner = self.env['res.partner'].create({'name': 'Bernard Gagnant'}) self.rule_1.write({'match_partner_ids': [(6, 0, partner.ids)], 'match_same_currency': False}) currency_inv = self.env.ref('base.EUR') currency_statement = self.env.ref('base.JPY') currency_statement.active = True invoice_line = self._create_invoice_line(100, partner, 'out_invoice', currency=currency_inv) self.bank_line_1.write({'partner_id': partner.id, 'foreign_currency_id': currency_statement.id, 'amount_currency': 100, 'payment_ref': 'test'}) self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': invoice_line.ids, 'model': self.rule_1, 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': []}, }, statements=self.bank_st) def test_invoice_matching_rule_no_partner(self): """ Tests that a statement line without any partner can be matched to the right invoice if they have the same payment reference. 
""" self.invoice_line_1.move_id.write({'payment_reference': 'Tournicoti66'}) self.bank_line_1.write({ 'payment_ref': 'Tournicoti66', 'partner_id': None, 'amount': 95, }) self.rule_1.write({ 'line_ids': [(5, 0, 0)], 'match_partner': False, 'match_label': 'contains', 'match_label_param': 'Tournicoti', # So that we only match what we want to test }) self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': []}, }, self.bank_st) def test_inv_matching_rule_auto_rec_no_partner_with_writeoff(self): self.invoice_line_1.move_id.write({'payment_reference': 'doudlidou355'}) self.bank_line_1.write({ 'payment_ref': 'doudlidou355', 'partner_id': None, 'amount': 95, }) self.rule_1.write({ 'match_partner': False, 'match_label': 'contains', 'match_label_param': 'doudlidou', # So that we only match what we want to test 'match_total_amount_param': 90, 'auto_reconcile': True, }) # Check bank reconciliation self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id, 'status': 'reconciled'}, self.bank_line_2.id: {'aml_ids': []}, }, self.bank_st) # Check invoice line has been fully reconciled, with a write-off. self.assertRecordValues(self.bank_line_1.line_ids, [ {'partner_id': self.partner_1.id, 'debit': 95.0, 'credit': 0.0, 'account_id': self.bank_journal.default_account_id.id, 'reconciled': False}, {'partner_id': self.partner_1.id, 'debit': 5.0, 'credit': 0.0, 'account_id': self.current_assets_account.id, 'reconciled': False}, {'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 100.0, 'account_id': self.invoice_line_1.account_id.id, 'reconciled': True}, ]) self.assertEqual(self.invoice_line_1.amount_residual, 0.0, "The invoice should have been fully reconciled") def test_partner_mapping_rule(self): self.bank_line_1.write({'partner_id': None, 'payment_ref': 'toto42', 'narration': None}) self.bank_line_2.write({'partner_id': None}) # Do the test for both rule 1 and 2, so that we check invoice matching and write-off rules for rule in (self.rule_1 + self.rule_2): # To cope for minor differences in rule results matching_amls = rule.rule_type == 'invoice_matching' and self.invoice_line_1.ids or [] result_status = rule.rule_type == 'writeoff_suggestion' and {'status': 'write_off'} or {} match_result = {**result_status, 'aml_ids': matching_amls, 'model': rule, 'partner': self.partner_1} no_match_result = {'aml_ids': []} # Without mapping, there should be no match self._check_statement_matching(rule, { self.bank_line_1.id: no_match_result, self.bank_line_2.id: no_match_result, }, self.bank_st) # We add some mapping for payment reference to rule_1 rule.write({ 'partner_mapping_line_ids': [(0, 0, { 'partner_id': self.partner_1.id, 'payment_ref_regex': 'toto.*', })] }) # bank_line_1 should now match self._check_statement_matching(rule, { self.bank_line_1.id: match_result, self.bank_line_2.id: no_match_result, }, self.bank_st) # If we now add a narration regex to the same mapping line, nothing should match rule.partner_mapping_line_ids.write({'narration_regex': ".*coincoin"}) self.bank_line_1.write({'narration': None}) # Reset from possible previous iteration self._check_statement_matching(rule, { self.bank_line_1.id: no_match_result, self.bank_line_2.id: no_match_result, }, self.bank_st) # If we set the narration so that it matches the new mapping criterium, line_1 
matches self.bank_line_1.write({'narration': "42coincoin"}) self._check_statement_matching(rule, { self.bank_line_1.id: match_result, self.bank_line_2.id: no_match_result, }, self.bank_st) def test_partner_name_in_communication(self): self.invoice_line_1.partner_id.write({'name': "Archibald Haddock"}) self.bank_line_1.write({'partner_id': None, 'payment_ref': '1234//HADDOCK-Archibald'}) self.bank_line_2.write({'partner_id': None}) self.rule_1.write({'match_partner': False}) # bank_line_1 should match, as its communication contains the invoice's partner name self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': []}, }, self.bank_st) def test_partner_name_with_regexp_chars(self): self.invoice_line_1.partner_id.write({'name': "Archibald + Haddock"}) self.bank_line_1.write({'partner_id': None, 'payment_ref': '1234//HADDOCK+Archibald'}) self.bank_line_2.write({'partner_id': None}) self.rule_1.write({'match_partner': False}) # The query should still work self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': []}, }, self.bank_st) def test_match_multi_currencies(self): ''' Ensure the matching of candidates is made using the right statement line currency. In this test, the value of the statement line is 100 USD = 300 GOL = 900 DAR and we want to match two journal items of: - 100 USD = 200 GOL (= 600 DAR from the statement line point of view) - 14 USD = 280 DAR Both journal items should be suggested to the user because they represents 98% of the statement line amount (DAR). ''' partner = self.env['res.partner'].create({'name': 'Bernard Perdant'}) journal = self.env['account.journal'].create({ 'name': 'test_match_multi_currencies', 'code': 'xxxx', 'type': 'bank', 'currency_id': self.currency_data['currency'].id, }) matching_rule = self.env['account.reconcile.model'].create({ 'name': 'test_match_multi_currencies', 'rule_type': 'invoice_matching', 'match_partner': True, 'match_partner_ids': [(6, 0, partner.ids)], 'match_total_amount': True, 'match_total_amount_param': 95.0, 'match_same_currency': False, 'company_id': self.company_data['company'].id, }) statement = self.env['account.bank.statement'].create({ 'name': 'test_match_multi_currencies', 'journal_id': journal.id, 'line_ids': [ (0, 0, { 'journal_id': journal.id, 'date': '2016-01-01', 'payment_ref': 'line', 'partner_id': partner.id, 'foreign_currency_id': self.currency_data_2['currency'].id, 'amount': 300.0, # Rate is 3 GOL = 1 USD in 2016. 'amount_currency': 900.0, # Rate is 10 DAR = 1 USD in 2016 but the rate used by the bank is 9:1. }), ], }) statement_line = statement.line_ids statement.button_post() move = self.env['account.move'].create({ 'move_type': 'entry', 'date': '2017-01-01', 'journal_id': self.company_data['default_journal_sale'].id, 'line_ids': [ # Rate is 2 GOL = 1 USD in 2017. # The statement line will consider this line equivalent to 600 DAR. (0, 0, { 'account_id': self.company_data['default_account_receivable'].id, 'partner_id': partner.id, 'currency_id': self.currency_data['currency'].id, 'debit': 100.0, 'credit': 0.0, 'amount_currency': 200.0, }), # Rate is 20 GOL = 1 USD in 2017. 
(0, 0, { 'account_id': self.company_data['default_account_receivable'].id, 'partner_id': partner.id, 'currency_id': self.currency_data_2['currency'].id, 'debit': 14.0, 'credit': 0.0, 'amount_currency': 280.0, }), # Line to balance the journal entry: (0, 0, { 'account_id': self.company_data['default_account_revenue'].id, 'debit': 0.0, 'credit': 114.0, }), ], }) move.action_post() move_line_1 = move.line_ids.filtered(lambda line: line.debit == 100.0) move_line_2 = move.line_ids.filtered(lambda line: line.debit == 14.0) with freeze_time('2017-01-01'): self._check_statement_matching(matching_rule, { statement_line.id: {'aml_ids': (move_line_1 + move_line_2).ids, 'model': matching_rule, 'partner': statement_line.partner_id} }, statements=statement) def test_inv_matching_with_write_off(self): self.rule_1.match_total_amount_param = 90 self.bank_st.line_ids[1].unlink() # We don't need this one here statement_line = self.bank_st.line_ids[0] statement_line.write({ 'payment_ref': self.invoice_line_1.move_id.payment_reference, 'amount': 90, }) # Test the invoice-matching part self._check_statement_matching(self.rule_1, { statement_line.id: {'aml_ids': self.invoice_line_1.ids, 'model': self.rule_1, 'partner': self.invoice_line_1.partner_id, 'status': 'write_off'}, }, self.bank_st) # Test the write-off part expected_write_off = { 'balance': 10, 'currency_id': False, 'reconcile_model_id': self.rule_1.id, 'account_id': self.current_assets_account.id, } matching_result = self.rule_1._apply_rules(statement_line) self.assertEqual(len(matching_result[statement_line.id].get('write_off_vals', [])), 1, "Exactly one write-off line should be proposed.") full_write_off_dict = matching_result[statement_line.id]['write_off_vals'][0] to_compare = { key: full_write_off_dict[key] for key in expected_write_off.keys() } self.assertDictEqual(expected_write_off, to_compare) def test_inv_matching_with_write_off_autoreconcile(self): self.bank_line_1.amount = 95 self.rule_1.sequence = 2 self.rule_1.auto_reconcile = True self.rule_1.match_total_amount_param = 90 self._check_statement_matching(self.rule_1, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': []}, }, statements=self.bank_st) # Check first line has been properly reconciled. self.assertRecordValues(self.bank_line_1.line_ids, [ {'partner_id': self.partner_1.id, 'debit': 95.0, 'credit': 0.0, 'account_id': self.bank_journal.default_account_id.id, 'reconciled': False}, {'partner_id': self.partner_1.id, 'debit': 5.0, 'credit': 0.0, 'account_id': self.current_assets_account.id, 'reconciled': False}, {'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 100.0, 'account_id': self.invoice_line_1.account_id.id, 'reconciled': True}, ]) self.assertEqual(self.invoice_line_1.amount_residual, 0.0, "The invoice should have been fully reconciled") def test_avoid_amount_matching_bypass(self): """ By the default, if the label of statement lines exactly matches a payment reference, it bypasses any kind of amount verification. This is annoying in some setups, so a config parameter was introduced to handle that. 
""" self.env['ir.config_parameter'].set_param('account.disable_rec_models_bypass', '1') self.rule_1.match_total_amount_param = 90 second_inv_matching_rule = self.env['account.reconcile.model'].create({ 'name': 'Invoices Matching Rule', 'sequence': 2, 'rule_type': 'invoice_matching', 'auto_reconcile': False, 'match_nature': 'both', 'match_same_currency': False, 'match_total_amount': False, 'match_partner': True, 'company_id': self.company.id, }) self.bank_line_1.write({ 'payment_ref': self.invoice_line_1.move_id.payment_reference, 'amount': 99, }) self.bank_line_2.write({ 'payment_ref': self.invoice_line_2.move_id.payment_reference, 'amount': 1, }) self._check_statement_matching(self.rule_1 + second_inv_matching_rule, { self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'write_off', 'partner': self.bank_line_1.partner_id}, self.bank_line_2.id: {'aml_ids': [self.invoice_line_2.id], 'model': second_inv_matching_rule, 'partner': self.bank_line_2.partner_id} }, statements=self.bank_st)
agpl-3.0
3,438,393,911,488,528,400
-5,684,238,960,782,029,000
47.982759
157
0.55464
false
bourreauEric/or-tools
examples/python/max_flow_taha.py
34
3443
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""

  Max flow problem in Google CP Solver.

  From Taha 'Introduction to Operations Research', Example 6.4-2

  Translated from the AMPL code at
  http://taha.ineg.uark.edu/maxflo.txt

  Compare with the following model:
  * MiniZinc: http://www.hakank.org/minizinc/max_flow_taha.mzn

  This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
  Also see my other Google CP Solver models:
  http://www.hakank.org/google_or_tools/

"""

from ortools.constraint_solver import pywrapcp


def main():

  # Create the solver.
  solver = pywrapcp.Solver('Max flow problem, Taha')

  #
  # data
  #
  n = 5
  start = 0
  end = n - 1

  nodes = range(n)

  # capacity matrix (a zero entry means there is no arc)
  c = [
      [0, 20, 30, 10, 0],
      [0, 0, 40, 0, 30],
      [0, 0, 0, 10, 20],
      [0, 0, 5, 0, 20],
      [0, 0, 0, 0, 0]
  ]

  #
  # declare variables
  #
  x = {}
  for i in nodes:
    for j in nodes:
      x[i, j] = solver.IntVar(0, c[i][j], 'x[%i,%i]' % (i, j))

  x_flat = [x[i, j] for i in nodes for j in nodes]
  out_flow = [solver.IntVar(0, 10000, 'out_flow[%i]' % i) for i in nodes]
  in_flow = [solver.IntVar(0, 10000, 'in_flow[%i]' % i) for i in nodes]

  total = solver.IntVar(0, 10000, 'z')

  #
  # constraints
  #

  # the total flow is the flow leaving the source node
  cost_sum = solver.Sum([x[start, j] for j in nodes if c[start][j] > 0])
  solver.Add(total == cost_sum)

  for i in nodes:
    in_flow_sum = solver.Sum([x[j, i] for j in nodes if c[j][i] > 0])
    solver.Add(in_flow[i] == in_flow_sum)

    out_flow_sum = solver.Sum([x[i, j] for j in nodes if c[i][j] > 0])
    solver.Add(out_flow[i] == out_flow_sum)

  # in_flow == out_flow for all intermediate nodes
  for i in nodes:
    if i != start and i != end:
      solver.Add(out_flow[i] - in_flow[i] == 0)

  # no flow back into the source
  s1 = [x[i, start] for i in nodes if c[i][start] > 0]
  if len(s1) > 0:
    solver.Add(solver.Sum(s1) == 0)

  # no flow out of the sink
  s2 = [x[end, j] for j in nodes if c[end][j] > 0]
  if len(s2) > 0:
    solver.Add(solver.Sum(s2) == 0)

  # objective: maximize the total flow
  objective = solver.Maximize(total, 1)

  #
  # solution and search
  #
  db = solver.Phase(x_flat,
                    solver.INT_VAR_DEFAULT,
                    solver.ASSIGN_MAX_VALUE)

  solver.NewSearch(db, [objective])

  num_solutions = 0
  while solver.NextSolution():
    num_solutions += 1
    print 'total:', total.Value()
    print 'in_flow:', [in_flow[i].Value() for i in nodes]
    print 'out_flow:', [out_flow[i].Value() for i in nodes]
    for i in nodes:
      for j in nodes:
        print '%2i' % x[i, j].Value(),
      print
    print

  print 'num_solutions:', num_solutions
  print 'failures:', solver.Failures()
  print 'branches:', solver.Branches()
  print 'WallTime:', solver.WallTime(), 'ms'


if __name__ == '__main__':
  main()
apache-2.0
302,889,266,003,685,100
8,965,992,253,103,775,000
25.689922
74
0.604415
false
epssy/hue
desktop/core/ext-py/elementtree/selftest.py
45
28405
# $Id: selftest.py 2326 2005-03-17 07:45:21Z fredrik $ # -*- coding: iso-8859-1 -*- # elementtree selftest program # this test script uses Python's "doctest" module to check that the # *test script* works as expected. # TODO: add more elementtree method tests # TODO: add xml/html parsing tests # TODO: etc import sys, string, StringIO from elementtree import ElementTree from elementtree import ElementPath from elementtree import ElementInclude from elementtree import HTMLTreeBuilder from elementtree import SimpleXMLWriter def serialize(elem, encoding=None): import StringIO file = StringIO.StringIO() tree = ElementTree.ElementTree(elem) if encoding: tree.write(file, encoding) else: tree.write(file) return file.getvalue() def summarize(elem): return elem.tag def summarize_list(seq): return map(summarize, seq) def normalize_crlf(tree): for elem in tree.getiterator(): if elem.text: elem.text = string.replace(elem.text, "\r\n", "\n") if elem.tail: elem.tail = string.replace(elem.tail, "\r\n", "\n") SAMPLE_XML = ElementTree.XML(""" <body> <tag>text</tag> <tag /> <section> <tag>subtext</tag> </section> </body> """) # # interface tests def check_string(string): len(string) for char in string: if len(char) != 1: print "expected one-character string, got %r" % char new_string = string + "" new_string = string + " " string[:0] def check_string_or_none(value): if value is None: return return check_string(value) def check_mapping(mapping): len(mapping) keys = mapping.keys() items = mapping.items() for key in keys: item = mapping[key] mapping["key"] = "value" if mapping["key"] != "value": print "expected value string, got %r" % mapping["key"] def check_element(element): if not hasattr(element, "tag"): print "no tag member" if not hasattr(element, "attrib"): print "no attrib member" if not hasattr(element, "text"): print "no text member" if not hasattr(element, "tail"): print "no tail member" check_string(element.tag) check_mapping(element.attrib) check_string_or_none(element.text) check_string_or_none(element.tail) for elem in element: check_element(elem) def check_element_tree(tree): check_element(tree.getroot()) # -------------------------------------------------------------------- # element tree tests def sanity(): """ >>> from elementtree.ElementTree import * >>> from elementtree.ElementInclude import * >>> from elementtree.ElementPath import * >>> from elementtree.HTMLTreeBuilder import * >>> from elementtree.SimpleXMLTreeBuilder import * >>> from elementtree.SimpleXMLWriter import * >>> from elementtree.TidyTools import * >>> from elementtree.XMLTreeBuilder import * """ def version(): """ >>> ElementTree.VERSION '1.2.6' """ def interface(): """ Test element tree interface. >>> element = ElementTree.Element("tag") >>> check_element(element) >>> tree = ElementTree.ElementTree(element) >>> check_element_tree(tree) """ def simplefind(): """ Test find methods using the elementpath fallback. >>> CurrentElementPath = ElementTree.ElementPath >>> ElementTree.ElementPath = ElementTree._SimpleElementPath() >>> elem = SAMPLE_XML >>> elem.find("tag").tag 'tag' >>> ElementTree.ElementTree(elem).find("tag").tag 'tag' >>> elem.findtext("tag") 'text' >>> elem.findtext("tog") >>> elem.findtext("tog", "default") 'default' >>> ElementTree.ElementTree(elem).findtext("tag") 'text' >>> summarize_list(elem.findall("tag")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag'] Path syntax doesn't work in this case. 
>>> elem.find("section/tag") >>> elem.findtext("section/tag") >>> elem.findall("section/tag") [] >>> ElementTree.ElementPath = CurrentElementPath """ def find(): """ Test find methods (including xpath syntax). >>> elem = SAMPLE_XML >>> elem.find("tag").tag 'tag' >>> ElementTree.ElementTree(elem).find("tag").tag 'tag' >>> elem.find("section/tag").tag 'tag' >>> ElementTree.ElementTree(elem).find("section/tag").tag 'tag' >>> elem.findtext("tag") 'text' >>> elem.findtext("tog") >>> elem.findtext("tog", "default") 'default' >>> ElementTree.ElementTree(elem).findtext("tag") 'text' >>> elem.findtext("section/tag") 'subtext' >>> ElementTree.ElementTree(elem).findtext("section/tag") 'subtext' >>> summarize_list(elem.findall("tag")) ['tag', 'tag'] >>> summarize_list(elem.findall("*")) ['tag', 'tag', 'section'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag'] >>> summarize_list(elem.findall("section/tag")) ['tag'] >>> summarize_list(elem.findall("section//tag")) ['tag'] >>> summarize_list(elem.findall("section/*")) ['tag'] >>> summarize_list(elem.findall("section//*")) ['tag'] >>> summarize_list(elem.findall("section/.//*")) ['tag'] >>> summarize_list(elem.findall("*/*")) ['tag'] >>> summarize_list(elem.findall("*//*")) ['tag'] >>> summarize_list(elem.findall("*/tag")) ['tag'] >>> summarize_list(elem.findall("*/./tag")) ['tag'] >>> summarize_list(elem.findall("./tag")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag'] >>> summarize_list(elem.findall("././tag")) ['tag', 'tag'] >>> summarize_list(ElementTree.ElementTree(elem).findall("/tag")) ['tag', 'tag'] >>> summarize_list(ElementTree.ElementTree(elem).findall("./tag")) ['tag', 'tag'] """ def bad_find(): """ Check bad or unsupported path expressions. >>> elem = SAMPLE_XML >>> elem.findall("/tag") Traceback (most recent call last): SyntaxError: cannot use absolute path on element >>> elem.findall("../tag") Traceback (most recent call last): SyntaxError: unsupported path syntax (..) >>> elem.findall("section//") Traceback (most recent call last): SyntaxError: path cannot end with // >>> elem.findall("tag[tag]") Traceback (most recent call last): SyntaxError: expected path separator ([) """ def parsefile(): """ Test parsing from file. >>> tree = ElementTree.parse("samples/simple.xml") >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> >>> tree = ElementTree.parse("samples/simple-ns.xml") >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <ns0:root xmlns:ns0="namespace"> <ns0:element key="value">text</ns0:element> <ns0:element>text</ns0:element>tail <ns0:empty-element /> </ns0:root> """ def parsehtml(): """ Test HTML parsing. 
>>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p><p>spam<b>egg</b></p>") >>> serialize(p.close()) '<p>spam<b>egg</b></p>' """ def parseliteral(): r""" >>> element = ElementTree.XML("<html><body>text</body></html>") >>> ElementTree.ElementTree(element).write(sys.stdout) <html><body>text</body></html> >>> element = ElementTree.fromstring("<html><body>text</body></html>") >>> ElementTree.ElementTree(element).write(sys.stdout) <html><body>text</body></html> >>> print ElementTree.tostring(element) <html><body>text</body></html> >>> print ElementTree.tostring(element, "ascii") <?xml version='1.0' encoding='ascii'?> <html><body>text</body></html> >>> _, ids = ElementTree.XMLID("<html><body>text</body></html>") >>> len(ids) 0 >>> _, ids = ElementTree.XMLID("<html><body id='body'>text</body></html>") >>> len(ids) 1 >>> ids["body"].tag 'body' """ def simpleparsefile(): """ Test the xmllib-based parser. >>> from elementtree import SimpleXMLTreeBuilder >>> parser = SimpleXMLTreeBuilder.TreeBuilder() >>> tree = ElementTree.parse("samples/simple.xml", parser) >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> """ def iterparse(): """ Test iterparse interface. >>> iterparse = ElementTree.iterparse >>> context = iterparse("samples/simple.xml") >>> for action, elem in context: ... print action, elem.tag end element end element end empty-element end root >>> context.root.tag 'root' >>> context = iterparse("samples/simple-ns.xml") >>> for action, elem in context: ... print action, elem.tag end {namespace}element end {namespace}element end {namespace}empty-element end {namespace}root >>> events = () >>> context = iterparse("samples/simple.xml", events) >>> for action, elem in context: ... print action, elem.tag >>> events = () >>> context = iterparse("samples/simple.xml", events=events) >>> for action, elem in context: ... print action, elem.tag >>> events = ("start", "end") >>> context = iterparse("samples/simple.xml", events) >>> for action, elem in context: ... print action, elem.tag start root start element end element start element end element start empty-element end empty-element end root >>> events = ("start", "end", "start-ns", "end-ns") >>> context = iterparse("samples/simple-ns.xml", events) >>> for action, elem in context: ... if action in ("start", "end"): ... print action, elem.tag ... else: ... print action, elem start-ns ('', 'namespace') start {namespace}root start {namespace}element end {namespace}element start {namespace}element end {namespace}element start {namespace}empty-element end {namespace}empty-element end {namespace}root end-ns None """ def fancyparsefile(): """ Test the "fancy" parser. Sanity check. >>> from elementtree import XMLTreeBuilder >>> parser = XMLTreeBuilder.FancyTreeBuilder() >>> tree = ElementTree.parse("samples/simple.xml", parser) >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> Callback check. >>> class MyFancyParser(XMLTreeBuilder.FancyTreeBuilder): ... def start(self, elem): ... print "START", elem.tag ... def end(self, elem): ... 
print "END", elem.tag >>> parser = MyFancyParser() >>> tree = ElementTree.parse("samples/simple.xml", parser) START root START element END element START element END element START empty-element END empty-element END root """ def writefile(): """ >>> elem = ElementTree.Element("tag") >>> elem.text = "text" >>> serialize(elem) '<tag>text</tag>' >>> ElementTree.SubElement(elem, "subtag").text = "subtext" >>> serialize(elem) '<tag>text<subtag>subtext</subtag></tag>' """ def writestring(): """ >>> elem = ElementTree.XML("<html><body>text</body></html>") >>> ElementTree.tostring(elem) '<html><body>text</body></html>' >>> elem = ElementTree.fromstring("<html><body>text</body></html>") >>> ElementTree.tostring(elem) '<html><body>text</body></html>' """ def encoding(): r""" Test encoding issues. >>> elem = ElementTree.Element("tag") >>> elem.text = u"abc" >>> serialize(elem) '<tag>abc</tag>' >>> serialize(elem, "utf-8") '<tag>abc</tag>' >>> serialize(elem, "us-ascii") '<tag>abc</tag>' >>> serialize(elem, "iso-8859-1") "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>" >>> elem.text = "<&\"\'>" >>> serialize(elem) '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, "utf-8") '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, "us-ascii") # cdata characters '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, "iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag>&lt;&amp;"\'&gt;</tag>' >>> elem.attrib["key"] = "<&\"\'>" >>> elem.text = None >>> serialize(elem) '<tag key="&lt;&amp;&quot;&apos;&gt;" />' >>> serialize(elem, "utf-8") '<tag key="&lt;&amp;&quot;&apos;&gt;" />' >>> serialize(elem, "us-ascii") '<tag key="&lt;&amp;&quot;&apos;&gt;" />' >>> serialize(elem, "iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="&lt;&amp;&quot;&apos;&gt;" />' >>> elem.text = u'\xe5\xf6\xf6<>' >>> elem.attrib.clear() >>> serialize(elem) '<tag>&#229;&#246;&#246;&lt;&gt;</tag>' >>> serialize(elem, "utf-8") '<tag>\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;</tag>' >>> serialize(elem, "us-ascii") '<tag>&#229;&#246;&#246;&lt;&gt;</tag>' >>> serialize(elem, "iso-8859-1") "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6&lt;&gt;</tag>" >>> elem.attrib["key"] = u'\xe5\xf6\xf6<>' >>> elem.text = None >>> serialize(elem) '<tag key="&#229;&#246;&#246;&lt;&gt;" />' >>> serialize(elem, "utf-8") '<tag key="\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;" />' >>> serialize(elem, "us-ascii") '<tag key="&#229;&#246;&#246;&lt;&gt;" />' >>> serialize(elem, "iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6&lt;&gt;" />' """ ENTITY_XML = """\ <!DOCTYPE points [ <!ENTITY % user-entities SYSTEM 'user-entities.xml'> %user-entities; ]> <document>&entity;</document> """ def entity(): """ Test entity handling. 1) bad entities >>> ElementTree.XML("<document>&entity;</document>") Traceback (most recent call last): ExpatError: undefined entity: line 1, column 10 >>> ElementTree.XML(ENTITY_XML) Traceback (most recent call last): ExpatError: undefined entity &entity;: line 5, column 10 (add more tests here) """ def namespace(): """ Test namespace issues. 
1) xml namespace >>> elem = ElementTree.XML("<tag xml:lang='en' />") >>> serialize(elem) # 1.1 '<tag xml:lang="en" />' 2) other "well-known" namespaces >>> elem = ElementTree.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />") >>> serialize(elem) # 2.1 '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />' >>> elem = ElementTree.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />") >>> serialize(elem) # 2.2 '<html:html xmlns:html="http://www.w3.org/1999/xhtml" />' >>> elem = ElementTree.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />") >>> serialize(elem) # 2.3 '<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />' 3) unknown namespaces """ def qname(): """ Test QName handling. 1) decorated tags >>> elem = ElementTree.Element("{uri}tag") >>> serialize(elem) # 1.1 '<ns0:tag xmlns:ns0="uri" />' >>> elem = ElementTree.Element(ElementTree.QName("{uri}tag")) >>> serialize(elem) # 1.2 '<ns0:tag xmlns:ns0="uri" />' >>> elem = ElementTree.Element(ElementTree.QName("uri", "tag")) >>> serialize(elem) # 1.3 '<ns0:tag xmlns:ns0="uri" />' 2) decorated attributes >>> elem.clear() >>> elem.attrib["{uri}key"] = "value" >>> serialize(elem) # 2.1 '<ns0:tag ns0:key="value" xmlns:ns0="uri" />' >>> elem.clear() >>> elem.attrib[ElementTree.QName("{uri}key")] = "value" >>> serialize(elem) # 2.2 '<ns0:tag ns0:key="value" xmlns:ns0="uri" />' 3) decorated values are not converted by default, but the QName wrapper can be used for values >>> elem.clear() >>> elem.attrib["{uri}key"] = "{uri}value" >>> serialize(elem) # 3.1 '<ns0:tag ns0:key="{uri}value" xmlns:ns0="uri" />' >>> elem.clear() >>> elem.attrib["{uri}key"] = ElementTree.QName("{uri}value") >>> serialize(elem) # 3.2 '<ns0:tag ns0:key="ns0:value" xmlns:ns0="uri" />' >>> elem.clear() >>> subelem = ElementTree.Element("tag") >>> subelem.attrib["{uri1}key"] = ElementTree.QName("{uri2}value") >>> elem.append(subelem) >>> elem.append(subelem) >>> serialize(elem) # 3.3 '<ns0:tag xmlns:ns0="uri"><tag ns1:key="ns2:value" xmlns:ns1="uri1" xmlns:ns2="uri2" /><tag ns1:key="ns2:value" xmlns:ns1="uri1" xmlns:ns2="uri2" /></ns0:tag>' """ def xpath_tokenizer(p): """ Test the XPath tokenizer. 
>>> # tests from the xml specification >>> xpath_tokenizer("*") ['*'] >>> xpath_tokenizer("text()") ['text', '()'] >>> xpath_tokenizer("@name") ['@', 'name'] >>> xpath_tokenizer("@*") ['@', '*'] >>> xpath_tokenizer("para[1]") ['para', '[', '1', ']'] >>> xpath_tokenizer("para[last()]") ['para', '[', 'last', '()', ']'] >>> xpath_tokenizer("*/para") ['*', '/', 'para'] >>> xpath_tokenizer("/doc/chapter[5]/section[2]") ['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']'] >>> xpath_tokenizer("chapter//para") ['chapter', '/', '/', 'para'] >>> xpath_tokenizer("//para") ['/', '/', 'para'] >>> xpath_tokenizer("//olist/item") ['/', '/', 'olist', '/', 'item'] >>> xpath_tokenizer(".") ['.'] >>> xpath_tokenizer(".//para") ['.', '/', '/', 'para'] >>> xpath_tokenizer("..") ['..'] >>> xpath_tokenizer("../@lang") ['..', '/', '@', 'lang'] >>> xpath_tokenizer("chapter[title]") ['chapter', '[', 'title', ']'] >>> xpath_tokenizer("employee[@secretary and @assistant]") ['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']'] >>> # additional tests >>> xpath_tokenizer("{http://spam}egg") ['{http://spam}egg'] >>> xpath_tokenizer("./spam.egg") ['.', '/', 'spam.egg'] >>> xpath_tokenizer(".//{http://spam}egg") ['.', '/', '/', '{http://spam}egg'] """ out = [] for op, tag in ElementPath.xpath_tokenizer(p): out.append(op or tag) return out # # xinclude tests (samples from appendix C of the xinclude specification) XINCLUDE = {} XINCLUDE["C1.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>120 Mz is adequate for an average home user.</p> <xi:include href="disclaimer.xml"/> </document> """ XINCLUDE["disclaimer.xml"] = """\ <?xml version='1.0'?> <disclaimer> <p>The opinions represented herein represent those of the individual and should not be interpreted as official policy endorsed by this organization.</p> </disclaimer> """ XINCLUDE["C2.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>This document has been accessed <xi:include href="count.txt" parse="text"/> times.</p> </document> """ XINCLUDE["count.txt"] = "324387" XINCLUDE["C3.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>The following is the source of the "data.xml" resource:</p> <example><xi:include href="data.xml" parse="text"/></example> </document> """ XINCLUDE["data.xml"] = """\ <?xml version='1.0'?> <data> <item><![CDATA[Brooks & Shields]]></item> </data> """ XINCLUDE["C5.xml"] = """\ <?xml version='1.0'?> <div xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="example.txt" parse="text"> <xi:fallback> <xi:include href="fallback-example.txt" parse="text"> <xi:fallback><a href="mailto:bob@example.org">Report error</a></xi:fallback> </xi:include> </xi:fallback> </xi:include> </div> """ XINCLUDE["default.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>Example.</p> <xi:include href="samples/simple.xml"/> </document> """ def xinclude_loader(href, parse="xml", encoding=None): try: data = XINCLUDE[href] except KeyError: raise IOError("resource not found") if parse == "xml": return ElementTree.XML(data) return data def xinclude(): r""" Basic inclusion example (XInclude C.1) >>> document = xinclude_loader("C1.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C1 <document> <p>120 Mz is adequate for an average home user.</p> <disclaimer> <p>The opinions represented herein represent those of the individual and 
should not be interpreted as official policy endorsed by this organization.</p> </disclaimer> </document> Textual inclusion example (XInclude C.2) >>> document = xinclude_loader("C2.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C2 <document> <p>This document has been accessed 324387 times.</p> </document> Textual inclusion of XML example (XInclude C.3) >>> document = xinclude_loader("C3.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C3 <document> <p>The following is the source of the "data.xml" resource:</p> <example>&lt;?xml version='1.0'?&gt; &lt;data&gt; &lt;item&gt;&lt;![CDATA[Brooks &amp; Shields]]&gt;&lt;/item&gt; &lt;/data&gt; </example> </document> Fallback example (XInclude C.5) Note! Fallback support is not yet implemented >>> document = xinclude_loader("C5.xml") >>> ElementInclude.include(document, xinclude_loader) Traceback (most recent call last): IOError: resource not found >>> # print serialize(document) # C5 """ def xinclude_default(): """ >>> document = xinclude_loader("default.xml") >>> ElementInclude.include(document) >>> print serialize(document) # default <document> <p>Example.</p> <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> </document> """ # # xmlwriter def xmlwriter(): r""" >>> file = StringIO.StringIO() >>> w = SimpleXMLWriter.XMLWriter(file) >>> html = w.start("html") >>> x = w.start("head") >>> w.element("title", "my document") >>> w.data("\n") >>> w.element("meta", name="hello", value="goodbye") >>> w.data("\n") >>> w.end() >>> x = w.start("body") >>> w.element("h1", "this is a heading") >>> w.data("\n") >>> w.element("p", u"this is a paragraph") >>> w.data("\n") >>> w.element("p", u"reserved characters: <&>") >>> w.data("\n") >>> w.element("p", u"detta är också ett stycke") >>> w.data("\n") >>> w.close(html) >>> print file.getvalue() <html><head><title>my document</title> <meta name="hello" value="goodbye" /> </head><body><h1>this is a heading</h1> <p>this is a paragraph</p> <p>reserved characters: &lt;&amp;&gt;</p> <p>detta &#228;r ocks&#229; ett stycke</p> </body></html> """ # -------------------------------------------------------------------- # reported bugs def bug_xmltoolkit21(): """ marshaller gives obscure errors for non-string values >>> elem = ElementTree.Element(123) >>> serialize(elem) # tag Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ElementTree.Element("elem") >>> elem.text = 123 >>> serialize(elem) # text Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ElementTree.Element("elem") >>> elem.tail = 123 >>> serialize(elem) # tail Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ElementTree.Element("elem") >>> elem.set(123, "123") >>> serialize(elem) # attribute key Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ElementTree.Element("elem") >>> elem.set("123", 123) >>> serialize(elem) # attribute value Traceback (most recent call last): TypeError: cannot serialize 123 (type int) """ def bug_xmltoolkit25(): """ typo in ElementTree.findtext >>> tree = ElementTree.ElementTree(SAMPLE_XML) >>> tree.findtext("tag") 'text' >>> tree.findtext("section/tag") 'subtext' """ def bug_xmltoolkit28(): """ .//tag causes exceptions >>> tree = ElementTree.XML("<doc><table><tbody/></table></doc>") >>> summarize_list(tree.findall(".//thead")) [] >>> 
summarize_list(tree.findall(".//tbody")) ['tbody'] """ def bug_xmltoolkitX1(): """ dump() doesn't flush the output buffer >>> tree = ElementTree.XML("<doc><table><tbody/></table></doc>") >>> ElementTree.dump(tree); sys.stdout.write("tail") <doc><table><tbody /></table></doc> tail """ def bug_xmltoolkit39(): """ non-ascii element and attribute names doesn't work >>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><täg />") >>> ElementTree.tostring(tree, "utf-8") '<t\\xc3\\xa4g />' >>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><tag ättr='v&#228;lue' />") >>> tree.attrib {u'\\xe4ttr': u'v\\xe4lue'} >>> ElementTree.tostring(tree, "utf-8") '<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />' >>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><täg>text</täg>") >>> ElementTree.tostring(tree, "utf-8") '<t\\xc3\\xa4g>text</t\\xc3\\xa4g>' >>> tree = ElementTree.Element(u"täg") >>> ElementTree.tostring(tree, "utf-8") '<t\\xc3\\xa4g />' >>> tree = ElementTree.Element("tag") >>> tree.set(u"ättr", u"välue") >>> ElementTree.tostring(tree, "utf-8") '<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />' """ def bug_xmltoolkit45(): """ problems parsing mixed unicode/non-ascii html documents latin-1 text >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>välue</p>") >>> serialize(p.close()) '<p>v&#228;lue</p>' utf-8 text >>> p = HTMLTreeBuilder.TreeBuilder(encoding="utf-8") >>> p.feed("<p>v\xc3\xa4lue</p>") >>> serialize(p.close()) '<p>v&#228;lue</p>' utf-8 text using meta tag >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<html><meta http-equiv='Content-Type' content='text/html; charset=utf-8'><p>v\xc3\xa4lue</p></html>") >>> serialize(p.close().find("p")) '<p>v&#228;lue</p>' latin-1 character references >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>v&#228;lue</p>") >>> serialize(p.close()) '<p>v&#228;lue</p>' latin-1 character entities >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>v&auml;lue</p>") >>> serialize(p.close()) '<p>v&#228;lue</p>' mixed latin-1 text and unicode entities >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>&#8221;välue&#8221;</p>") >>> serialize(p.close()) '<p>&#8221;v&#228;lue&#8221;</p>' mixed unicode and latin-1 entities >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>&#8221;v&auml;lue&#8221;</p>") >>> serialize(p.close()) '<p>&#8221;v&#228;lue&#8221;</p>' """ def bug_xmltoolkit46(): """ problems parsing open BR tags >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>key<br>value</p>") >>> serialize(p.close()) '<p>key<br />value</p>' """ def bug_xmltoolkit54(): """ problems handling internally defined entities >>> e = ElementTree.XML("<!DOCTYPE doc [<!ENTITY ldots '&#x8230;'>]><doc>&ldots;</doc>") >>> serialize(e) '<doc>&#33328;</doc>' """ def bug_xmltoolkit55(): """ make sure we're reporting the first error, not the last >>> e = ElementTree.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>") Traceback (most recent call last): ExpatError: undefined entity &ldots;: line 1, column 36 """ # -------------------------------------------------------------------- if __name__ == "__main__": import doctest, selftest failed, tested = doctest.testmod(selftest) print tested - failed, "tests ok."
apache-2.0
3,944,516,991,313,844,700
8,237,353,730,597,972,000
27.263682
163
0.576905
false
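The selftest record above mostly exercises ElementTree's XPath-style path syntax (xpath_tokenizer) and XInclude processing. As a quick orientation, the same path expressions can be tried against the standard-library xml.etree.ElementTree; the sample document below is illustrative and not part of the test suite.

# Minimal sketch (not from the suite): the path syntax the tokenizer tests
# cover, run against the standard-library ElementTree.
import xml.etree.ElementTree as ET

doc = ET.fromstring(
    "<doc>"
    "<chapter><title>One</title><para>first</para></chapter>"
    "<chapter><para>second</para></chapter>"
    "</doc>"
)

print([p.text for p in doc.findall(".//para")])        # ['first', 'second']
print([c.tag for c in doc.findall("chapter[title]")])  # chapters that have a <title>
print(doc.find("chapter[2]/para").text)                # 'second'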
denisenkom/django-sqlserver
tests/pagination/tests.py
1
15383
from __future__ import unicode_literals import unittest import warnings from datetime import datetime import django from django.core.paginator import ( EmptyPage, InvalidPage, PageNotAnInteger, Paginator, ) if django.VERSION >= (1, 11, 0): from django.core.paginator import UnorderedObjectListWarning from django.test import TestCase from django.utils import six from .custom import ValidAdjacentNumsPaginator from .models import Article class PaginationTests(unittest.TestCase): """ Tests for the Paginator and Page classes. """ def check_paginator(self, params, output): """ Helper method that instantiates a Paginator object from the passed params and then checks that its attributes match the passed output. """ count, num_pages, page_range = output paginator = Paginator(*params) self.check_attribute('count', paginator, count, params) self.check_attribute('num_pages', paginator, num_pages, params) self.check_attribute('page_range', paginator, page_range, params, coerce=list) def check_attribute(self, name, paginator, expected, params, coerce=None): """ Helper method that checks a single attribute and gives a nice error message upon test failure. """ got = getattr(paginator, name) if coerce is not None: got = coerce(got) self.assertEqual( expected, got, "For '%s', expected %s but got %s. Paginator parameters were: %s" % (name, expected, got, params) ) def test_paginator(self): """ Tests the paginator attributes using varying inputs. """ nine = [1, 2, 3, 4, 5, 6, 7, 8, 9] ten = nine + [10] eleven = ten + [11] tests = ( # Each item is two tuples: # First tuple is Paginator parameters - object_list, per_page, # orphans, and allow_empty_first_page. # Second tuple is resulting Paginator attributes - count, # num_pages, and page_range. # Ten items, varying orphans, no empty first page. ((ten, 4, 0, False), (10, 3, [1, 2, 3])), ((ten, 4, 1, False), (10, 3, [1, 2, 3])), ((ten, 4, 2, False), (10, 2, [1, 2])), ((ten, 4, 5, False), (10, 2, [1, 2])), ((ten, 4, 6, False), (10, 1, [1])), # Ten items, varying orphans, allow empty first page. ((ten, 4, 0, True), (10, 3, [1, 2, 3])), ((ten, 4, 1, True), (10, 3, [1, 2, 3])), ((ten, 4, 2, True), (10, 2, [1, 2])), ((ten, 4, 5, True), (10, 2, [1, 2])), ((ten, 4, 6, True), (10, 1, [1])), # One item, varying orphans, no empty first page. (([1], 4, 0, False), (1, 1, [1])), (([1], 4, 1, False), (1, 1, [1])), (([1], 4, 2, False), (1, 1, [1])), # One item, varying orphans, allow empty first page. (([1], 4, 0, True), (1, 1, [1])), (([1], 4, 1, True), (1, 1, [1])), (([1], 4, 2, True), (1, 1, [1])), # Zero items, varying orphans, no empty first page. (([], 4, 0, False), (0, 0, [])), (([], 4, 1, False), (0, 0, [])), (([], 4, 2, False), (0, 0, [])), # Zero items, varying orphans, allow empty first page. (([], 4, 0, True), (0, 1, [1])), (([], 4, 1, True), (0, 1, [1])), (([], 4, 2, True), (0, 1, [1])), # Number if items one less than per_page. (([], 1, 0, True), (0, 1, [1])), (([], 1, 0, False), (0, 0, [])), (([1], 2, 0, True), (1, 1, [1])), ((nine, 10, 0, True), (9, 1, [1])), # Number if items equal to per_page. (([1], 1, 0, True), (1, 1, [1])), (([1, 2], 2, 0, True), (2, 1, [1])), ((ten, 10, 0, True), (10, 1, [1])), # Number if items one more than per_page. (([1, 2], 1, 0, True), (2, 2, [1, 2])), (([1, 2, 3], 2, 0, True), (3, 2, [1, 2])), ((eleven, 10, 0, True), (11, 2, [1, 2])), # Number if items one more than per_page with one orphan. 
(([1, 2], 1, 1, True), (2, 1, [1])), (([1, 2, 3], 2, 1, True), (3, 1, [1])), ((eleven, 10, 1, True), (11, 1, [1])), # Non-integer inputs ((ten, '4', 1, False), (10, 3, [1, 2, 3])), ((ten, '4', 1, False), (10, 3, [1, 2, 3])), ((ten, 4, '1', False), (10, 3, [1, 2, 3])), ((ten, 4, '1', False), (10, 3, [1, 2, 3])), ) for params, output in tests: self.check_paginator(params, output) def test_invalid_page_number(self): """ Invalid page numbers result in the correct exception being raised. """ paginator = Paginator([1, 2, 3], 2) with self.assertRaises(InvalidPage): paginator.page(3) with self.assertRaises(PageNotAnInteger): paginator.validate_number(None) with self.assertRaises(PageNotAnInteger): paginator.validate_number('x') # With no content and allow_empty_first_page=True, 1 is a valid page number paginator = Paginator([], 2) self.assertEqual(paginator.validate_number(1), 1) def test_paginate_misc_classes(self): class CountContainer(object): def count(self): return 42 # Paginator can be passed other objects with a count() method. paginator = Paginator(CountContainer(), 10) self.assertEqual(42, paginator.count) self.assertEqual(5, paginator.num_pages) self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range)) # Paginator can be passed other objects that implement __len__. class LenContainer(object): def __len__(self): return 42 paginator = Paginator(LenContainer(), 10) self.assertEqual(42, paginator.count) self.assertEqual(5, paginator.num_pages) self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range)) def check_indexes(self, params, page_num, indexes): """ Helper method that instantiates a Paginator object from the passed params and then checks that the start and end indexes of the passed page_num match those given as a 2-tuple in indexes. """ paginator = Paginator(*params) if page_num == 'first': page_num = 1 elif page_num == 'last': page_num = paginator.num_pages page = paginator.page(page_num) start, end = indexes msg = ("For %s of page %s, expected %s but got %s. Paginator parameters were: %s") self.assertEqual(start, page.start_index(), msg % ('start index', page_num, start, page.start_index(), params)) self.assertEqual(end, page.end_index(), msg % ('end index', page_num, end, page.end_index(), params)) def test_page_indexes(self): """ Paginator pages have the correct start and end indexes. """ ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] tests = ( # Each item is three tuples: # First tuple is Paginator parameters - object_list, per_page, # orphans, and allow_empty_first_page. # Second tuple is the start and end indexes of the first page. # Third tuple is the start and end indexes of the last page. # Ten items, varying per_page, no orphans. ((ten, 1, 0, True), (1, 1), (10, 10)), ((ten, 2, 0, True), (1, 2), (9, 10)), ((ten, 3, 0, True), (1, 3), (10, 10)), ((ten, 5, 0, True), (1, 5), (6, 10)), # Ten items, varying per_page, with orphans. ((ten, 1, 1, True), (1, 1), (9, 10)), ((ten, 1, 2, True), (1, 1), (8, 10)), ((ten, 3, 1, True), (1, 3), (7, 10)), ((ten, 3, 2, True), (1, 3), (7, 10)), ((ten, 3, 4, True), (1, 3), (4, 10)), ((ten, 5, 1, True), (1, 5), (6, 10)), ((ten, 5, 2, True), (1, 5), (6, 10)), ((ten, 5, 5, True), (1, 10), (1, 10)), # One item, varying orphans, no empty first page. (([1], 4, 0, False), (1, 1), (1, 1)), (([1], 4, 1, False), (1, 1), (1, 1)), (([1], 4, 2, False), (1, 1), (1, 1)), # One item, varying orphans, allow empty first page. 
(([1], 4, 0, True), (1, 1), (1, 1)), (([1], 4, 1, True), (1, 1), (1, 1)), (([1], 4, 2, True), (1, 1), (1, 1)), # Zero items, varying orphans, allow empty first page. (([], 4, 0, True), (0, 0), (0, 0)), (([], 4, 1, True), (0, 0), (0, 0)), (([], 4, 2, True), (0, 0), (0, 0)), ) for params, first, last in tests: self.check_indexes(params, 'first', first) self.check_indexes(params, 'last', last) # When no items and no empty first page, we should get EmptyPage error. with self.assertRaises(EmptyPage): self.check_indexes(([], 4, 0, False), 1, None) with self.assertRaises(EmptyPage): self.check_indexes(([], 4, 1, False), 1, None) with self.assertRaises(EmptyPage): self.check_indexes(([], 4, 2, False), 1, None) def test_page_sequence(self): """ A paginator page acts like a standard sequence. """ eleven = 'abcdefghijk' page2 = Paginator(eleven, per_page=5, orphans=1).page(2) self.assertEqual(len(page2), 6) self.assertIn('k', page2) self.assertNotIn('a', page2) self.assertEqual(''.join(page2), 'fghijk') self.assertEqual(''.join(reversed(page2)), 'kjihgf') def test_get_page_hook(self): """ A Paginator subclass can use the ``_get_page`` hook to return an alternative to the standard Page class. """ eleven = 'abcdefghijk' paginator = ValidAdjacentNumsPaginator(eleven, per_page=6) page1 = paginator.page(1) page2 = paginator.page(2) self.assertIsNone(page1.previous_page_number()) self.assertEqual(page1.next_page_number(), 2) self.assertEqual(page2.previous_page_number(), 1) self.assertIsNone(page2.next_page_number()) def test_page_range_iterator(self): """ Paginator.page_range should be an iterator. """ self.assertIsInstance(Paginator([1, 2, 3], 2).page_range, type(six.moves.range(0))) class ModelPaginationTests(TestCase): """ Test pagination with Django model instances """ def setUp(self): # Prepare a list of objects for pagination. for x in range(1, 10): a = Article(headline='Article %s' % x, pub_date=datetime(2005, 7, 29)) a.save() def test_first_page(self): paginator = Paginator(Article.objects.order_by('id'), 5) p = paginator.page(1) self.assertEqual("<Page 1 of 2>", six.text_type(p)) self.assertQuerysetEqual(p.object_list, [ "<Article: Article 1>", "<Article: Article 2>", "<Article: Article 3>", "<Article: Article 4>", "<Article: Article 5>" ]) self.assertTrue(p.has_next()) self.assertFalse(p.has_previous()) self.assertTrue(p.has_other_pages()) self.assertEqual(2, p.next_page_number()) with self.assertRaises(InvalidPage): p.previous_page_number() self.assertEqual(1, p.start_index()) self.assertEqual(5, p.end_index()) def test_last_page(self): paginator = Paginator(Article.objects.order_by('id'), 5) p = paginator.page(2) self.assertEqual("<Page 2 of 2>", six.text_type(p)) self.assertQuerysetEqual(p.object_list, [ "<Article: Article 6>", "<Article: Article 7>", "<Article: Article 8>", "<Article: Article 9>" ]) self.assertFalse(p.has_next()) self.assertTrue(p.has_previous()) self.assertTrue(p.has_other_pages()) with self.assertRaises(InvalidPage): p.next_page_number() self.assertEqual(1, p.previous_page_number()) self.assertEqual(6, p.start_index()) self.assertEqual(9, p.end_index()) def test_page_getitem(self): """ Tests proper behavior of a paginator page __getitem__ (queryset evaluation, slicing, exception raised). """ paginator = Paginator(Article.objects.order_by('id'), 5) p = paginator.page(1) # Make sure object_list queryset is not evaluated by an invalid __getitem__ call. 
# (this happens from the template engine when using eg: {% page_obj.has_previous %}) self.assertIsNone(p.object_list._result_cache) with self.assertRaises(TypeError): p['has_previous'] self.assertIsNone(p.object_list._result_cache) self.assertNotIsInstance(p.object_list, list) # Make sure slicing the Page object with numbers and slice objects work. self.assertEqual(p[0], Article.objects.get(headline='Article 1')) self.assertQuerysetEqual(p[slice(2)], [ "<Article: Article 1>", "<Article: Article 2>", ] ) # After __getitem__ is called, object_list is a list self.assertIsInstance(p.object_list, list) def test_paginating_unordered_queryset_raises_warning(self): if django.VERSION < (1, 11, 0): self.skipTest("does not work on older version of Django") with warnings.catch_warnings(record=True) as warns: # Prevent the RuntimeWarning subclass from appearing as an # exception due to the warnings.simplefilter() in runtests.py. warnings.filterwarnings('always', category=UnorderedObjectListWarning) Paginator(Article.objects.all(), 5) self.assertEqual(len(warns), 1) warning = warns[0] self.assertEqual(str(warning.message), ( "Pagination may yield inconsistent results with an unordered " "object_list: <class 'pagination.models.Article'> QuerySet." )) # The warning points at the Paginator caller (i.e. the stacklevel # is appropriate). self.assertEqual(warning.filename, __file__) def test_paginating_unordered_object_list_raises_warning(self): """ Unordered object list warning with an object that has an ordered attribute but not a model attribute. """ if django.VERSION < (1, 11, 0): self.skipTest("does not work on older version of Django") class ObjectList(): ordered = False object_list = ObjectList() with warnings.catch_warnings(record=True) as warns: warnings.filterwarnings('always', category=UnorderedObjectListWarning) Paginator(object_list, 5) self.assertEqual(len(warns), 1) self.assertEqual(str(warns[0].message), ( "Pagination may yield inconsistent results with an unordered " "object_list: {!r}.".format(object_list) ))
mit
4,391,191,717,116,693,000
-5,673,619,401,573,357,000
41.494475
119
0.536176
false
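The pagination record above drives Paginator with plain lists as well as querysets. A minimal usage sketch of the same API, using an ordinary Python list so no database or settings module is needed; the numbers mirror one row of the test table.

# Sketch: Paginator on a plain list, matching the ((ten, 4, 1, ...)) cases above.
from django.core.paginator import EmptyPage, Paginator

items = list(range(1, 11))                      # ten items
paginator = Paginator(items, per_page=4, orphans=1)

print(paginator.count, paginator.num_pages)     # 10 3
page = paginator.page(3)
print(list(page.object_list))                   # [9, 10]
print(page.has_next(), page.start_index())      # False 9

try:
    paginator.page(99)
except EmptyPage:
    print("out of range")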
adw0rd/lettuce-py3
lettuce/__init__.py
1
6767
# -*- coding: utf-8 -*- # <Lettuce - Behaviour Driven Development for python> # Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. __version__ = version = '0.2.22' release = 'kryptonite' import os import sys import traceback import warnings try: from imp import reload except ImportError: # python 2.5 fallback pass import random from lettuce.core import Feature, TotalResult from lettuce.terrain import after from lettuce.terrain import before from lettuce.terrain import world from lettuce.decorators import step, steps from lettuce.registry import call_hook from lettuce.registry import STEP_REGISTRY from lettuce.registry import CALLBACK_REGISTRY from lettuce.exceptions import StepLoadingError from lettuce.plugins import ( xunit_output, subunit_output, autopdb, smtp_mail_queue, ) from lettuce import fs from lettuce import exceptions try: from colorama import init as ms_windows_workaround ms_windows_workaround() except ImportError: pass __all__ = [ 'after', 'before', 'step', 'steps', 'world', 'STEP_REGISTRY', 'CALLBACK_REGISTRY', 'call_hook', ] try: terrain = fs.FileSystem._import("terrain") reload(terrain) except Exception as e: if not "No module named 'terrain'" in str(e): string = 'Lettuce has tried to load the conventional environment ' \ 'module "terrain"\nbut it has errors, check its contents and ' \ 'try to run lettuce again.\n\nOriginal traceback below:\n\n' sys.stderr.write(string) sys.stderr.write(exceptions.traceback.format_exc()) raise SystemExit(1) class Runner(object): """ Main lettuce's test runner Takes a base path as parameter (string), so that it can look for features and step definitions on there. """ def __init__(self, base_path, scenarios=None, verbosity=0, no_color=False, random=False, enable_xunit=False, xunit_filename=None, enable_subunit=False, subunit_filename=None, tags=None, failfast=False, auto_pdb=False, smtp_queue=None, root_dir=None, **kwargs): """ lettuce.Runner will try to find a terrain.py file and import it from within `base_path` """ self.tags = tags self.single_feature = None if os.path.isfile(base_path) and os.path.exists(base_path): self.single_feature = base_path base_path = os.path.dirname(base_path) sys.path.insert(0, base_path) self.loader = fs.FeatureLoader(base_path, root_dir) self.verbosity = verbosity self.scenarios = scenarios and list(map(int, scenarios.split(","))) or None self.failfast = failfast if auto_pdb: autopdb.enable(self) sys.path.remove(base_path) if verbosity == 0: from lettuce.plugins import non_verbose as output elif verbosity == 1: from lettuce.plugins import dots as output elif verbosity == 2: from lettuce.plugins import scenario_names as output else: if verbosity == 4: from lettuce.plugins import colored_shell_output as output msg = ('Deprecated in lettuce 2.2.21. 
Use verbosity 3 without ' '--no-color flag instead of verbosity 4') warnings.warn(msg, DeprecationWarning) elif verbosity == 3: if no_color: from lettuce.plugins import shell_output as output else: from lettuce.plugins import colored_shell_output as output self.random = random if enable_xunit: xunit_output.enable(filename=xunit_filename) if smtp_queue: smtp_mail_queue.enable() if enable_subunit: subunit_output.enable(filename=subunit_filename) reload(output) self.output = output def run(self): """ Find and load step definitions, and them find and load features under `base_path` specified on constructor """ results = [] if self.single_feature: features_files = [self.single_feature] else: features_files = self.loader.find_feature_files() if self.random: random.shuffle(features_files) if not features_files: self.output.print_no_features_found(self.loader.base_dir) return # only load steps if we've located some features. # this prevents stupid bugs when loading django modules # that we don't even want to test. try: self.loader.find_and_load_step_definitions() except StepLoadingError as e: print("Error loading step definitions:\n", e) return call_hook('before', 'all') failed = False try: for filename in features_files: feature = Feature.from_file(filename) results.append( feature.run(self.scenarios, tags=self.tags, random=self.random, failfast=self.failfast)) except exceptions.LettuceSyntaxError as e: sys.stderr.write(e.msg) failed = True except exceptions.NoDefinitionFound as e: sys.stderr.write(e.msg) failed = True except: if not self.failfast: e = sys.exc_info()[1] print("Died with %s" % str(e)) traceback.print_exc() else: print() print ("Lettuce aborted running any more tests " "because was called with the `--failfast` option") failed = True finally: total = TotalResult(results) total.output_format() call_hook('after', 'all', total) if failed: raise SystemExit(2) return total
gpl-3.0
-1,812,663,612,514,826,000
-6,734,317,743,778,212,000
30.469767
83
0.604936
false
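lettuce's Runner above is normally constructed by the lettuce command-line script, but it can also be driven directly. In the sketch below the features/ path, and the presence of a terrain.py and step definitions under it, are assumptions about the host project rather than something the module provides.

# Sketch: programmatic use of the Runner defined above.
from lettuce import Runner

runner = Runner(
    "features/",      # assumed directory of *.feature files and step modules
    verbosity=3,      # shell output; colored unless no_color=True is passed
    failfast=False,
)
total = runner.run()  # TotalResult, or None when no feature files are found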
arne-cl/pattern
pattern/text/en/modality.py
21
21985
#### PATTERN | EN | MOOD & MODALITY ################################################################ # -*- coding: utf-8 -*- # Copyright (c) 2010 University of Antwerp, Belgium # Author: Tom De Smedt <tom@organisms.be> # License: BSD (see LICENSE.txt for details). # http://www.clips.ua.ac.be/pages/pattern ### LIST FUNCTIONS ################################################################################# def find(function, list): """ Returns the first item in the list for which function(item) is True, None otherwise. """ for item in list: if function(item) == True: return item ### MOOD ########################################################################################### # Functions take Sentence objects, see pattern.text.tree.Sentence and pattern.text.parsetree(). INDICATIVE = "indicative" # They went for a walk. IMPERATIVE = "imperative" # Let's go for a walk! CONDITIONAL = "conditional" # It might be nice to go for a walk when it stops raining. SUBJUNCTIVE = "subjunctive" # It would be nice to go for a walk sometime. def s(word): return word.string.lower() def join(words): return " ".join([w.string.lower() for w in words]) def question(sentence): return len(sentence) > 0 and sentence[-1].string == "?" def verb(word): return word.type.startswith(("VB","MD")) and (word.chunk is None or word.chunk.type.endswith("VP")) def verbs(sentence, i=0, j=None): return [w for w in sentence[i:j or len(sentence)] if verb(w)] def imperative(sentence, **kwargs): """ The imperative mood is used to give orders, commands, warnings, instructions, or to make requests (if used with "please"). It is marked by the infinitive form of the verb, without "to": "For goodness sake, just stop it!" """ S = sentence if not (hasattr(S, "words") and hasattr(S, "parse_token")): raise TypeError("%s object is not a parsed Sentence" % repr(S.__class__.__name__)) if question(S): return False if S.subjects and s(S.subjects[0]) not in ("you", "yourself"): # The subject can only identify as "you" (2sg): "Control yourself!". return False r = s(S).rstrip(" .!") for cc in ("if", "assuming", "provided that", "given that"): # A conjunction can also indicate conditional mood. if cc+" " in r: return False for i, w in enumerate(S): if verb(w): if s(w) in ("do", "let") and w == verbs(S)[0]: # "Do your homework!" return True if s(w) in ("do", "let"): # "Let's not argue." continue if s(w) in ("would", "should", "'d", "could", "can", "may", "might"): # "You should leave." => conditional. return False if s(w) in ("will", "shall") and i > 0 and s(S[i-1]) == "you" and not verbs(S,0,i): # "You will eat your dinner." continue if w.type == "VB" and (i == 0 or s(S[i-1]) != "to"): # "Come here!" return True # Break on any other verb form. return False return False #from __init__ import parse, Sentence # #for str in ( # "Do your homework!", # True # "Do whatever you want.", # True # "Do not listen to me.", # True # "Do it if you think it is necessary.", # False # "Turn that off, will you.", # True # "Let's help him.", # True # "Help me!", # True # "You will help me.", # True # "I hope you will help me.", # False # "I can help you.", # False # "I can help you if you let me."): # False # print str # print parse(str) # print imperative(Sentence(parse(str))) # print def conditional(sentence, predictive=True, **kwargs): """ The conditional mood is used to talk about possible or imaginary situations. It is marked by the infinitive form of the verb, preceded by would/could/should: "we should be going", "we could have stayed longer". 
With predictive=False, sentences with will/shall need an explicit if/when/once-clause: - "I will help you" => predictive. - "I will help you if you pay me" => speculative. Sentences with can/may always need an explicit if-clause. """ S = sentence if not (hasattr(S, "words") and hasattr(S, "parse_token")): raise TypeError("%s object is not a parsed Sentence" % repr(S.__class__.__name__)) if question(S): return False i = find(lambda w: s(w) == "were", S) i = i and i.index or 0 if i > 0 and (s(S[i-1]) in ("i", "it", "he", "she") or S[i-1].type == "NN"): # "As if it were summer already." => subjunctive (wish). return False for i, w in enumerate(S): if w.type == "MD": if s(w) == "ought" and i < len(S) and s(S[i+1]) == "to": # "I ought to help you." return True if s(w) in ("would", "should", "'d", "could", "might"): # "I could help you." return True if s(w) in ("will", "shall", "'ll") and i > 0 and s(S[i-1]) == "you" and not verbs(S,0,i): # "You will help me." => imperative. return False if s(w) in ("will", "shall", "'ll") and predictive: # "I will help you." => predictive. return True if s(w) in ("will", "shall", "'ll", "can", "may"): # "I will help you when I get back." => speculative. r = s(S).rstrip(" .!") for cc in ("if", "when", "once", "as soon as", "assuming", "provided that", "given that"): if cc+" " in r: return True return False #from __init__ import parse, Sentence # #for str in ( # "We ought to help him.", # True # "We could help him.", # True # "I will help you.", # True # "You will help me.", # False (imperative) # "I hope you will help me.", # True (predictive) # "I can help you.", # False # "I can help you if you let me."): # True # print str # print parse(str) # print conditional(Sentence(parse(str))) # print subjunctive1 = [ "advise", "ask", "command", "demand", "desire", "insist", "propose", "recommend", "request", "suggest", "urge"] subjunctive2 = [ "best", "crucial", "desirable", "essential", "imperative", "important", "recommended", "urgent", "vital"] for w in list(subjunctive1): # Inflect. subjunctive1.append(w+"s") subjunctive1.append(w.rstrip("e")+"ed") def subjunctive(sentence, classical=True, **kwargs): """ The subjunctive mood is a classical mood used to express a wish, judgment or opinion. It is marked by the verb wish/were, or infinitive form of a verb preceded by an "it is"-statement: "It is recommended that he bring his own computer." """ S = sentence if not (hasattr(S, "words") and hasattr(S, "parse_token")): raise TypeError("%s object is not a parsed Sentence" % repr(S.__class__.__name__)) if question(S): return False for i, w in enumerate(S): b = False if w.type.startswith("VB"): if s(w).startswith("wish"): # "I wish I knew." return True if s(w) == "hope" and i > 0 and s(S[i-1]) in ("i", "we"): # "I hope ..." return True if s(w) == "were" and i > 0 and (s(S[i-1]) in ("i", "it", "he", "she") or S[i-1].type == "NN"): # "It is as though she were here." => counterfactual. return True if s(w) in subjunctive1: # "I propose that you be on time." b = True elif s(w) == "is" and 0 < i < len(S)-1 and s(S[i-1]) == "it" \ and s(S[i+1]) in subjunctive2: # "It is important that you be there." => but you aren't (yet). b = True elif s(w) == "is" and 0 < i < len(S)-3 and s(S[i-1]) == "it" \ and s(S[i+2]) in ("good", "bad") and s(S[i+3]) == "idea": # "It is a good idea that you be there." b = True if b: # With classical=False, "It is important that you are there." passes. # This is actually an informal error: it states a fact, not a wish. 
v = find(lambda w: w.type.startswith("VB"), S[i+1:]) if v and classical is True and v and v.type == "VB": return True if v and classical is False: return True return False #from __init__ import parse, Sentence # #for str in ( # "I wouldn't do that if I were you.", # True # "I wish I knew.", # True # "I propose that you be on time.", # True # "It is a bad idea to be late.", # True # "I will be dead."): # False, predictive # print str # print parse(str) # print subjunctive(Sentence(parse(str))) # print def negated(sentence, negative=("not", "n't", "never")): if hasattr(sentence, "string"): # Sentence object => string. sentence = sentence.string S = " %s " % (sentence).strip(".?!").lower() for w in negative: if " %s " % w in S: return True return False def mood(sentence, **kwargs): """ Returns IMPERATIVE (command), CONDITIONAL (possibility), SUBJUNCTIVE (wish) or INDICATIVE (fact). """ if isinstance(sentence, basestring): try: # A Sentence is expected but a string given. # Attempt to parse the string on-the-fly. from pattern.en import parse, Sentence sentence = Sentence(parse(sentence)) except ImportError: pass if imperative(sentence, **kwargs): return IMPERATIVE if conditional(sentence, **kwargs): return CONDITIONAL if subjunctive(sentence, **kwargs): return SUBJUNCTIVE else: return INDICATIVE ### MODALITY ####################################################################################### # Functions take Sentence objects, see pattern.text.tree.Sentence and pattern.text.parsetree(). def d(*args): return dict.fromkeys(args, True) AUXILLARY = { "be": ["be", "am", "m", "are", "is", "being", "was", "were", "been"], "can": ["can", "ca", "could"], "dare": ["dare", "dares", "daring", "dared"], "do": ["do", "does", "doing", "did", "done"], "have": ["have", "ve", "has", "having", "had"], "may": ["may", "might"], "must": ["must"], "need": ["need", "needs", "needing", "needed"], "ought": ["ought"], "shall": ["shall", "sha"], "will": ["will", "ll", "wo", "willing", "would", "d"] } MODIFIERS = ("fully", "highly", "most", "much", "strongly", "very") EPISTEMIC = "epistemic" # Expresses degree of possibility.
# -1.00 = NEGATIVE # -0.75 = NEGATIVE, with slight doubts # -0.50 = NEGATIVE, with doubts # -0.25 = NEUTRAL, slightly negative # +0.00 = NEUTRAL # +0.25 = NEUTRAL, slightly positive # +0.50 = POSITIVE, with doubts # +0.75 = POSITIVE, with slight doubts # +1.00 = POSITIVE epistemic_MD = { # would => could => can => should => shall => will => must -1.00: d(), -0.75: d(), -0.50: d("would"), -0.25: d("could", "dare", "might"), 0.00: d("can", "ca", "may"), +0.25: d("ought", "should"), +0.50: d("shall", "sha"), +0.75: d("will", "'ll", "wo"), +1.00: d("have", "has", "must", "need"), } epistemic_VB = { # wish => feel => believe => seem => think => know => prove + THAT -1.00: d(), -0.75: d(), -0.50: d("dispute", "disputed", "doubt", "question"), -0.25: d("hope", "want", "wish"), 0.00: d("guess", "imagine", "seek"), +0.25: d("appear", "bet", "feel", "hear", "rumor", "rumour", "say", "said", "seem", "seemed", "sense", "speculate", "suspect", "suppose", "wager"), +0.50: d("allude", "anticipate", "assume", "claim", "claimed", "believe", "believed", "conjecture", "consider", "considered", "decide", "expect", "find", "found", "hypothesize", "imply", "indicate", "infer", "postulate", "predict", "presume", "propose", "report", "reported", "suggest", "suggested", "tend", "think", "thought"), +0.75: d("know", "known", "look", "see", "show", "shown"), +1.00: d("certify", "demonstrate", "prove", "proven", "verify"), } epistemic_RB = { # unlikely => supposedly => maybe => probably => usually => clearly => definitely -1.00: d("impossibly"), -0.75: d("hardly"), -0.50: d("presumptively", "rarely", "scarcely", "seldomly", "uncertainly", "unlikely"), -0.25: d("almost", "allegedly", "debatably", "nearly", "presumably", "purportedly", "reportedly", "reputedly", "rumoredly", "rumouredly", "supposedly"), 0.00: d("barely", "hypothetically", "maybe", "occasionally", "perhaps", "possibly", "putatively", "sometimes", "sporadically", "traditionally", "widely"), +0.25: d("admittedly", "apparently", "arguably", "believably", "conceivably", "feasibly", "fairly", "hopefully", "likely", "ostensibly", "potentially", "probably", "quite", "seemingly"), +0.50: d("commonly", "credibly", "defendably", "defensibly", "effectively", "frequently", "generally", "largely", "mostly", "normally", "noticeably", "often", "plausibly", "reasonably", "regularly", "relatively", "typically", "usually"), +0.75: d("assuredly", "certainly", "clearly", "doubtless", "evidently", "evitably", "manifestly", "necessarily", "nevertheless", "observably", "ostensively", "patently", "plainly", "positively", "really", "surely", "truly", "undoubtably", "undoubtedly", "verifiably"), +1.00: d("absolutely", "always", "definitely", "incontestably", "indisputably", "indubitably", "ineluctably", "inescapably", "inevitably", "invariably", "obviously", "unarguably", "unavoidably", "undeniably", "unquestionably") } epistemic_JJ = { -1.00: d("absurd", "prepostoreous", "ridiculous"), -0.75: d("inconceivable", "unthinkable"), -0.50: d("misleading", "scant", "unlikely", "unreliable"), -0.25: d("customer-centric", "doubtful", "ever", "ill-defined, ""inadequate", "late", "uncertain", "unclear", "unrealistic", "unspecified", "unsure", "wild"), 0.00: d("dynamic", "possible", "unknown"), +0.25: d("according", "creative", "likely", "local", "innovative", "interesting", "potential", "probable", "several", "some", "talented", "viable"), +0.50: d("certain", "generally", "many", "notable", "numerous", "performance-oriented", "promising", "putative", "well-known"), +0.75: d("concrete", 
"credible", "famous", "important", "major", "necessary", "original", "positive", "significant", "real", "robust", "substantial", "sure"), +1.00: d("confirmed", "definite", "prime", "undisputable"), } epistemic_NN = { -1.00: d("fantasy", "fiction", "lie", "myth", "nonsense"), -0.75: d("controversy"), -0.50: d("criticism", "debate", "doubt"), -0.25: d("belief", "chance", "faith", "luck", "perception", "speculation"), 0.00: d("challenge", "guess", "feeling", "hunch", "opinion", "possibility", "question"), +0.25: d("assumption", "expectation", "hypothesis", "notion", "others", "team"), +0.50: d("example", "proces", "theory"), +0.75: d("conclusion", "data", "evidence", "majority", "proof", "symptom", "symptoms"), +1.00: d("fact", "truth", "power"), } epistemic_CC_DT_IN = { 0.00: d("either", "whether"), +0.25: d("however", "some"), +1.00: d("despite") } epistemic_PRP = { +0.25: d("I", "my"), +0.50: d("our"), +0.75: d("we") } epistemic_weaseling = { -0.75: d("popular belief"), -0.50: d("but that", "but this", "have sought", "might have", "seems to"), -0.25: d("may also", "may be", "may have", "may have been", "some have", "sort of"), +0.00: d("been argued", "believed to", "considered to", "claimed to", "is considered", "is possible", "overall solutions", "regarded as", "said to"), +0.25: d("a number of", "in some", "one of", "some of", "many modern", "many people", "most people", "some people", "some cases", "some studies", "scientists", "researchers"), +0.50: d("in several", "is likely", "many of", "many other", "of many", "of the most", "such as", "several reasons", "several studies", "several universities", "wide range"), +0.75: d("almost always", "and many", "and some", "around the world", "by many", "in many", "in order to", "most likely"), +1.00: d("i.e.", "'s most", "of course", "There are", "without doubt"), } def modality(sentence, type=EPISTEMIC): """ Returns the sentence's modality as a weight between -1.0 and +1.0. Currently, the only type implemented is EPISTEMIC. Epistemic modality is used to express possibility (i.e. how truthful is what is being said). """ if isinstance(sentence, basestring): try: # A Sentence is expected but a string given. # Attempt to parse the string on-the-fly. from pattern.en import parse, Sentence sentence = Sentence(parse(sentence)) except ImportError: pass S, n, m = sentence, 0.0, 0 if not (hasattr(S, "words") and hasattr(S, "parse_token")): raise TypeError("%s object is not a parsed Sentence" % repr(S.__class__.__name__)) if type == EPISTEMIC: r = S.string.rstrip(" .!") for k, v in epistemic_weaseling.items(): for phrase in v: if phrase in r: n += k m += 2 for i, w in enumerate(S.words): for type, dict, weight in ( ( "MD", epistemic_MD, 4), ( "VB", epistemic_VB, 2), ( "RB", epistemic_RB, 2), ( "JJ", epistemic_JJ, 1), ( "NN", epistemic_NN, 1), ( "CC", epistemic_CC_DT_IN, 1), ( "DT", epistemic_CC_DT_IN, 1), ( "IN", epistemic_CC_DT_IN, 1), ("PRP" , epistemic_PRP, 1), ("PRP$", epistemic_PRP, 1), ( "WP" , epistemic_PRP, 1)): # "likely" => weight 1, "very likely" => weight 2 if i > 0 and s(S[i-1]) in MODIFIERS: weight += 1 # likely" => score 0.25 (neutral inclining towards positive). if w.type and w.type.startswith(type): for k, v in dict.items(): # Prefer lemmata. if (w.lemma or s(w)) in v: # Reverse score for negated terms. if i > 0 and s(S[i-1]) in ("not", "n't", "never", "without"): k = -k * 0.5 n += weight * k m += weight break # Numbers, citations, explanations make the sentence more factual. 
if w.type in ("CD", "\"", "'", ":", "("): n += 0.75 m += 1 if m == 0: return 1.0 # No modal verbs/adverbs used, so statement must be true. return max(-1.0, min(n / (m or 1), +1.0)) def uncertain(sentence, threshold=0.5): return modality(sentence) <= threshold #from __init__ import parse, Sentence # #for str in ( # "I wish it would stop raining.", # "It will surely stop raining soon."): # print str # print parse(str) # print modality(Sentence(parse(str))) # print #--------------------------------------------------------------------------------------------------- # Celle, A. (2009). Hearsay adverbs and modality, in: Modality in English, Mouton. # Allegedly, presumably, purportedly, ... are in the negative range because # they introduce a fictious point of view by referring to an unclear source. #--------------------------------------------------------------------------------------------------- # Tseronis, A. (2009). Qualifying standpoints. LOT Dissertation Series: 233. # Following adverbs are not epistemic but indicate the way in which things are said. # 1) actually, admittedly, avowedly, basically, bluntly, briefly, broadly, candidly, # confidentially, factually, figuratively, frankly, generally, honestly, hypothetically, # in effect, in fact, in reality, indeed, literally, metaphorically, naturally, # of course, objectively, personally, really, roughly, seriously, simply, sincerely, # strictly, truly, truthfully. # 2) bizarrely, commendably, conveniently, curiously, disappointingly, fortunately, funnily, # happily, hopefully, illogically, interestingly, ironically, justifiably, justly, luckily, # oddly, paradoxically, preferably, regretfully, regrettably, sadly, significantly, # strangely, surprisingly, tragically, unaccountably, unfortunately, unhappily unreasonably #--------------------------------------------------------------------------------------------------- # The modality() function was tested with BioScope and Wikipedia training data from CoNLL2010 Shared Task 1. # See for example Morante, R., Van Asch, V., Daelemans, W. (2010): # Memory-Based Resolution of In-Sentence Scopes of Hedge Cues # http://www.aclweb.org/anthology/W/W10/W10-3006.pdf # Sentences in the training corpus are labelled as "certain" or "uncertain". # For Wikipedia sentences, 2000 "certain" and 2000 "uncertain": # modality(sentence) > 0.5 => A 0.70 P 0.73 R 0.64 F1 0.68
bsd-3-clause
-4,491,689,757,536,125,000
2,964,190,891,170,039,000
44.053279
111
0.534273
false
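The commented blocks inside the modality record above already show the intended call pattern: parse a string, wrap it in a Sentence, pass it to mood() and modality(). Below is a compact sketch of the same thing through pattern's public pattern.en API; the sentences are illustrative and the library itself targets Python 2.

# Sketch mirroring the commented self-tests in the module above.
from pattern.en import Sentence, modality, mood, parse

for text in ("Do your homework!",          # imperative
             "We could help him.",         # conditional
             "I wish I knew.",             # subjunctive
             "It will surely stop raining soon."):
    sent = Sentence(parse(text, lemmata=True))
    print("%s -> %s, modality %.2f" % (text, mood(sent), modality(sent)))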
dennybaa/st2
st2client/st2client/utils/date.py
7
1586
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dateutil.tz import dateutil.parser __all__ = [ 'parse', 'format_isodate' ] def add_utc_tz(dt): return dt.replace(tzinfo=dateutil.tz.tzutc()) def parse(value): dt = dateutil.parser.parse(str(value)) # pylint: disable=no-member # For some reason pylint thinks it returns a tuple but it returns a datetime object return dt if dt.tzinfo else add_utc_tz(dt) def format_isodate(value): """ Make a ISO date time string human friendly. :type value: ``str`` :rtype: ``str`` """ if not value: return '' # pylint: disable=no-member # For some reason pylint thinks it returns a tuple but it returns a datetime object date = dateutil.parser.parse(str(value)) value = date.strftime('%a, %d %b %Y %H:%M:%S %Z') return value
apache-2.0
-1,130,698,962,943,503,900
-8,290,943,191,393,660,000
30.098039
87
0.704918
false
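The date record above is small enough to read at a glance; here is a usage sketch with the st2client package installed. The timestamp is arbitrary and the rendered string depends on the local strftime data.

# Sketch: the two helpers from st2client.utils.date above.
from st2client.utils.date import format_isodate, parse

dt = parse("2015-06-10T14:33:22.000000Z")
print(dt.tzinfo)                 # tzutc(); naive timestamps get UTC attached
print(format_isodate("2015-06-10T14:33:22.000000Z"))
# e.g. 'Wed, 10 Jun 2015 14:33:22 UTC'
print(format_isodate(None))      # '' - empty values fall through untouched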
sipwise/repoapi
repoapi/wsgi.py
1
1088
# Copyright (C) 2015 The Sipwise Team - http://sipwise.com # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # You should have received a copy of the GNU General Public License along # with this program. If not, see <http://www.gnu.org/licenses/>. """ WSGI config for repoapi project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "repoapi.settings.prod") application = get_wsgi_application()
gpl-3.0
-2,133,405,463,189,636,900
6,323,652,141,942,587,000
35.266667
78
0.765625
false
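wsgi.py above only exposes the application callable; something still has to serve it. A local-testing sketch with the standard library follows; it assumes the repoapi project and its repoapi.settings.prod module are importable, and in production a real server such as gunicorn or uWSGI would be used instead.

# Sketch: serve the WSGI callable above with wsgiref, for local testing only.
from wsgiref.simple_server import make_server

from repoapi.wsgi import application   # sets DJANGO_SETTINGS_MODULE on import

httpd = make_server("127.0.0.1", 8000, application)
print("Serving repoapi on http://127.0.0.1:8000/")
httpd.serve_forever()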
ozburo/youtube-dl
youtube_dl/extractor/tf1.py
3
3073
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, try_get, ) class TF1IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tf1\.fr/[^/]+/(?P<program_slug>[^/]+)/videos/(?P<id>[^/?&#]+)\.html' _TESTS = [{ 'url': 'https://www.tf1.fr/tmc/quotidien-avec-yann-barthes/videos/quotidien-premiere-partie-11-juin-2019.html', 'info_dict': { 'id': '13641379', 'ext': 'mp4', 'title': 'md5:f392bc52245dc5ad43771650c96fb620', 'description': 'md5:a02cdb217141fb2d469d6216339b052f', 'upload_date': '20190611', 'timestamp': 1560273989, 'duration': 1738, 'series': 'Quotidien avec Yann Barthès', 'tags': ['intégrale', 'quotidien', 'Replay'], }, 'params': { # Sometimes wat serves the whole file with the --test option 'skip_download': True, 'format': 'bestvideo', }, }, { 'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html', 'only_matching': True, }, { 'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html', 'only_matching': True, }] def _real_extract(self, url): program_slug, slug = re.match(self._VALID_URL, url).groups() video = self._download_json( 'https://www.tf1.fr/graphql/web', slug, query={ 'id': '9b80783950b85247541dd1d851f9cc7fa36574af015621f853ab111a679ce26f', 'variables': json.dumps({ 'programSlug': program_slug, 'slug': slug, }) })['data']['videoBySlug'] wat_id = video['streamId'] tags = [] for tag in (video.get('tags') or []): label = tag.get('label') if not label: continue tags.append(label) decoration = video.get('decoration') or {} thumbnails = [] for source in (try_get(decoration, lambda x: x['image']['sources'], list) or []): source_url = source.get('url') if not source_url: continue thumbnails.append({ 'url': source_url, 'width': int_or_none(source.get('width')), }) return { '_type': 'url_transparent', 'id': wat_id, 'url': 'wat:' + wat_id, 'title': video.get('title'), 'thumbnails': thumbnails, 'description': decoration.get('description'), 'timestamp': parse_iso8601(video.get('date')), 'duration': int_or_none(try_get(video, lambda x: x['publicPlayingInfos']['duration'])), 'tags': tags, 'series': decoration.get('programLabel'), 'season_number': int_or_none(video.get('season')), 'episode_number': int_or_none(video.get('episode')), }
unlicense
-2,689,248,399,720,628,000
3,197,591,528,412,520,400
34.298851
119
0.5197
false
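Extractors such as the TF1IE class above are not instantiated by hand; YoutubeDL selects them from the URL. A hedged sketch using the URL from the extractor's own _TESTS, assuming network access and that the video is still available:

# Sketch: let youtube-dl route a tf1.fr URL to the TF1IE extractor above.
from youtube_dl import YoutubeDL

url = ("https://www.tf1.fr/tmc/quotidien-avec-yann-barthes/videos/"
       "quotidien-premiere-partie-11-juin-2019.html")

with YoutubeDL({"skip_download": True, "quiet": True}) as ydl:
    # TF1IE returns a url_transparent result pointing at a wat: URL,
    # which YoutubeDL resolves before handing back the metadata.
    info = ydl.extract_info(url, download=False)
    print(info.get("id"), info.get("duration"), info.get("title"))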
sam-roth/Keypad
keypad/plugins/shell/bourne_model.py
1
4068
import subprocess import shlex from keypad.api import (Plugin, register_plugin, Filetype, Cursor) from keypad.abstract.code import IndentRetainingCodeModel, AbstractCompletionResults from keypad.core.syntaxlib import SyntaxHighlighter, lazy from keypad.core.processmgr.client import AsyncServerProxy from keypad.core.fuzzy import FuzzyMatcher from keypad.core.executors import future_wrap from keypad.core.attributed_string import AttributedString @lazy def lexer(): from . import bourne_lexer return bourne_lexer.Shell class GetManPage: def __init__(self, cmd): self.cmd = cmd def __call__(self, ns): with subprocess.Popen(['man', self.cmd], stdout=subprocess.PIPE) as proc: out, _ = proc.communicate() import re return [re.subn('.\x08', '', out.decode())[0]] class ShellCompletionResults(AbstractCompletionResults): def __init__(self, token_start, results, prox): ''' token_start - the (line, col) position at which the token being completed starts ''' super().__init__(token_start) self.results = [(AttributedString(x.decode()),) for x in results] self._prox = prox def doc_async(self, index): ''' Return a Future for the documentation for a given completion result as a list of AttributedString. ''' return self._prox.submit(GetManPage(self.text(index))) @property def rows(self): ''' Return a list of tuples of AttributedString containing the contents of each column for each row in the completion results. ''' return self._filtered.rows def text(self, index): ''' Return the text that should be inserted for the given completion. ''' return self._filtered.rows[index][0].text def filter(self, text=''): ''' Filter the completion results using the given text. ''' self._filtered = FuzzyMatcher(text).filter(self.results, key=lambda x: x[0].text) self._filtered.sort(lambda item: len(item[0].text)) def dispose(self): pass class GetPathItems: def __init__(self, prefix): self.prefix = prefix def __call__(self, ns): with subprocess.Popen(['bash', '-c', 'compgen -c ' + shlex.quote(self.prefix)], stdout=subprocess.PIPE) as proc: out, _ = proc.communicate() return [l.strip() for l in out.splitlines()] class BourneCodeModel(IndentRetainingCodeModel): completion_triggers = [] def __init__(self, *args, **kw): super().__init__(*args, **kw) self._prox = AsyncServerProxy() self._prox.start() def dispose(self): self._prox.shutdown() super().dispose() def highlight(self): ''' Rehighlight the buffer. ''' highlighter = SyntaxHighlighter( 'keypad.plugins.shell.syntax', lexer(), dict(lexcat=None) ) highlighter.highlight_buffer(self.buffer) def completions_async(self, pos): ''' Return a future to the completions available at the given position in the document. Raise NotImplementedError if not implemented. ''' c = Cursor(self.buffer).move(pos) text_to_pos = c.line.text[:c.x] for x, ch in reversed(list(enumerate(text_to_pos))): if ch.isspace(): x += 1 break else: x = 0 print('text_to_pos', text_to_pos[x:], pos) return self._prox.submit(GetPathItems(text_to_pos[x:]), transform=lambda r: ShellCompletionResults((pos[0], x), r, self._prox))
gpl-3.0
-488,946,358,583,967,700
-3,226,000,671,740,864,500
26.863014
91
0.555556
false
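Most of the bourne_model record above is glue for the Keypad editor, but the completion source itself (GetPathItems) just shells out to bash's compgen builtin. That part can be tried on its own, without the editor or the AsyncServerProxy; the helper name below is made up for the sketch.

# Sketch: the command-name completion GetPathItems relies on, standalone.
import shlex
import subprocess

def complete_command(prefix):
    # compgen -c prints command names matching the prefix; it exits non-zero
    # when nothing matches, so don't treat that as an error.
    proc = subprocess.run(
        ["bash", "-c", "compgen -c " + shlex.quote(prefix)],
        stdout=subprocess.PIPE, check=False)
    return sorted(set(proc.stdout.decode().split()))

print(complete_command("git")[:5])   # whatever matching commands are on PATH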
ehashman/oh-mainline
vendor/packages/docutils/docutils/transforms/peps.py
122
11065
# $Id: peps.py 6433 2010-09-28 08:21:25Z milde $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. """ Transforms for PEP processing. - `Headers`: Used to transform a PEP's initial RFC-2822 header. It remains a field list, but some entries get processed. - `Contents`: Auto-inserts a table of contents. - `PEPZero`: Special processing for PEP 0. """ __docformat__ = 'reStructuredText' import sys import os import re import time from docutils import nodes, utils, languages from docutils import ApplicationError, DataError from docutils.transforms import Transform, TransformError from docutils.transforms import parts, references, misc class Headers(Transform): """ Process fields in a PEP's initial RFC-2822 header. """ default_priority = 360 pep_url = 'pep-%04d' pep_cvs_url = ('http://svn.python.org/view/*checkout*' '/peps/trunk/pep-%04d.txt') rcs_keyword_substitutions = ( (re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'), (re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),) def apply(self): if not len(self.document): # @@@ replace these DataErrors with proper system messages raise DataError('Document tree is empty.') header = self.document[0] if not isinstance(header, nodes.field_list) or \ 'rfc2822' not in header['classes']: raise DataError('Document does not begin with an RFC-2822 ' 'header; it is not a PEP.') pep = None for field in header: if field[0].astext().lower() == 'pep': # should be the first field value = field[1].astext() try: pep = int(value) cvs_url = self.pep_cvs_url % pep except ValueError: pep = value cvs_url = None msg = self.document.reporter.warning( '"PEP" header must contain an integer; "%s" is an ' 'invalid value.' % pep, base_node=field) msgid = self.document.set_id(msg) prb = nodes.problematic(value, value or '(none)', refid=msgid) prbid = self.document.set_id(prb) msg.add_backref(prbid) if len(field[1]): field[1][0][:] = [prb] else: field[1] += nodes.paragraph('', '', prb) break if pep is None: raise DataError('Document does not contain an RFC-2822 "PEP" ' 'header.') if pep == 0: # Special processing for PEP 0. 
pending = nodes.pending(PEPZero) self.document.insert(1, pending) self.document.note_pending(pending) if len(header) < 2 or header[1][0].astext().lower() != 'title': raise DataError('No title!') for field in header: name = field[0].astext().lower() body = field[1] if len(body) > 1: raise DataError('PEP header field body contains multiple ' 'elements:\n%s' % field.pformat(level=1)) elif len(body) == 1: if not isinstance(body[0], nodes.paragraph): raise DataError('PEP header field body may only contain ' 'a single paragraph:\n%s' % field.pformat(level=1)) elif name == 'last-modified': date = time.strftime( '%d-%b-%Y', time.localtime(os.stat(self.document['source'])[8])) if cvs_url: body += nodes.paragraph( '', '', nodes.reference('', date, refuri=cvs_url)) else: # empty continue para = body[0] if name == 'author': for node in para: if isinstance(node, nodes.reference): node.replace_self(mask_email(node)) elif name == 'discussions-to': for node in para: if isinstance(node, nodes.reference): node.replace_self(mask_email(node, pep)) elif name in ('replaces', 'replaced-by', 'requires'): newbody = [] space = nodes.Text(' ') for refpep in re.split(',?\s+', body.astext()): pepno = int(refpep) newbody.append(nodes.reference( refpep, refpep, refuri=(self.document.settings.pep_base_url + self.pep_url % pepno))) newbody.append(space) para[:] = newbody[:-1] # drop trailing space elif name == 'last-modified': utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions) if cvs_url: date = para.astext() para[:] = [nodes.reference('', date, refuri=cvs_url)] elif name == 'content-type': pep_type = para.astext() uri = self.document.settings.pep_base_url + self.pep_url % 12 para[:] = [nodes.reference('', pep_type, refuri=uri)] elif name == 'version' and len(body): utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions) class Contents(Transform): """ Insert an empty table of contents topic and a transform placeholder into the document after the RFC 2822 header. """ default_priority = 380 def apply(self): language = languages.get_language(self.document.settings.language_code, self.document.reporter) name = language.labels['contents'] title = nodes.title('', name) topic = nodes.topic('', title, classes=['contents']) name = nodes.fully_normalize_name(name) if not self.document.has_name(name): topic['names'].append(name) self.document.note_implicit_target(topic) pending = nodes.pending(parts.Contents) topic += pending self.document.insert(1, topic) self.document.note_pending(pending) class TargetNotes(Transform): """ Locate the "References" section, insert a placeholder for an external target footnote insertion transform at the end, and schedule the transform to run immediately. 
""" default_priority = 520 def apply(self): doc = self.document i = len(doc) - 1 refsect = copyright = None while i >= 0 and isinstance(doc[i], nodes.section): title_words = doc[i][0].astext().lower().split() if 'references' in title_words: refsect = doc[i] break elif 'copyright' in title_words: copyright = i i -= 1 if not refsect: refsect = nodes.section() refsect += nodes.title('', 'References') doc.set_id(refsect) if copyright: # Put the new "References" section before "Copyright": doc.insert(copyright, refsect) else: # Put the new "References" section at end of doc: doc.append(refsect) pending = nodes.pending(references.TargetNotes) refsect.append(pending) self.document.note_pending(pending, 0) pending = nodes.pending(misc.CallBack, details={'callback': self.cleanup_callback}) refsect.append(pending) self.document.note_pending(pending, 1) def cleanup_callback(self, pending): """ Remove an empty "References" section. Called after the `references.TargetNotes` transform is complete. """ if len(pending.parent) == 2: # <title> and <pending> pending.parent.parent.remove(pending.parent) class PEPZero(Transform): """ Special processing for PEP 0. """ default_priority =760 def apply(self): visitor = PEPZeroSpecial(self.document) self.document.walk(visitor) self.startnode.parent.remove(self.startnode) class PEPZeroSpecial(nodes.SparseNodeVisitor): """ Perform the special processing needed by PEP 0: - Mask email addresses. - Link PEP numbers in the second column of 4-column tables to the PEPs themselves. """ pep_url = Headers.pep_url def unknown_visit(self, node): pass def visit_reference(self, node): node.replace_self(mask_email(node)) def visit_field_list(self, node): if 'rfc2822' in node['classes']: raise nodes.SkipNode def visit_tgroup(self, node): self.pep_table = node['cols'] == 4 self.entry = 0 def visit_colspec(self, node): self.entry += 1 if self.pep_table and self.entry == 2: node['classes'].append('num') def visit_row(self, node): self.entry = 0 def visit_entry(self, node): self.entry += 1 if self.pep_table and self.entry == 2 and len(node) == 1: node['classes'].append('num') p = node[0] if isinstance(p, nodes.paragraph) and len(p) == 1: text = p.astext() try: pep = int(text) ref = (self.document.settings.pep_base_url + self.pep_url % pep) p[0] = nodes.reference(text, text, refuri=ref) except ValueError: pass non_masked_addresses = ('peps@python.org', 'python-list@python.org', 'python-dev@python.org') def mask_email(ref, pepno=None): """ Mask the email address in `ref` and return a replacement node. `ref` is returned unchanged if it contains no email address. For email addresses such as "user@host", mask the address as "user at host" (text) to thwart simple email address harvesters (except for those listed in `non_masked_addresses`). If a PEP number (`pepno`) is given, return a reference including a default email subject. """ if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'): if ref['refuri'][8:] in non_masked_addresses: replacement = ref[0] else: replacement_text = ref.astext().replace('@', '&#32;&#97;t&#32;') replacement = nodes.raw('', replacement_text, format='html') if pepno is None: return replacement else: ref['refuri'] += '?subject=PEP%%20%s' % pepno ref[:] = [replacement] return ref else: return ref
agpl-3.0
-8,896,177,294,446,902,000
3,201,157,865,215,171,600
35.278689
79
0.532128
false
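The transforms in the peps record above are normally wired in by docutils' PEP reader and writer rather than called one by one, but the small mask_email helper at the bottom can be exercised directly. A sketch against a stock docutils installation; the address and PEP number are made up.

# Sketch: the address-masking helper defined at the end of the module above.
from docutils import nodes
from docutils.transforms.peps import mask_email

ref = nodes.reference("", "user@example.org", refuri="mailto:user@example.org")
masked = mask_email(ref, pepno=8)

print(masked["refuri"])   # mailto:user@example.org?subject=PEP%208
print(masked.astext())    # the @ is replaced with HTML entities to deter harvesters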
dmvieira/P.O.D.
func.py
1
5799
from mergesort import * def comeca(sequencia,entrada,entrada2,entrada3): div=open(entrada3,'w') t=open(entrada,'r') saida=open(entrada2,'w') x=t.readlines() if (x[-1][-1])<>'\n': comp=x[-1][-1] comp=comp+'\n' x.insert(-1,comp) comp=x[-1] comp=comp+'\n' del(x[-1]) x.insert(-1,comp) del(x[-1]) l=[] b=0 t.close() if sequencia=='r': for j in range(0,len(x)): k=len(x[j]) if x[j][0]=='>': if b==1: l.append(c) l.append(x[j][:k-1]) c="" b=1 else: y="" for i in range(0,k-1): if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 'u' or x[j][i] == 'U' or x[j][i] == 'r' or x[j][i] == 'R' or x[j][i] == 'y' or x[j][i] == 'Y' or x[j][i] == 'k' or x[j][i] == 'K' or x[j][i] == 'm' or x[j][i] == 'M' or x[j][i] == 's' or x[j][i] == 'S' or x[j][i] == 'w' or x[j][i] == 'W' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'h' or x[j][i] == 'H' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'n' or x[j][i] == 'N': y=y+x[j][i] c=c+y l.append(c) elif sequencia=='p': for j in range(0,len(x)): k=len(x[j]) if x[j][0]=='>': if b==1: l.append(c) l.append(x[j][:k-1]) c="" b=1 else: y="" for i in range(0,k-1): if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'L' or x[j][i] == 'l' or x[j][i] == 'I' or x[j][i] == 'i' or x[j][i] == 'S' or x[j][i] == 's' or x[j][i] == 'T' or x[j][i] == 't' or x[j][i] == 'Y' or x[j][i] == 'y' or x[j][i] == 'M' or x[j][i] == 'm' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'n' or x[j][i] == 'N' or x[j][i] == 'E' or x[j][i] == 'e' or x[j][i] == 'Q' or x[j][i] == 'q' or x[j][i] == 'R' or x[j][i] == 'r' or x[j][i] == 'K' or x[j][i] == 'k' or x[j][i] == 'H' or x[j][i] == 'h' or x[j][i] == 'F' or x[j][i] == 'f' or x[j][i] == 'W' or x[j][i] == 'w' or x[j][i] == 'P' or x[j][i] == 'p' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'z' or x[j][i] == 'Z' or x[j][i] == 'x' or x[j][i] == 'X' or x[j][i] == 'u' or x[j][i] == 'U': y=y+x[j][i] c=c+y l.append(c) else: for j in range(0,len(x)): k=len(x[j]) if x[j][0]=='>': if b==1: l.append(c) l.append(x[j][:k-1]) c="" b=1 else: y="" for i in range(0,k-1): if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 't' or x[j][i] == 'T' or x[j][i] == 'r' or x[j][i] == 'R' or x[j][i] == 'y' or x[j][i] == 'Y' or x[j][i] == 'k' or x[j][i] == 'K' or x[j][i] == 'm' or x[j][i] == 'M' or x[j][i] == 's' or x[j][i] == 'S' or x[j][i] == 'w' or x[j][i] == 'W' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'h' or x[j][i] == 'H' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'n' or x[j][i] == 'N': y=y+x[j][i] c=c+y l.append(c) dec,dic={},{} for j in range(0,len(l),2): alta=(l[j+1]).upper() del(l[j+1]) l.insert(j+1,alta) if (dic.has_key((l[j+1][::-1])))==True: del(l[j+1]) l.insert((j+1),alta[::-1]) d={l[j]:l[j+1]} dec.update(d) d={l[j+1]:l[j]} dic.update(d) vou=dic.keys() v=dec.values() diversidade=[] dic={} for j in range(0,len(l),2): alta=(l[j+1]) divo=(len(alta))/65 if divo > 0: alta2='' for h in range(1,divo+1): alta2=alta2+alta[(65*(h-1)):(65*h)]+'\n' alta=alta2+alta[65*divo:] del(l[j+1]) l.insert(j+1,alta) d= {alta:l[j]} dic.update(d) key=dic.keys() value=dic.values() for j in range(len(key)): saida.write(value[j]+'\n'+key[j]+'\n') diversidade.append((v.count(vou[j]))) saida.close() ordena(diversidade, value, key, div) div.close()
gpl-3.0
-6,141,784,674,418,833,000
3,817,808,036,759,005,000
52.196262
904
0.272116
false
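The character-validation branches in the func.py record above amount to a per-residue alphabet check; a minimal sketch of the same idea with a set lookup (hypothetical helper, not part of the repository):

# Hypothetical sketch: the long or-chains above reduce to membership tests
# against a chosen alphabet (DNA plus IUPAC ambiguity codes shown here).
DNA_IUPAC = set("ACGTRYKMSWBDHVN")

def keep_valid(line, alphabet=DNA_IUPAC):
    # Keep only characters of the alphabet, case-insensitively.
    return "".join(ch for ch in line if ch.upper() in alphabet)

print(keep_valid("acg!Txn"))  # -> "acgTn"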
tectronics/huhamhire-hosts
doc/dev/conf.py
23
8175
# -*- coding: utf-8 -*- # # huhamhire-hosts documentation build configuration file, created by # sphinx-quickstart on Tue Jan 14 10:49:55 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os sys.path.insert(0, os.path.abspath('../../')) extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode', 'sphinx.ext.graphviz', 'sphinx.ext.inheritance_diagram', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Hosts Setup Utility' copyright = u'2011-2014, huhamhire-hosts team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.9.8' # The full version, including alpha/beta/rc tags. release = '1.9.8 beta' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. #exclude_patterns = [] #unused_docs = ["gpl"] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} html_theme_options = { "stickysidebar": True, "collapsiblesidebar": False, } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'huhamhire-hostsdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { 'inputenc': '', 'utf8extra': '', 'preamble': ''' \\hypersetup{unicode=true} \\usepackage{CJKutf8} \\AtBeginDocument{\\begin{CJK}{UTF8}{}} \\AtEndDocument{\\end{CJK}} ''', 'papersize': 'a4paper', # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'huhamhire-hosts.tex', u'Hosts Setup Utility Documentation', u'huhamhire-hosts team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. latex_show_pagerefs = True # If true, show URL addresses after external links. latex_show_urls = True # Documents to append as an appendix to all manuals. latex_appendices = ['gpl'] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. 
List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'huhamhire-hosts', u'Hosts Setup Utility Documentation', [u'huhamhire-hosts team'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'huhamhire-hosts', u'Hosts Setup Utility Documentation', u'huhamhire-hosts team', 'huhamhire-hosts', 'Easy managing hosts file.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
gpl-3.0
-218,304,251,756,851,970
-3,161,141,444,915,697,000
29.503731
79
0.705566
false
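A conf.py like the one above is normally consumed by sphinx-build; a small sketch of driving the same build from Python (paths assumed from the record, Sphinx >= 1.7 API):

# Hypothetical sketch, equivalent to: sphinx-build -b html doc/dev doc/dev/_build/html
from sphinx.cmd.build import build_main

exit_code = build_main(["-b", "html", "doc/dev", "doc/dev/_build/html"])
print("build succeeded" if exit_code == 0 else "build failed")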
cdriehuys/chmvh-website
chmvh_website/contact/forms.py
1
2333
import logging
from smtplib import SMTPException

from captcha.fields import ReCaptchaField
from django import forms
from django.conf import settings
from django.core import mail
from django.template import loader

logger = logging.getLogger("chmvh_website.{0}".format(__name__))


class ContactForm(forms.Form):
    captcha = ReCaptchaField()
    name = forms.CharField()
    email = forms.EmailField()
    message = forms.CharField(widget=forms.Textarea(attrs={"rows": 5}))

    street_address = forms.CharField(required=False)
    city = forms.CharField(required=False)
    zipcode = forms.CharField(required=False)

    template = loader.get_template("contact/email/message.txt")

    def clean_city(self):
        """
        If no city was provided, use a default string.
        """
        if not self.cleaned_data["city"]:
            return "<No City Given>"

        return self.cleaned_data["city"]

    def send_email(self):
        assert self.is_valid(), self.errors

        subject = "[CHMVH Website] Message from {}".format(
            self.cleaned_data["name"]
        )

        address_line_2_parts = [self.cleaned_data["city"], "North Carolina"]
        if self.cleaned_data["zipcode"]:
            address_line_2_parts.append(self.cleaned_data["zipcode"])

        address_line_1 = self.cleaned_data["street_address"]
        address_line_2 = ", ".join(address_line_2_parts)

        address = ""
        if address_line_1:
            address = "\n".join([address_line_1, address_line_2])

        context = {
            "name": self.cleaned_data["name"],
            "email": self.cleaned_data["email"],
            "message": self.cleaned_data["message"],
            "address": address,
        }

        logger.debug("Preparing to send email")

        try:
            emails_sent = mail.send_mail(
                subject,
                self.template.render(context),
                settings.DEFAULT_FROM_EMAIL,
                ["info@chapelhillvet.com"],
            )
            logger.info(
                "Succesfully sent email from {0}".format(
                    self.cleaned_data["email"]
                )
            )
        except SMTPException as e:
            emails_sent = 0
            logger.exception("Failed to send email.", exc_info=e)

        return emails_sent == 1
mit
-1,464,601,156,046,153,500
3,023,894,789,900,929,000
28.1625
76
0.582512
false
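A minimal usage sketch for the ContactForm above (field names taken from the record; values are hypothetical, and in practice the ReCaptcha field blocks validation unless real widget data or a test mock is supplied):

data = {
    "name": "Jane Doe",
    "email": "jane@example.com",
    "message": "Hello from the website",
    "city": "",   # clean_city() substitutes "<No City Given>"
}
form = ContactForm(data)
if form.is_valid():
    sent_ok = form.send_email()   # True when exactly one email went out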
poojavade/Genomics_Docker
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/bx/intervals/operations/concat.py
7
2623
""" Concatenate sets of intervals. Preserves format of the first input -- it is possible to concat two files that have different column orders. Of course, the meta-data of the second will be lost (and filled with a "."). If all of the files (GenomicInteralReaders) are the same format, sameformat=True will preserve all columns of the first input, cuts extra columns on subsequent input, and pads missing columns. If sameformat=False then extra columns are filled with ".". """ import psyco_full import traceback import fileinput from warnings import warn from bx.intervals.io import * from bx.intervals.operations import * def concat(readers, comments=True, header=True, sameformat=True): # Save columns from the first input chrom_col = readers[0].chrom_col start_col = readers[0].start_col end_col = readers[0].end_col strand_col = readers[0].strand_col nfields = None firstdataset = True output = False for intervals in readers: for interval in intervals: if isinstance(interval, GenomicInterval): if not nfields: nfields = interval.nfields out_interval = interval.copy() if sameformat or firstdataset: # everything except the first input has to be # trimmed or padded to match the first input if len(out_interval.fields) > nfields: out_interval.fields = out_interval.fields[0:nfields] while len(out_interval.fields) < nfields: out_interval.fields.append(".") output = True yield out_interval else: chrom = out_interval.chrom start = out_interval.start end = out_interval.end strand = out_interval.strand out_interval.fields = ["." for col in range(nfields)] out_interval.fields[chrom_col] = chrom out_interval.fields[start_col] = str(start) out_interval.fields[end_col] = str(end) # Strand is optional, might not exist in output if strand_col < len( out_interval.fields ): out_interval.fields[strand_col] = strand yield out_interval elif isinstance(interval, Header) and header: yield interval elif isinstance(interval, Comment) and comments: yield interval if output and firstdataset: firstdataset = False
apache-2.0
9,042,713,370,509,604,000
1,793,513,676,899,085,000
42
78
0.59512
false
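A usage sketch for concat() above, assuming bx-python's GenomicIntervalReader over BED-like inputs (file names hypothetical):

from bx.intervals.io import GenomicIntervalReader

readers = [GenomicIntervalReader(open("a.bed")),
           GenomicIntervalReader(open("b.bed"))]
for item in concat(readers, sameformat=True):
    # Intervals from both inputs, trimmed or padded to match the first format.
    print(item)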
mvaled/OpenUpgrade
openerp/addons/base/ir/ir_sequence.py
83
14810
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import time import openerp from openerp.osv import osv from openerp.tools.translate import _ _logger = logging.getLogger(__name__) class ir_sequence_type(openerp.osv.osv.osv): _name = 'ir.sequence.type' _order = 'name' _columns = { 'name': openerp.osv.fields.char('Name', required=True), 'code': openerp.osv.fields.char('Code', size=32, required=True), } _sql_constraints = [ ('code_unique', 'unique(code)', '`code` must be unique.'), ] def _code_get(self, cr, uid, context=None): cr.execute('select code, name from ir_sequence_type') return cr.fetchall() class ir_sequence(openerp.osv.osv.osv): """ Sequence model. The sequence model allows to define and use so-called sequence objects. Such objects are used to generate unique identifiers in a transaction-safe way. """ _name = 'ir.sequence' _order = 'name' def _get_number_next_actual(self, cr, user, ids, field_name, arg, context=None): '''Return number from ir_sequence row when no_gap implementation, and number from postgres sequence when standard implementation.''' res = dict.fromkeys(ids) for element in self.browse(cr, user, ids, context=context): if element.implementation != 'standard': res[element.id] = element.number_next else: # get number from postgres sequence. Cannot use # currval, because that might give an error when # not having used nextval before. statement = ( "SELECT last_value, increment_by, is_called" " FROM ir_sequence_%03d" % element.id) cr.execute(statement) (last_value, increment_by, is_called) = cr.fetchone() if is_called: res[element.id] = last_value + increment_by else: res[element.id] = last_value return res def _set_number_next_actual(self, cr, uid, id, name, value, args=None, context=None): return self.write(cr, uid, id, {'number_next': value or 0}, context=context) _columns = { 'name': openerp.osv.fields.char('Name', size=64, required=True), 'code': openerp.osv.fields.selection(_code_get, 'Sequence Type', size=64), 'implementation': openerp.osv.fields.selection( # TODO update the view [('standard', 'Standard'), ('no_gap', 'No gap')], 'Implementation', required=True, help="Two sequence object implementations are offered: Standard " "and 'No gap'. 
The later is slower than the former but forbids any" " gap in the sequence (while they are possible in the former)."), 'active': openerp.osv.fields.boolean('Active'), 'prefix': openerp.osv.fields.char('Prefix', help="Prefix value of the record for the sequence"), 'suffix': openerp.osv.fields.char('Suffix', help="Suffix value of the record for the sequence"), 'number_next': openerp.osv.fields.integer('Next Number', required=True, help="Next number of this sequence"), 'number_next_actual': openerp.osv.fields.function(_get_number_next_actual, fnct_inv=_set_number_next_actual, type='integer', required=True, string='Next Number', help='Next number that will be used. This number can be incremented frequently so the displayed value might already be obsolete'), 'number_increment': openerp.osv.fields.integer('Increment Number', required=True, help="The next number of the sequence will be incremented by this number"), 'padding' : openerp.osv.fields.integer('Number Padding', required=True, help="Odoo will automatically adds some '0' on the left of the 'Next Number' to get the required padding size."), 'company_id': openerp.osv.fields.many2one('res.company', 'Company'), } _defaults = { 'implementation': 'standard', 'active': True, 'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.sequence', context=c), 'number_increment': 1, 'number_next': 1, 'number_next_actual': 1, 'padding' : 0, } def init(self, cr): return # Don't do the following index yet. # CONSTRAINT/UNIQUE INDEX on (code, company_id) # /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92 # only support field names in constraint definitions, and we need a function here: # we need to special-case company_id to treat all NULL company_id as equal, otherwise # we would allow duplicate (code, NULL) ir_sequences. cr.execute(""" SELECT indexname FROM pg_indexes WHERE indexname = 'ir_sequence_unique_code_company_id_idx'""") if not cr.fetchone(): cr.execute(""" CREATE UNIQUE INDEX ir_sequence_unique_code_company_id_idx ON ir_sequence (code, (COALESCE(company_id,-1)))""") def _create_sequence(self, cr, id, number_increment, number_next): """ Create a PostreSQL sequence. There is no access rights check. """ if number_increment == 0: raise osv.except_osv(_('Warning!'),_("Increment number must not be zero.")) assert isinstance(id, (int, long)) sql = "CREATE SEQUENCE ir_sequence_%03d INCREMENT BY %%s START WITH %%s" % id cr.execute(sql, (number_increment, number_next)) def _drop_sequence(self, cr, ids): """ Drop the PostreSQL sequence if it exists. There is no access rights check. """ ids = ids if isinstance(ids, (list, tuple)) else [ids] assert all(isinstance(i, (int, long)) for i in ids), \ "Only ids in (int, long) allowed." names = ','.join('ir_sequence_%03d' % i for i in ids) # RESTRICT is the default; it prevents dropping the sequence if an # object depends on it. cr.execute("DROP SEQUENCE IF EXISTS %s RESTRICT " % names) def _alter_sequence(self, cr, id, number_increment, number_next=None): """ Alter a PostreSQL sequence. There is no access rights check. 
""" if number_increment == 0: raise osv.except_osv(_('Warning!'),_("Increment number must not be zero.")) assert isinstance(id, (int, long)) seq_name = 'ir_sequence_%03d' % (id,) cr.execute("SELECT relname FROM pg_class WHERE relkind = %s AND relname=%s", ('S', seq_name)) if not cr.fetchone(): # sequence is not created yet, we're inside create() so ignore it, will be set later return statement = "ALTER SEQUENCE %s INCREMENT BY %d" % (seq_name, number_increment) if number_next is not None: statement += " RESTART WITH %d" % (number_next, ) cr.execute(statement) def create(self, cr, uid, values, context=None): """ Create a sequence, in implementation == standard a fast gaps-allowed PostgreSQL sequence is used. """ values = self._add_missing_default_values(cr, uid, values, context) values['id'] = super(ir_sequence, self).create(cr, uid, values, context) if values['implementation'] == 'standard': self._create_sequence(cr, values['id'], values['number_increment'], values['number_next']) return values['id'] def unlink(self, cr, uid, ids, context=None): super(ir_sequence, self).unlink(cr, uid, ids, context) self._drop_sequence(cr, ids) return True def write(self, cr, uid, ids, values, context=None): if not isinstance(ids, (list, tuple)): ids = [ids] new_implementation = values.get('implementation') rows = self.read(cr, uid, ids, ['implementation', 'number_increment', 'number_next'], context) super(ir_sequence, self).write(cr, uid, ids, values, context) for row in rows: # 4 cases: we test the previous impl. against the new one. i = values.get('number_increment', row['number_increment']) n = values.get('number_next', row['number_next']) if row['implementation'] == 'standard': if new_implementation in ('standard', None): # Implementation has NOT changed. # Only change sequence if really requested. if row['number_next'] != n: self._alter_sequence(cr, row['id'], i, n) else: # Just in case only increment changed self._alter_sequence(cr, row['id'], i) else: self._drop_sequence(cr, row['id']) else: if new_implementation in ('no_gap', None): pass else: self._create_sequence(cr, row['id'], i, n) return True def _interpolate(self, s, d): if s: return s % d return '' def _interpolation_dict(self): t = time.localtime() # Actually, the server is always in UTC. 
return { 'year': time.strftime('%Y', t), 'month': time.strftime('%m', t), 'day': time.strftime('%d', t), 'y': time.strftime('%y', t), 'doy': time.strftime('%j', t), 'woy': time.strftime('%W', t), 'weekday': time.strftime('%w', t), 'h24': time.strftime('%H', t), 'h12': time.strftime('%I', t), 'min': time.strftime('%M', t), 'sec': time.strftime('%S', t), } def _next(self, cr, uid, ids, context=None): if not ids: return False if context is None: context = {} force_company = context.get('force_company') if not force_company: force_company = self.pool.get('res.users').browse(cr, uid, uid).company_id.id sequences = self.read(cr, uid, ids, ['name','company_id','implementation','number_next','prefix','suffix','padding']) preferred_sequences = [s for s in sequences if s['company_id'] and s['company_id'][0] == force_company ] seq = preferred_sequences[0] if preferred_sequences else sequences[0] if seq['implementation'] == 'standard': cr.execute("SELECT nextval('ir_sequence_%03d')" % seq['id']) seq['number_next'] = cr.fetchone() else: cr.execute("SELECT number_next FROM ir_sequence WHERE id=%s FOR UPDATE NOWAIT", (seq['id'],)) cr.execute("UPDATE ir_sequence SET number_next=number_next+number_increment WHERE id=%s ", (seq['id'],)) self.invalidate_cache(cr, uid, ['number_next'], [seq['id']], context=context) d = self._interpolation_dict() try: interpolated_prefix = self._interpolate(seq['prefix'], d) interpolated_suffix = self._interpolate(seq['suffix'], d) except ValueError: raise osv.except_osv(_('Warning'), _('Invalid prefix or suffix for sequence \'%s\'') % (seq.get('name'))) return interpolated_prefix + '%%0%sd' % seq['padding'] % seq['number_next'] + interpolated_suffix def next_by_id(self, cr, uid, sequence_id, context=None): """ Draw an interpolated string using the specified sequence.""" self.check_access_rights(cr, uid, 'read') company_ids = self.pool.get('res.company').search(cr, uid, [], context=context) + [False] ids = self.search(cr, uid, ['&',('id','=', sequence_id),('company_id','in',company_ids)]) return self._next(cr, uid, ids, context) def next_by_code(self, cr, uid, sequence_code, context=None): """ Draw an interpolated string using a sequence with the requested code. If several sequences with the correct code are available to the user (multi-company cases), the one from the user's current company will be used. :param dict context: context dictionary may contain a ``force_company`` key with the ID of the company to use instead of the user's current company for the sequence selection. A matching sequence for that specific company will get higher priority. """ self.check_access_rights(cr, uid, 'read') company_ids = self.pool.get('res.company').search(cr, uid, [], context=context) + [False] ids = self.search(cr, uid, ['&', ('code', '=', sequence_code), ('company_id', 'in', company_ids)]) return self._next(cr, uid, ids, context) def get_id(self, cr, uid, sequence_code_or_id, code_or_id='id', context=None): """ Draw an interpolated string using the specified sequence. The sequence to use is specified by the ``sequence_code_or_id`` argument, which can be a code or an id (as controlled by the ``code_or_id`` argument. This method is deprecated. """ # TODO: bump up to warning after 6.1 release _logger.debug("ir_sequence.get() and ir_sequence.get_id() are deprecated. 
" "Please use ir_sequence.next_by_code() or ir_sequence.next_by_id().") if code_or_id == 'id': return self.next_by_id(cr, uid, sequence_code_or_id, context) else: return self.next_by_code(cr, uid, sequence_code_or_id, context) def get(self, cr, uid, code, context=None): """ Draw an interpolated string using the specified sequence. The sequence to use is specified by its code. This method is deprecated. """ return self.get_id(cr, uid, code, 'code', context) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-340,556,199,178,529,540
-6,975,893,008,508,381,000
46.620579
300
0.592505
false
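A usage sketch of the sequence model above in the same cr/uid API style, assumed to run inside another model's method:

# Hypothetical call site; 'purchase.order' is an example sequence code.
seq_obj = self.pool.get('ir.sequence')
name = seq_obj.next_by_code(cr, uid, 'purchase.order', context=context)
# With prefix 'PO%(year)s/' and padding 5 this might yield 'PO2014/00042'.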
isra17/DIE
DIE/UI/AboutScreen.py
7
1977
from idaapi import Form
import os
import DIE.Lib.DieConfig

from sark.qt import QtGui, QtCore


class AboutWindow(QtGui.QDialog):

    def __init__(self):
        super(AboutWindow, self).__init__()
        self.initUI()

    def initUI(self):
        config = DIE.Lib.DieConfig.get_config()
        self.setFixedSize(400, 330)
        self.setWindowTitle("About DIE")

        image = QtGui.QImage(os.path.join(config.icons_path, "logo.png"))
        pixmap = QtGui.QPixmap.fromImage(image)
        logo = QtGui.QLabel(self)
        logo.setFixedSize(pixmap.size())
        logo.move(0.5*(self.width() - logo.width()), 20)
        logo.setPixmap(pixmap)

        title = QtGui.QLabel("DIE",self)
        title.setAlignment(QtCore.Qt.AlignCenter)
        font = title.font()
        font.setPointSize(16)
        font.setBold(True)
        title.setFont(font)
        title.setFixedWidth(400)
        title.move(0, logo.height() + logo.y() + 20)

        subtitle = QtGui.QLabel("Dynamic IDA Enrichment framework",self)
        font = subtitle.font()
        font.setPointSize(14)
        subtitle.setFont(font)
        subtitle.setAlignment(QtCore.Qt.AlignCenter)
        subtitle.setFixedWidth(400)
        subtitle.move(0, title.height() + title.y() + 10)

        version = QtGui.QLabel("Version 0.1",self)
        font = subtitle.font()
        font.setPointSize(12)
        version.setFont(font)
        version.setAlignment(QtCore.Qt.AlignCenter)
        version.setFixedWidth(400)
        version.move(0, subtitle.height() + subtitle.y() + 30)

        author = QtGui.QLabel("Written by Yaniv Balmas @ynvb - Check Point Software Technologies",self)
        font = subtitle.font()
        font.setPointSize(12)
        author.setFont(font)
        author.setAlignment(QtCore.Qt.AlignCenter)
        author.setFixedWidth(400)
        author.move(0, version.height() + version.y())

        self.show()
mit
-8,531,924,430,523,539,000
-8,566,941,770,119,547,000
28.890625
103
0.601416
false
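A usage sketch for the dialog above from an IDA Python session, assuming the DIE plugin configuration is already loaded:

# Keep a reference so the non-modal dialog is not garbage-collected;
# __init__() calls initUI(), which ends with self.show().
about = AboutWindow()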
stevekuznetsov/ansible
lib/ansible/modules/cloud/cloudstack/cs_affinitygroup.py
48
7633
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, René Moser <mail@renemoser.net> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: cs_affinitygroup short_description: Manages affinity groups on Apache CloudStack based clouds. description: - Create and remove affinity groups. version_added: '2.0' author: "René Moser (@resmo)" options: name: description: - Name of the affinity group. required: true affinty_type: description: - Type of the affinity group. If not specified, first found affinity type is used. required: false default: null description: description: - Description of the affinity group. required: false default: null state: description: - State of the affinity group. required: false default: 'present' choices: [ 'present', 'absent' ] domain: description: - Domain the affinity group is related to. required: false default: null account: description: - Account the affinity group is related to. required: false default: null project: description: - Name of the project the affinity group is related to. required: false default: null poll_async: description: - Poll async jobs until job has finished. required: false default: true extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' # Create a affinity group - local_action: module: cs_affinitygroup name: haproxy affinty_type: host anti-affinity # Remove a affinity group - local_action: module: cs_affinitygroup name: haproxy state: absent ''' RETURN = ''' --- id: description: UUID of the affinity group. returned: success type: string sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8 name: description: Name of affinity group. returned: success type: string sample: app description: description: Description of affinity group. returned: success type: string sample: application affinity group affinity_type: description: Type of affinity group. returned: success type: string sample: host anti-affinity project: description: Name of project the affinity group is related to. returned: success type: string sample: Production domain: description: Domain the affinity group is related to. returned: success type: string sample: example domain account: description: Account the affinity group is related to. 
returned: success type: string sample: example account ''' # import cloudstack common from ansible.module_utils.cloudstack import * class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackAffinityGroup, self).__init__(module) self.returns = { 'type': 'affinity_type', } self.affinity_group = None def get_affinity_group(self): if not self.affinity_group: args = { 'projectid': self.get_project(key='id'), 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), 'name': self.module.params.get('name'), } affinity_groups = self.cs.listAffinityGroups(**args) if affinity_groups: self.affinity_group = affinity_groups['affinitygroup'][0] return self.affinity_group def get_affinity_type(self): affinity_type = self.module.params.get('affinty_type') affinity_types = self.cs.listAffinityGroupTypes() if affinity_types: if not affinity_type: return affinity_types['affinityGroupType'][0]['type'] for a in affinity_types['affinityGroupType']: if a['type'] == affinity_type: return a['type'] self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type) def create_affinity_group(self): affinity_group = self.get_affinity_group() if not affinity_group: self.result['changed'] = True args = { 'name': self.module.params.get('name'), 'type': self.get_affinity_type(), 'description': self.module.params.get('description'), 'projectid': self.get_project(key='id'), 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), } if not self.module.check_mode: res = self.cs.createAffinityGroup(**args) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if res and poll_async: affinity_group = self.poll_job(res, 'affinitygroup') return affinity_group def remove_affinity_group(self): affinity_group = self.get_affinity_group() if affinity_group: self.result['changed'] = True args = { 'name': self.module.params.get('name'), 'projectid': self.get_project(key='id'), 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), } if not self.module.check_mode: res = self.cs.deleteAffinityGroup(**args) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if res and poll_async: self.poll_job(res, 'affinitygroup') return affinity_group def main(): argument_spec = cs_argument_spec() argument_spec.update(dict( name=dict(required=True), affinty_type=dict(default=None), description=dict(default=None), state=dict(choices=['present', 'absent'], default='present'), domain=dict(default=None), account=dict(default=None), project=dict(default=None), poll_async=dict(type='bool', default=True), )) module = AnsibleModule( argument_spec=argument_spec, required_together=cs_required_together(), supports_check_mode=True ) try: acs_ag = AnsibleCloudStackAffinityGroup(module) state = module.params.get('state') if state in ['absent']: affinity_group = acs_ag.remove_affinity_group() else: affinity_group = acs_ag.create_affinity_group() result = acs_ag.get_result(affinity_group) except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
-2,959,841,711,584,190,500
-6,637,148,554,675,232,000
28.92549
88
0.62102
false
ghedsouza/snakebite
test/effective_user_test.py
7
1151
from minicluster_testbase import MiniClusterTestBase
from snakebite.client import Client
import os


class EffectiveUserTest(MiniClusterTestBase):
    ERR_MSG_TOUCH = "org.apache.hadoop.security.AccessControlException\nPermission denied: user=__foobar"
    ERR_MSG_STAT = "`/foobar2': No such file or directory"

    VALID_FILE = '/foobar'
    INVALID_FILE = '/foobar2'

    def setUp(self):
        self.custom_client = Client(self.cluster.host, self.cluster.port)
        self.custom_foobar_client = Client(host=self.cluster.host,
                                           port=self.cluster.port,
                                           effective_user='__foobar')

    def test_touch(self):
        print tuple(self.custom_client.touchz([self.VALID_FILE]))
        try:
            tuple(self.custom_foobar_client.touchz([self.INVALID_FILE]))
        except Exception, e:
            self.assertTrue(e.message.startswith(self.ERR_MSG_TOUCH))

        self.custom_client.stat([self.VALID_FILE])
        try:
            self.custom_client.stat([self.INVALID_FILE])
        except Exception, e:
            self.assertEquals(e.message, self.ERR_MSG_STAT)
apache-2.0
7,574,813,257,573,831,000
-6,443,532,624,770,062,000
38.689655
105
0.6351
false
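The effective_user behaviour exercised by the test above can be reproduced directly against a namenode; a sketch assuming an HDFS namenode on localhost:8020:

from snakebite.client import Client

client = Client("localhost", 8020, effective_user="__foobar")
# touchz() returns a generator; for a denied user this surfaces the same
# AccessControlException message the test asserts on.
print(list(client.touchz(["/foobar"])))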
gdsfactory/gdsfactory
pp/components/coupler.py
1
2755
import pp from pp.component import Component from pp.components.coupler_straight import coupler_straight from pp.components.coupler_symmetric import coupler_symmetric from pp.cross_section import get_waveguide_settings from pp.snap import assert_on_1nm_grid from pp.types import ComponentFactory @pp.cell_with_validator def coupler( gap: float = 0.236, length: float = 20.0, coupler_symmetric_factory: ComponentFactory = coupler_symmetric, coupler_straight_factory: ComponentFactory = coupler_straight, dy: float = 5.0, dx: float = 10.0, waveguide: str = "strip", **kwargs ) -> Component: r"""Symmetric coupler. Args: gap: between straights length: of coupling region coupler_symmetric_factory coupler_straight_factory dy: port to port vertical spacing dx: length of bend in x direction waveguide: from tech.waveguide kwargs: overwrites waveguide_settings .. code:: dx dx |------| |------| W1 ________ _______E1 \ / | \ length / | ======================= gap | dy / \ | ________/ \_______ | W0 E0 coupler_straight_factory coupler_symmetric_factory """ assert_on_1nm_grid(length) assert_on_1nm_grid(gap) c = Component() waveguide_settings = get_waveguide_settings(waveguide, **kwargs) sbend = coupler_symmetric_factory(gap=gap, dy=dy, dx=dx, **waveguide_settings) sr = c << sbend sl = c << sbend cs = c << coupler_straight_factory(length=length, gap=gap, **waveguide_settings) sl.connect("W1", destination=cs.ports["W0"]) sr.connect("W0", destination=cs.ports["E0"]) c.add_port("W1", port=sl.ports["E0"]) c.add_port("W0", port=sl.ports["E1"]) c.add_port("E0", port=sr.ports["E0"]) c.add_port("E1", port=sr.ports["E1"]) c.absorb(sl) c.absorb(sr) c.absorb(cs) c.length = sbend.length c.min_bend_radius = sbend.min_bend_radius return c if __name__ == "__main__": # c = pp.Component() # cp1 = c << coupler(gap=0.2) # cp2 = c << coupler(gap=0.5) # cp1.ymin = 0 # cp2.ymin = 0 # c = coupler(gap=0.2, waveguide="nitride") # c = coupler(width=0.9, length=1, dy=2, gap=0.2) # print(c.settings_changed) c = coupler(gap=0.2, waveguide="nitride") # c = coupler(gap=0.2, waveguide="strip_heater") c.show()
mit
1,315,042,398,996,860,400
-8,475,927,363,535,448,000
29.955056
84
0.52559
false
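A usage sketch for the coupler cell above, following the record's own __main__ block (parameter values hypothetical):

import pp

c = coupler(gap=0.3, length=15.0, waveguide="strip")
print(c.ports)   # W0, W1, E0, E1 as added in the factory
c.show()         # stream the cell to KLayout, as in the record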
Murali-group/GraphSpace
applications/uniprot/models.py
1
1246
from __future__ import unicode_literals

from sqlalchemy import ForeignKeyConstraint, text

from applications.users.models import *
from django.conf import settings
from graphspace.mixins import *

Base = settings.BASE


# ================== Table Definitions =================== #

class UniprotAlias(IDMixin, TimeStampMixin, Base):
    __tablename__ = 'uniprot_alias'

    accession_number = Column(String, nullable=False)
    alias_source = Column(String, nullable=False)
    alias_name = Column(String, nullable=False)

    constraints = (
        UniqueConstraint('accession_number', 'alias_source', 'alias_name',
                         name='_uniprot_alias_uc_accession_number_alias_source_alias_name'),
    )

    indices = (
        Index('uniprot_alias_idx_accession_number',
              text("accession_number gin_trgm_ops"), postgresql_using="gin"),
        Index('uniprot_alias_idx_alias_name',
              text("alias_name gin_trgm_ops"), postgresql_using="gin"),
    )

    @declared_attr
    def __table_args__(cls):
        args = cls.constraints + cls.indices
        return args

    def serialize(cls, **kwargs):
        return {
            # 'id': cls.id,
            'id': cls.accession_number,
            'alias_source': cls.alias_source,
            'alias_name': cls.alias_name,
            'created_at': cls.created_at.isoformat(),
            'updated_at': cls.updated_at.isoformat()
        }
gpl-2.0
-3,978,246,234,347,208,000
-3,171,166,397,655,219,700
27.976744
136
0.695024
false
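A query sketch for the alias table above, assuming a configured SQLAlchemy session (the name `session` and the accession number are hypothetical):

rows = session.query(UniprotAlias).filter(
    UniprotAlias.accession_number == "P12345").all()
print([row.serialize() for row in rows])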
redhat-openstack/nova
nova/tests/api/openstack/compute/plugins/v3/test_extended_volumes.py
12
16239
# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import webob from nova.api.openstack.compute.plugins.v3 import extended_volumes from nova import compute from nova import context from nova import db from nova import exception from nova import objects from nova.objects import instance as instance_obj from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack import fakes from nova.tests import fake_block_device from nova.tests import fake_instance from nova import volume UUID1 = '00000000-0000-0000-0000-000000000001' UUID2 = '00000000-0000-0000-0000-000000000002' UUID3 = '00000000-0000-0000-0000-000000000003' def fake_compute_get(*args, **kwargs): inst = fakes.stub_instance(1, uuid=UUID1) return fake_instance.fake_instance_obj(args[1], **inst) def fake_compute_get_not_found(*args, **kwargs): raise exception.InstanceNotFound(instance_id=UUID1) def fake_compute_get_all(*args, **kwargs): db_list = [fakes.stub_instance(1), fakes.stub_instance(2)] fields = instance_obj.INSTANCE_DEFAULT_FIELDS return instance_obj._make_instance_list(args[1], objects.InstanceList(), db_list, fields) def fake_bdms_get_all_by_instance(*args, **kwargs): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': UUID1, 'source_type': 'volume', 'destination_type': 'volume', 'id': 1}), fake_block_device.FakeDbBlockDeviceDict( {'volume_id': UUID2, 'source_type': 'volume', 'destination_type': 'volume', 'id': 2})] def fake_attach_volume(self, context, instance, volume_id, device, disk_bus, device_type): pass def fake_attach_volume_not_found_vol(self, context, instance, volume_id, device, disk_bus, device_type): raise exception.VolumeNotFound(volume_id=volume_id) def fake_attach_volume_invalid_device_path(self, context, instance, volume_id, device, disk_bus, device_type): raise exception.InvalidDevicePath(path=device) def fake_attach_volume_instance_invalid_state(self, context, instance, volume_id, device, disk_bus, device_type): raise exception.InstanceInvalidState(instance_uuid=UUID1, state='', method='', attr='') def fake_attach_volume_invalid_volume(self, context, instance, volume_id, device, disk_bus, device_type): raise exception.InvalidVolume(reason='') def fake_detach_volume(self, context, instance, volume): pass def fake_swap_volume(self, context, instance, old_volume_id, new_volume_id): pass def fake_swap_volume_invalid_volume(self, context, instance, volume_id, device): raise exception.InvalidVolume(reason='', volume_id=volume_id) def fake_swap_volume_unattached_volume(self, context, instance, volume_id, device): raise exception.VolumeUnattached(reason='', volume_id=volume_id) def fake_detach_volume_invalid_volume(self, context, instance, volume): raise exception.InvalidVolume(reason='') def fake_swap_volume_instance_invalid_state(self, context, instance, volume_id, device): raise exception.InstanceInvalidState(instance_uuid=UUID1, state='', method='', attr='') def fake_volume_get(*args, **kwargs): pass def 
fake_volume_get_not_found(*args, **kwargs): raise exception.VolumeNotFound(volume_id=UUID1) class ExtendedVolumesTest(test.TestCase): content_type = 'application/json' prefix = 'os-extended-volumes:' def setUp(self): super(ExtendedVolumesTest, self).setUp() self.Controller = extended_volumes.ExtendedVolumesController() fakes.stub_out_nw_api(self.stubs) self.stubs.Set(compute.api.API, 'get', fake_compute_get) self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all) self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', fake_bdms_get_all_by_instance) self.stubs.Set(volume.cinder.API, 'get', fake_volume_get) self.stubs.Set(compute.api.API, 'detach_volume', fake_detach_volume) self.stubs.Set(compute.api.API, 'attach_volume', fake_attach_volume) self.app = fakes.wsgi_app_v3(init_only=('os-extended-volumes', 'servers')) return_server = fakes.fake_instance_get() self.stubs.Set(db, 'instance_get_by_uuid', return_server) def _make_request(self, url, body=None): req = webob.Request.blank(url) req.headers['Accept'] = self.content_type if body: req.body = jsonutils.dumps(body) req.method = 'POST' req.content_type = 'application/json' res = req.get_response(self.app) return res def _get_server(self, body): return jsonutils.loads(body).get('server') def _get_servers(self, body): return jsonutils.loads(body).get('servers') def test_show(self): url = '/v3/servers/%s' % UUID1 res = self._make_request(url) self.assertEqual(res.status_int, 200) server = self._get_server(res.body) exp_volumes = [{'id': UUID1}, {'id': UUID2}] if self.content_type == 'application/json': actual = server.get('%svolumes_attached' % self.prefix) self.assertEqual(exp_volumes, actual) def test_detail(self): url = '/v3/servers/detail' res = self._make_request(url) self.assertEqual(res.status_int, 200) exp_volumes = [{'id': UUID1}, {'id': UUID2}] for i, server in enumerate(self._get_servers(res.body)): if self.content_type == 'application/json': actual = server.get('%svolumes_attached' % self.prefix) self.assertEqual(exp_volumes, actual) def test_detach(self): url = "/v3/servers/%s/action" % UUID1 res = self._make_request(url, {"detach": {"volume_id": UUID1}}) self.assertEqual(res.status_int, 202) def test_detach_volume_from_locked_server(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(compute.api.API, 'detach_volume', fakes.fake_actions_to_locked_server) res = self._make_request(url, {"detach": {"volume_id": UUID1}}) self.assertEqual(res.status_int, 409) def test_detach_with_non_existed_vol(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(volume.cinder.API, 'get', fake_volume_get_not_found) res = self._make_request(url, {"detach": {"volume_id": UUID2}}) self.assertEqual(res.status_int, 404) def test_detach_with_non_existed_instance(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found) res = self._make_request(url, {"detach": {"volume_id": UUID2}}) self.assertEqual(res.status_int, 404) def test_detach_with_invalid_vol(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(compute.api.API, 'detach_volume', fake_detach_volume_invalid_volume) res = self._make_request(url, {"detach": {"volume_id": UUID2}}) self.assertEqual(res.status_int, 400) def test_detach_with_bad_id(self): url = "/v3/servers/%s/action" % UUID1 res = self._make_request(url, {"detach": {"volume_id": 'xxx'}}) self.assertEqual(res.status_int, 400) def test_detach_without_id(self): url = "/v3/servers/%s/action" % UUID1 res = self._make_request(url, {"detach": {}}) 
self.assertEqual(res.status_int, 400) def test_detach_volume_with_invalid_request(self): url = "/v3/servers/%s/action" % UUID1 res = self._make_request(url, {"detach": None}) self.assertEqual(res.status_int, 400) @mock.patch('nova.objects.BlockDeviceMapping.is_root', new_callable=mock.PropertyMock) def test_detach_volume_root(self, mock_isroot): url = "/v3/servers/%s/action" % UUID1 mock_isroot.return_value = True res = self._make_request(url, {"detach": {"volume_id": UUID1}}) self.assertEqual(res.status_int, 403) def test_attach_volume(self): url = "/v3/servers/%s/action" % UUID1 res = self._make_request(url, {"attach": {"volume_id": UUID1}}) self.assertEqual(res.status_int, 202) def test_attach_volume_to_locked_server(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(compute.api.API, 'attach_volume', fakes.fake_actions_to_locked_server) res = self._make_request(url, {"attach": {"volume_id": UUID1}}) self.assertEqual(res.status_int, 409) def test_attach_volume_disk_bus_and_disk_dev(self): url = "/v3/servers/%s/action" % UUID1 self._make_request(url, {"attach": {"volume_id": UUID1, "device": "/dev/vdb", "disk_bus": "ide", "device_type": "cdrom"}}) def test_attach_volume_with_bad_id(self): url = "/v3/servers/%s/action" % UUID1 res = self._make_request(url, {"attach": {"volume_id": 'xxx'}}) self.assertEqual(res.status_int, 400) def test_attach_volume_without_id(self): url = "/v3/servers/%s/action" % UUID1 res = self._make_request(url, {"attach": {}}) self.assertEqual(res.status_int, 400) def test_attach_volume_with_invalid_request(self): url = "/v3/servers/%s/action" % UUID1 res = self._make_request(url, {"attach": None}) self.assertEqual(res.status_int, 400) def test_attach_volume_with_non_existe_vol(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(compute.api.API, 'attach_volume', fake_attach_volume_not_found_vol) res = self._make_request(url, {"attach": {"volume_id": UUID1}}) self.assertEqual(res.status_int, 404) def test_attach_volume_with_non_existed_instance(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found) res = self._make_request(url, {"attach": {"volume_id": UUID1}}) self.assertEqual(res.status_int, 404) def test_attach_volume_with_invalid_device_path(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(compute.api.API, 'attach_volume', fake_attach_volume_invalid_device_path) res = self._make_request(url, {"attach": {"volume_id": UUID1, 'device': 'xxx'}}) self.assertEqual(res.status_int, 400) def test_attach_volume_with_instance_invalid_state(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(compute.api.API, 'attach_volume', fake_attach_volume_instance_invalid_state) res = self._make_request(url, {"attach": {"volume_id": UUID1}}) self.assertEqual(res.status_int, 409) def test_attach_volume_with_invalid_volume(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(compute.api.API, 'attach_volume', fake_attach_volume_invalid_volume) res = self._make_request(url, {"attach": {"volume_id": UUID1}}) self.assertEqual(res.status_int, 400) def test_attach_volume_with_invalid_request_body(self): url = "/v3/servers/%s/action" % UUID1 self.stubs.Set(compute.api.API, 'attach_volume', fake_attach_volume_invalid_volume) res = self._make_request(url, {"attach": None}) self.assertEqual(res.status_int, 400) def _test_swap(self, uuid=UUID1, body=None): body = body or {'swap_volume_attachment': {'old_volume_id': uuid, 'new_volume_id': UUID2}} req = webob.Request.blank('/v3/servers/%s/action' % 
UUID1) req.method = 'PUT' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = context.get_admin_context() return self.Controller.swap(req, UUID1, body=body) def test_swap_volume(self): self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume) result = self._test_swap() self.assertEqual('202 Accepted', result.status) def test_swap_volume_for_locked_server(self): def fake_swap_volume_for_locked_server(self, context, instance, old_volume, new_volume): raise exception.InstanceIsLocked(instance_uuid=instance['uuid']) self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume_for_locked_server) self.assertRaises(webob.exc.HTTPConflict, self._test_swap) def test_swap_volume_for_locked_server_new(self): self.stubs.Set(compute.api.API, 'swap_volume', fakes.fake_actions_to_locked_server) self.assertRaises(webob.exc.HTTPConflict, self._test_swap) def test_swap_volume_instance_not_found(self): self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found) self.assertRaises(webob.exc.HTTPNotFound, self._test_swap) def test_swap_volume_with_bad_action(self): self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume) body = {'swap_volume_attachment_bad_action': None} self.assertRaises(exception.ValidationError, self._test_swap, body=body) def test_swap_volume_with_invalid_body(self): self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume) body = {'swap_volume_attachment': {'bad_volume_id_body': UUID1, 'new_volume_id': UUID2}} self.assertRaises(exception.ValidationError, self._test_swap, body=body) def test_swap_volume_with_invalid_volume(self): self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume_invalid_volume) self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap) def test_swap_volume_with_unattached_volume(self): self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume_unattached_volume) self.assertRaises(webob.exc.HTTPNotFound, self._test_swap) def test_swap_volume_with_bad_state_instance(self): self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume_instance_invalid_state) self.assertRaises(webob.exc.HTTPConflict, self._test_swap) def test_swap_volume_no_attachment(self): self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume) self.assertRaises(webob.exc.HTTPNotFound, self._test_swap, UUID3) def test_swap_volume_not_found(self): self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume) self.stubs.Set(volume.cinder.API, 'get', fake_volume_get_not_found) self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
apache-2.0
7,381,003,427,922,152,000
-4,890,148,418,327,772,000
41.179221
78
0.60718
false
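The cases above are ordinarily run through the project's test runner; a sketch for exercising a single test with the stdlib unittest loader:

import unittest

suite = unittest.TestLoader().loadTestsFromName(
    'nova.tests.api.openstack.compute.plugins.v3.'
    'test_extended_volumes.ExtendedVolumesTest.test_show')
unittest.TextTestRunner(verbosity=2).run(suite)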
setjet/spark
examples/src/main/python/ml/kmeans_example.py
69
1828
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

# $example on$
from pyspark.ml.clustering import KMeans
# $example off$

from pyspark.sql import SparkSession

"""
An example demonstrating k-means clustering.
Run with:
  bin/spark-submit examples/src/main/python/ml/kmeans_example.py

This example requires NumPy (http://www.numpy.org/).
"""

if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("KMeansExample")\
        .getOrCreate()

    # $example on$
    # Loads data.
    dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")

    # Trains a k-means model.
    kmeans = KMeans().setK(2).setSeed(1)
    model = kmeans.fit(dataset)

    # Evaluate clustering by computing Within Set Sum of Squared Errors.
    wssse = model.computeCost(dataset)
    print("Within Set Sum of Squared Errors = " + str(wssse))

    # Shows the result.
    centers = model.clusterCenters()
    print("Cluster Centers: ")
    for center in centers:
        print(center)
    # $example off$

    spark.stop()
apache-2.0
-7,578,159,800,503,286,000
-1,970,092,860,357,127,000
29.983051
83
0.708972
false
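A small follow-up to the example above: once fitted, the model can assign cluster ids to rows (standard PySpark ML API; `model` and `dataset` are the names from the record):

predictions = model.transform(dataset)
predictions.select("features", "prediction").show(5)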
CodyTXR0KR/cyanideBot
cyanide_bot/bot_logic.py
2
9045
# -*- coding: utf-8 -*- ### cyanide_bot ### GNU/GPL v2 ### Author: Cody Rocker ### Author_email: cody.rocker.83@gmail.com ### 2016 #----------------------------------- # Requires: """ # - Python 3 """ # - imgurpython """ #----------------------------------- import sys import urllib.request import urllib.error import re import smtplib import keyring from socket import gethostname # Email dependancies from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from time import strftime from .imgur_api_client import Client class ImgurBot(): def __init__(self, static): self.static = static self.debug = static.debugger() self.load_settings() self.client = Client(static).login() self.debug.log('ImgurBot successfuly initialized.') def load_settings(self): self.debug.log('ImgurBot.load_settings()') config = self.static.get_bot_settings() self.botmail = config.get('messaging', 'botmail') self.devmail = config.get('messaging', 'devmail') self.messaging_enabled = config.getboolean('messaging', 'enabled') # Email message to developer def send_message(self, MODE, message): self.debug.log('ImgurBot.send_message() :: sending {0}...'.format(MODE)) if self.messaging_enabled: msg = MIMEMultipart() msg['From'] = self.botmail msg['To'] = self.devmail if MODE == "message": msg['Subject'] = '{0}.cyanideBot -- message'.format(gethostname()) text = MIMEText( 'cyanideBot.explosmdotnet posted an image to Imgur\n' '%s' % message) elif MODE == "error": msg['Subject'] = '{0}.cyanideBot -- error'.format(gethostname()) text = MIMEText( 'cyanideBot.explosmdotnet failed with error:\n' '%s' % str(message)) msg.attach(text) try: server = smtplib.SMTP('smtp.gmail.com:587') server.ehlo() server.starttls() server.ehlo() password = keyring.get_password('cyanide_bot', 'botmail') server.login(self.botmail, password) password = None server.sendmail(self.botmail, self.devmail, msg.as_string()) server.quit() except Exception as error: self.debug.log_error('ImgurBot.send_message() :: Failed.', error) sys.exit() self.debug.log('ImgurBot.send_message() :: {0} sent.'.format(MODE)) else: self.debug.log('ImgurBot.send_message() :: messaging disabled.') ###==========================================================### ### -- CYANIDE-BOT functions ### ### -- This is where specific behaviors should be modified. 
### ###==========================================================### def get_urls(self, random=False): self.debug.log('ImgurBot.get_urls()') self.urls = {} # define in local scope to ensure clean empty dict ## test if current comic is an amiated episode if random == False: response = urllib.request.urlopen('http://explosm.net') html = response.read() random = self.isAnimation(html) try: if random: response = urllib.request.urlopen('http://explosm.net/comics/random') html = response.read() self.urls['imgUrl'] = 'http://{0}'.format(re.findall( b'<img id="main-comic" src="//(.*?)"/>', html)[0].decode('utf-8')) self.debug.log('Image Url: {0}'.format(self.urls['imgUrl'])) self.urls['permalinkUrl'] = re.findall( b'<input id="permalink" type="text" value="(.*?)" onclick=', html)[0].decode('utf-8') self.debug.log('Permalink Url: {0}'.format(self.urls['permalinkUrl'])) else: # response = urllib.request.urlopen('http://explosm.net') # html = response.read() self.urls['imgUrl'] = 'http://{0}'.format(re.findall( b'<img id="featured-comic" src="//(.*?)"/></a>', html)[0].decode('utf-8')) self.debug.log('Image Url: {0}'.format(self.urls['imgUrl'])) self.urls['permalinkUrl'] = re.findall( b'<input id="permalink" type="text" value="(.*?)" onclick=', html)[0].decode('utf-8') self.debug.log('Permalink Url: {0}'.format(self.urls['permalinkUrl'])) # self.urls['hotlinkUrl'] = 'http://explosm.net{0}'.format(re.findall( # b'<a href="(.*?)"><img id="featured-comic" src="', # html)[0].decode('utf-8')) # self.debug.log('Hotlink Url: {0}'.format(self.urls['hotlinkUrl'])) except Exception as error: self.send_message('error', error) self.debug.log_error('ImgurBot.get_urls() :: Failed.', error) sys.exit() self.debug.log('ImgurBot.get_urls() :: Complete.') return self.urls def isAnimation(self, html): isAnimation = False try: current_comic = re.findall( b'<a href="(.*?)"><img id="featured-comic" src="//files.explosm.net/comics/.*"/></a>', html)[0].decode('utf-8') except Exception as e: self.debug.log_error('ImgurBot.isAnimation threw an exception', e) if '//explosm.net/show/episode/' in current_comic: self.debug.log('ImgurBot.isAnimation :: animation found, switching to random') self.urls['hotlinkUrl'] = current_comic isAnimation = True return isAnimation def make_post(self, publish=False, tag_image=False, tag='', random=False): self.debug.log('ImgurBot.make_post()') # Fetch image and build post metadata urls = self.get_urls(random=random) meta = {} meta['album'] = None meta['name'] = None meta['title'] = 'Daily dose of Cyanide for ' + get_date() meta['description'] = ( 'Permalink -- %s\nFind more at -- http://explosm.net' % ( urls['permalinkUrl'])) try: # imgur_api functionality # Perform upload action from provided url upload_response = self.upload_from_url(urls['imgUrl'], meta) if publish: # Publish image to gallery if -p, --publish is true self.publish_to_gallery( upload_response['item_id'], upload_response['title']) if tag_image: if tag is not '': # Tag the image with user defined tag self.tag_image(tag, upload_response['item_id']) except Exception as error: self.send_message('error', error) self.debug.log_error('ImgurBot.make_post() :: Failed.', error) sys.exit() self.send_message('message', upload_response['link']) self.debug.log('ImgurBot.make_post() :: Complete.') def upload_from_url(self, url, meta): self.debug.log('ImgurBot.upload_from_url()') try: upload_response = self.client.upload_from_url(url, meta, anon=False) response = {} response['item_id'] = upload_response['id'] response['title'] = 
upload_response['title'] response['link'] = upload_response['link'] except Exception as error: self.debug.log_error('ImgurBot.upload_from_url() :: Failed', error) sys.exit() self.debug.log('ImgurBot.upload_from_url() :: Complete.') return response def publish_to_gallery(self, item_id, title): self.debug.log('ImgurBot.publish_to_gallery()') try: # publish_response will be True if operation successful publish_response = self.client.share_on_imgur(item_id, title) except Exception as error: self.debug.log_error('ImgurBot.publish_to_gallery() :: Failed.', error) sys.exit() self.debug.log('ImgurBot.publish_to_gallery() :: Complete.') return publish_response def tag_image(self, tag, item_id): self.debug.log('ImgurBot.tag_image()') try: tag_response = self.client.gallery_tag_image(tag, item_id) self.debug.log('tag_response=%s' % tag_response) except Exception as error: self.debug.log_error('ImgurBot.tag_image() :: Failed', error) sys.exit() self.debug.log('ImgurBot.tag_image() :: Image tagged with: [{0}]'.format(tag)) return tag_response # Return formatted date string def get_date(): return strftime('%b %d %Y')
gpl-2.0
4,311,524,819,478,155,000
-9,018,956,442,690,654,000
40.118182
103
0.532338
false
TobbeTripitaka/src
book/rsf/school/pydemo.py
3
1693
# ------------------------------------------------------------ # strings a='StPetersburg' len(a) a[0] a[4:7] b=a+' '+'workshop' print b c=b+2014 c=b+' '+str(2014) print c # ------------------------------------------------------------ # lists d = ['StPetersburg', 'workshop'] len(d) print d[0] print d[1] d.append('2014') print d # ------------------------------------------------------------ # tuple = a sequence of immutable Python objects. t = ('StPetersburg', 'workshop') t = t + (2014,) print t # ------------------------------------------------------------ # dictionaries e={'what':'workshop','where':'StPetersburg','when':2014} print e print e['where']+' '+e['what']+' '+str(e['when']) f=dict(what='workshop',where='Melbourne',when=2013) print f print f['where']+' '+f['what']+' '+str(f['when']) # ------------------------------------------------------------ # loops for i in range(len(a)): print a[i] for i in range(len(d)): print d[i] for i in t: print i for key in e.keys(): print key,e[key] # ------------------------------------------------------------ # conditional statements for k in range(5): if k < 2: print k,'<2' else: print k,'>=2' try: b+2014 except: print "error!" # ------------------------------------------------------------ # functions def m8rschool(year): workshops=dict(StPetersburg=2014,Melbourne=2013) for key in workshops.keys(): if workshops[key]==year: return key print m8rschool(2014) def increment(a,b=5): return a+b # ------------------------------------------------------------ # modules import math x=math.sqrt(increment(4)) print x
gpl-2.0
-764,487,345,823,829,400
-657,622,655,906,759,300
17.010638
62
0.419965
false
rohithredd94/Computer-Vision-using-OpenCV
Particle-Filter-Tracking/PF_Tracker.py
1
4110
import cv2 import numpy as np from similarity import * from hist import * class PF_Tracker: def __init__(self, model, search_space, num_particles=100, state_dims=2, control_std=10, sim_std=20, alpha=0.0): self.model = model self.search_space = search_space[::-1] self.num_particles = num_particles self.state_dims = state_dims self.control_std = control_std self.sim_std = sim_std self.alpha = alpha #Initialize particles using a uniform distribution self.particles = np.array([np.random.uniform(0, self.search_space[i],self.num_particles) for i in range(self.state_dims)]).T self.weights = np.ones(len(self.particles)) / len(self.particles) self.idxs = np.arange(num_particles) self.estimate_state() def update(self, frame): self.displace() self.observe(frame) self.resample() self.estimate_state() if self.alpha > 0: self.update_model(frame) def displace(self): #Displace particles using a normal distribution centered around 0 self.particles += np.random.normal(0, self.control_std, self.particles.shape) def observe(self, img): #Get patches corresponding to each particle mh, mw = self.model.shape[:2] minx = (self.particles[:,0] - mw/2).astype(np.int) miny = (self.particles[:,1] - mh/2).astype(np.int) candidates = [img[miny[i]:miny[i]+mh, minx[i]:minx[i]+mw] for i in range(self.num_particles)] #Compute importance weight - similarity of each patch to the model self.weights = np.array([similarity(cand, self.model, self.sim_std) for cand in candidates]) self.weights /= np.sum(self.weights) def resample(self): sw, sh = self.search_space[:2] mh, mw = self.model.shape[:2] j = np.random.choice(self.idxs, self.num_particles, True, p=self.weights.T) #Sample new particle indices using the distribution of the weights control = np.random.normal(0, self.control_std, self.particles.shape) #Get a random control input from a normal distribution self.particles = np.array(self.particles[j]) self.particles[:,0] = np.clip(self.particles[:,0], 0, sw - 1) self.particles[:,1] = np.clip(self.particles[:,1], 0, sh - 1) def estimate_state(self): state_idx = np.random.choice(self.idxs, 1, p=self.weights) self.state = self.particles[state_idx][0] def update_model(self, frame): #Get current model based on belief mh, mw = self.model.shape[:2] minx = int(self.state[0] - mw/2) miny = int(self.state[1] - mh/2) best_model = frame[miny:miny+mh, minx:minx+mw] #Apply appearance model update if new model shape is unchanged if best_model.shape == self.model.shape: self.model = self.alpha * best_model + (1-self.alpha) * self.model self.model = self.model.astype(np.uint8) def visualize_filter(self, img): self.draw_particles(img) self.draw_window(img) self.draw_std(img) def draw_particles(self, img): for p in self.particles: cv2.circle(img, tuple(p.astype(int)), 2, (180,255,0), -1) def draw_window(self, img): best_idx = cv2.minMaxLoc(self.weights)[3][1] best_state = self.particles[best_idx] pt1 = (best_state - np.array(self.model.shape[::-1])/2).astype(np.int) pt2 = pt1 + np.array(self.model.shape[::-1]) cv2.rectangle(img, tuple(pt1), tuple(pt2), (0,255,0), 2) def draw_std(self, img): weighted_sum = 0 dist = np.linalg.norm(self.particles - self.state) weighted_sum = np.sum(dist * self.weights.reshape((-1,1))) cv2.circle(img, tuple(self.state.astype(np.int)), int(weighted_sum), (255,255,255), 1)
mit
-7,338,149,012,301,719,000
4,405,007,795,374,881,300
37.92233
132
0.584672
false
albertomurillo/ansible
test/integration/targets/module_precedence/lib_with_extension/ping.py
320
2144
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: ping version_added: historical short_description: Try to connect to host, verify a usable python and return C(pong) on success. description: - A trivial test module, this module always returns C(pong) on successful contact. It does not make sense in playbooks, but it is useful from C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured. - This is NOT ICMP ping, this is just a trivial test module. options: {} author: - "Ansible Core Team" - "Michael DeHaan" ''' EXAMPLES = ''' # Test we can logon to 'webservers' and execute python with json lib. ansible webservers -m ping ''' from ansible.module_utils.basic import AnsibleModule def main(): module = AnsibleModule( argument_spec=dict( data=dict(required=False, default=None), ), supports_check_mode=True ) result = dict(ping='pong') if module.params['data']: if module.params['data'] == 'crash': raise Exception("boom") result['ping'] = module.params['data'] result['location'] = 'library' module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
1,541,382,054,819,861,200
-1,370,464,311,255,940,000
30.072464
96
0.67444
false
weso/CWR-DataApi
tests/grammar/factory/record/test_publisher_territory.py
1
4905
# -*- coding: utf-8 -*- import unittest from pyparsing import ParseException from tests.utils.grammar import get_record_grammar """ CWR Publisher Territory of Control (SPT) grammar tests. The following cases are tested: """ __author__ = 'Bernardo Martínez Garrido' __license__ = 'MIT' __status__ = 'Development' class TestPublisherTerritoryGrammar(unittest.TestCase): """ Tests that the NPN grammar decodes correctly formatted strings """ def setUp(self): self.grammar = get_record_grammar('publisher_territory') def test_valid_common(self): """ Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes. This test contains all the optional fields. """ record = 'SPT000001790000054770 013330133301333I0484Y001' result = self.grammar.parseString(record)[0] self.assertEqual('SPT', result.record_type) self.assertEqual(179, result.transaction_sequence_n) self.assertEqual(547, result.record_sequence_n) self.assertEqual('70', result.ip_n) self.assertEqual(13.33, result.pr_collection_share) self.assertEqual(13.33, result.mr_collection_share) self.assertEqual(13.33, result.sr_collection_share) self.assertEqual('I', result.inclusion_exclusion_indicator) self.assertEqual(484, result.tis_numeric_code) self.assertEqual(True, result.shares_change) self.assertEqual(1, result.sequence_n) def test_valid_common_short(self): """ Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes. This test contains all the optional fields. """ record = 'SPT0000000100000002160694172 050000500000000I0484N01' result = self.grammar.parseString(record)[0] self.assertEqual('SPT', result.record_type) self.assertEqual(1, result.transaction_sequence_n) self.assertEqual(2, result.record_sequence_n) self.assertEqual('160694172', result.ip_n) self.assertEqual(50, result.pr_collection_share) self.assertEqual(50, result.mr_collection_share) self.assertEqual(0, result.sr_collection_share) self.assertEqual('I', result.inclusion_exclusion_indicator) self.assertEqual(484, result.tis_numeric_code) self.assertEqual(False, result.shares_change) self.assertEqual(1, result.sequence_n) def test_valid_full(self): """ Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes. This test contains all the optional fields. """ record = 'SPT0000123400000023A12345678 010120500002520I0008Y012' result = self.grammar.parseString(record)[0] self.assertEqual('SPT', result.record_type) self.assertEqual(1234, result.transaction_sequence_n) self.assertEqual(23, result.record_sequence_n) self.assertEqual('A12345678', result.ip_n) self.assertEqual(10.12, result.pr_collection_share) self.assertEqual(50, result.mr_collection_share) self.assertEqual(25.2, result.sr_collection_share) self.assertEqual('I', result.inclusion_exclusion_indicator) self.assertEqual(8, result.tis_numeric_code) self.assertEqual(True, result.shares_change) self.assertEqual(12, result.sequence_n) def test_valid_min(self): """ Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes. This test contains all the optional fields. 
""" record = 'SPT0000000100000001 000000000000000I0008Y012' result = self.grammar.parseString(record)[0] self.assertEqual('SPT', result.record_type) self.assertEqual(1, result.transaction_sequence_n) self.assertEqual(1, result.record_sequence_n) self.assertEqual(None, result.ip_n) self.assertEqual(0, result.pr_collection_share) self.assertEqual(0, result.mr_collection_share) self.assertEqual(0, result.sr_collection_share) self.assertEqual('I', result.inclusion_exclusion_indicator) self.assertEqual(8, result.tis_numeric_code) self.assertEqual(True, result.shares_change) self.assertEqual(12, result.sequence_n) class TestPublisherTerritoryGrammarException(unittest.TestCase): def setUp(self): self.grammar = get_record_grammar('publisher_territory') def test_empty(self): """ Tests that a exception is thrown when the the works number is zero. """ record = '' self.assertRaises(ParseException, self.grammar.parseString, record) def test_invalid(self): record = 'This is an invalid string' self.assertRaises(ParseException, self.grammar.parseString, record)
mit
-2,359,727,385,676,884,000
-8,569,354,781,610,624,000
36.435115
102
0.681077
false
marcydoty/geraldo
site/newsite/site-geraldo/django/contrib/admindocs/views.py
15
15909
from django import template, templatetags from django.template import RequestContext from django.conf import settings from django.contrib.admin.views.decorators import staff_member_required from django.db import models from django.shortcuts import render_to_response from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.http import Http404 from django.core import urlresolvers from django.contrib.admindocs import utils from django.contrib.sites.models import Site from django.utils.translation import ugettext as _ from django.utils.safestring import mark_safe import inspect, os, re # Exclude methods starting with these strings from documentation MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_') class GenericSite(object): domain = 'example.com' name = 'my site' def get_root_path(): from django.contrib import admin try: return urlresolvers.reverse(admin.site.root, args=['']) except urlresolvers.NoReverseMatch: return getattr(settings, "ADMIN_SITE_ROOT_URL", "/admin/") def doc_index(request): if not utils.docutils_is_available: return missing_docutils_page(request) return render_to_response('admin_doc/index.html', { 'root_path': get_root_path(), }, context_instance=RequestContext(request)) doc_index = staff_member_required(doc_index) def bookmarklets(request): admin_root = get_root_path() return render_to_response('admin_doc/bookmarklets.html', { 'root_path': admin_root, 'admin_url': mark_safe("%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root)), }, context_instance=RequestContext(request)) bookmarklets = staff_member_required(bookmarklets) def template_tag_index(request): if not utils.docutils_is_available: return missing_docutils_page(request) load_all_installed_template_libraries() tags = [] for module_name, library in template.libraries.items(): for tag_name, tag_func in library.tags.items(): title, body, metadata = utils.parse_docstring(tag_func.__doc__) if title: title = utils.parse_rst(title, 'tag', _('tag:') + tag_name) if body: body = utils.parse_rst(body, 'tag', _('tag:') + tag_name) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name) if library in template.builtins: tag_library = None else: tag_library = module_name.split('.')[-1] tags.append({ 'name': tag_name, 'title': title, 'body': body, 'meta': metadata, 'library': tag_library, }) return render_to_response('admin_doc/template_tag_index.html', { 'root_path': get_root_path(), 'tags': tags }, context_instance=RequestContext(request)) template_tag_index = staff_member_required(template_tag_index) def template_filter_index(request): if not utils.docutils_is_available: return missing_docutils_page(request) load_all_installed_template_libraries() filters = [] for module_name, library in template.libraries.items(): for filter_name, filter_func in library.filters.items(): title, body, metadata = utils.parse_docstring(filter_func.__doc__) if title: title = utils.parse_rst(title, 'filter', _('filter:') + filter_name) if body: body = utils.parse_rst(body, 'filter', _('filter:') + filter_name) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name) if library in template.builtins: tag_library = None else: tag_library = module_name.split('.')[-1] filters.append({ 'name': filter_name, 'title': title, 'body': body, 'meta': metadata, 'library': tag_library, }) return render_to_response('admin_doc/template_filter_index.html', { 'root_path': get_root_path(), 'filters': 
filters }, context_instance=RequestContext(request)) template_filter_index = staff_member_required(template_filter_index) def view_index(request): if not utils.docutils_is_available: return missing_docutils_page(request) if settings.ADMIN_FOR: settings_modules = [__import__(m, {}, {}, ['']) for m in settings.ADMIN_FOR] else: settings_modules = [settings] views = [] for settings_mod in settings_modules: urlconf = __import__(settings_mod.ROOT_URLCONF, {}, {}, ['']) view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns) if Site._meta.installed: site_obj = Site.objects.get(pk=settings_mod.SITE_ID) else: site_obj = GenericSite() for (func, regex) in view_functions: views.append({ 'name': func.__name__, 'module': func.__module__, 'site_id': settings_mod.SITE_ID, 'site': site_obj, 'url': simplify_regex(regex), }) return render_to_response('admin_doc/view_index.html', { 'root_path': get_root_path(), 'views': views }, context_instance=RequestContext(request)) view_index = staff_member_required(view_index) def view_detail(request, view): if not utils.docutils_is_available: return missing_docutils_page(request) mod, func = urlresolvers.get_mod_func(view) try: view_func = getattr(__import__(mod, {}, {}, ['']), func) except (ImportError, AttributeError): raise Http404 title, body, metadata = utils.parse_docstring(view_func.__doc__) if title: title = utils.parse_rst(title, 'view', _('view:') + view) if body: body = utils.parse_rst(body, 'view', _('view:') + view) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view) return render_to_response('admin_doc/view_detail.html', { 'root_path': get_root_path(), 'name': view, 'summary': title, 'body': body, 'meta': metadata, }, context_instance=RequestContext(request)) view_detail = staff_member_required(view_detail) def model_index(request): if not utils.docutils_is_available: return missing_docutils_page(request) m_list = [m._meta for m in models.get_models()] return render_to_response('admin_doc/model_index.html', { 'root_path': get_root_path(), 'models': m_list }, context_instance=RequestContext(request)) model_index = staff_member_required(model_index) def model_detail(request, app_label, model_name): if not utils.docutils_is_available: return missing_docutils_page(request) # Get the model class. try: app_mod = models.get_app(app_label) except ImproperlyConfigured: raise Http404, _("App %r not found") % app_label model = None for m in models.get_models(app_mod): if m._meta.object_name.lower() == model_name: model = m break if model is None: raise Http404, _("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label} opts = model._meta # Gather fields/field descriptions. fields = [] for field in opts.fields: # ForeignKey is a special case since the field will actually be a # descriptor that returns the other object if isinstance(field, models.ForeignKey): data_type = related_object_name = field.rel.to.__name__ app_label = field.rel.to._meta.app_label verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type) else: data_type = get_readable_field_data_type(field) verbose = field.verbose_name fields.append({ 'name': field.name, 'data_type': data_type, 'verbose': verbose, 'help_text': field.help_text, }) # Gather model methods. 
for func_name, func in model.__dict__.items(): if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1): try: for exclude in MODEL_METHODS_EXCLUDE: if func_name.startswith(exclude): raise StopIteration except StopIteration: continue verbose = func.__doc__ if verbose: verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name) fields.append({ 'name': func_name, 'data_type': get_return_data_type(func_name), 'verbose': verbose, }) # Gather related objects for rel in opts.get_all_related_objects(): verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name} accessor = rel.get_accessor_name() fields.append({ 'name' : "%s.all" % accessor, 'data_type' : 'List', 'verbose' : utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name), }) fields.append({ 'name' : "%s.count" % accessor, 'data_type' : 'Integer', 'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name), }) return render_to_response('admin_doc/model_detail.html', { 'root_path': get_root_path(), 'name': '%s.%s' % (opts.app_label, opts.object_name), 'summary': _("Fields on %s objects") % opts.object_name, 'description': model.__doc__, 'fields': fields, }, context_instance=RequestContext(request)) model_detail = staff_member_required(model_detail) def template_detail(request, template): templates = [] for site_settings_module in settings.ADMIN_FOR: settings_mod = __import__(site_settings_module, {}, {}, ['']) if Site._meta.installed: site_obj = Site.objects.get(pk=settings_mod.SITE_ID) else: site_obj = GenericSite() for dir in settings_mod.TEMPLATE_DIRS: template_file = os.path.join(dir, "%s.html" % template) templates.append({ 'file': template_file, 'exists': os.path.exists(template_file), 'contents': lambda: os.path.exists(template_file) and open(template_file).read() or '', 'site_id': settings_mod.SITE_ID, 'site': site_obj, 'order': list(settings_mod.TEMPLATE_DIRS).index(dir), }) return render_to_response('admin_doc/template_detail.html', { 'root_path': get_root_path(), 'name': template, 'templates': templates, }, context_instance=RequestContext(request)) template_detail = staff_member_required(template_detail) #################### # Helper functions # #################### def missing_docutils_page(request): """Display an error message for people without docutils""" return render_to_response('admin_doc/missing_docutils.html') def load_all_installed_template_libraries(): # Load/register all template tag libraries from installed apps. for e in templatetags.__path__: libraries = [os.path.splitext(p)[0] for p in os.listdir(e) if p.endswith('.py') and p[0].isalpha()] for library_name in libraries: try: lib = template.get_library("django.templatetags.%s" % library_name.split('.')[-1]) except template.InvalidTemplateLibrary: pass def get_return_data_type(func_name): """Return a somewhat-helpful data type given a function name""" if func_name.startswith('get_'): if func_name.endswith('_list'): return 'List' elif func_name.endswith('_count'): return 'Integer' return '' # Maps Field objects to their human-readable data types, as strings. # Column-type strings can contain format strings; they'll be interpolated # against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. 
DATA_TYPE_MAPPING = { 'AutoField' : _('Integer'), 'BooleanField' : _('Boolean (Either True or False)'), 'CharField' : _('String (up to %(max_length)s)'), 'CommaSeparatedIntegerField': _('Comma-separated integers'), 'DateField' : _('Date (without time)'), 'DateTimeField' : _('Date (with time)'), 'DecimalField' : _('Decimal number'), 'EmailField' : _('E-mail address'), 'FileField' : _('File path'), 'FilePathField' : _('File path'), 'FloatField' : _('Floating point number'), 'ForeignKey' : _('Integer'), 'ImageField' : _('File path'), 'IntegerField' : _('Integer'), 'IPAddressField' : _('IP address'), 'ManyToManyField' : '', 'NullBooleanField' : _('Boolean (Either True, False or None)'), 'OneToOneField' : _('Relation to parent model'), 'PhoneNumberField' : _('Phone number'), 'PositiveIntegerField' : _('Integer'), 'PositiveSmallIntegerField' : _('Integer'), 'SlugField' : _('String (up to %(max_length)s)'), 'SmallIntegerField' : _('Integer'), 'TextField' : _('Text'), 'TimeField' : _('Time'), 'URLField' : _('URL'), 'USStateField' : _('U.S. state (two uppercase letters)'), 'XMLField' : _('XML text'), } def get_readable_field_data_type(field): return DATA_TYPE_MAPPING[field.get_internal_type()] % field.__dict__ def extract_views_from_urlpatterns(urlpatterns, base=''): """ Return a list of views from a list of urlpatterns. Each object in the returned list is a two-tuple: (view_func, regex) """ views = [] for p in urlpatterns: if hasattr(p, '_get_callback'): try: views.append((p._get_callback(), base + p.regex.pattern)) except ViewDoesNotExist: continue elif hasattr(p, '_get_url_patterns'): try: patterns = p.url_patterns except ImportError: continue views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern)) else: raise TypeError, _("%s does not appear to be a urlpattern object") % p return views named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)') non_named_group_matcher = re.compile(r'\(.*?\)') def simplify_regex(pattern): """ Clean up urlpattern regexes into something somewhat readable by Mere Humans: turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$" into "<sport_slug>/athletes/<athlete_slug>/" """ # handle named groups first pattern = named_group_matcher.sub(lambda m: m.group(1), pattern) # handle non-named groups pattern = non_named_group_matcher.sub("<var>", pattern) # clean up any outstanding regex-y characters. pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '') if not pattern.startswith('/'): pattern = '/' + pattern return pattern
lgpl-3.0
-5,925,339,568,064,618,000
1,357,408,830,760,842,800
39.897172
180
0.583318
false
akarki15/mozillians
vendor-local/lib/python/tablib/packages/openpyxl3/writer/strings.py
55
2928
# file openpyxl/writer/strings.py # Copyright (c) 2010 openpyxl # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # @license: http://www.opensource.org/licenses/mit-license.php # @author: Eric Gazoni """Write the shared string table.""" # Python stdlib imports from io import StringIO # package imports from ..shared.xmltools import start_tag, end_tag, tag, XMLGenerator def create_string_table(workbook): """Compile the string table for a workbook.""" strings = set() for sheet in workbook.worksheets: for cell in sheet.get_cell_collection(): if cell.data_type == cell.TYPE_STRING and cell._value is not None: strings.add(cell.value) return dict((key, i) for i, key in enumerate(strings)) def write_string_table(string_table): """Write the string table xml.""" temp_buffer = StringIO() doc = XMLGenerator(temp_buffer, 'utf-8') start_tag(doc, 'sst', {'xmlns': 'http://schemas.openxmlformats.org/spreadsheetml/2006/main', 'uniqueCount': '%d' % len(string_table)}) strings_to_write = sorted(iter(string_table.items()), key=lambda pair: pair[1]) for key in [pair[0] for pair in strings_to_write]: start_tag(doc, 'si') if key.strip() != key: attr = {'xml:space': 'preserve'} else: attr = {} tag(doc, 't', attr, key) end_tag(doc, 'si') end_tag(doc, 'sst') string_table_xml = temp_buffer.getvalue() temp_buffer.close() return string_table_xml class StringTableBuilder(object): def __init__(self): self.counter = 0 self.dct = {} def add(self, key): key = key.strip() try: return self.dct[key] except KeyError: res = self.dct[key] = self.counter self.counter += 1 return res def get_table(self): return self.dct
bsd-3-clause
-6,786,876,704,798,704,000
644,980,594,526,211,700
33.046512
79
0.66291
false
mollstam/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/test/test_coercion.py
121
11399
import copy import unittest from test.test_support import run_unittest, TestFailed, check_warnings # Fake a number that implements numeric methods through __coerce__ class CoerceNumber: def __init__(self, arg): self.arg = arg def __repr__(self): return '<CoerceNumber %s>' % repr(self.arg) def __coerce__(self, other): if isinstance(other, CoerceNumber): return self.arg, other.arg else: return (self.arg, other) # New-style class version of CoerceNumber class CoerceTo(object): def __init__(self, arg): self.arg = arg def __coerce__(self, other): if isinstance(other, CoerceTo): return self.arg, other.arg else: return self.arg, other # Fake a number that implements numeric ops through methods. class MethodNumber: def __init__(self,arg): self.arg = arg def __repr__(self): return '<MethodNumber %s>' % repr(self.arg) def __add__(self,other): return self.arg + other def __radd__(self,other): return other + self.arg def __sub__(self,other): return self.arg - other def __rsub__(self,other): return other - self.arg def __mul__(self,other): return self.arg * other def __rmul__(self,other): return other * self.arg def __div__(self,other): return self.arg / other def __rdiv__(self,other): return other / self.arg def __truediv__(self,other): return self.arg / other def __rtruediv__(self,other): return other / self.arg def __floordiv__(self,other): return self.arg // other def __rfloordiv__(self,other): return other // self.arg def __pow__(self,other): return self.arg ** other def __rpow__(self,other): return other ** self.arg def __mod__(self,other): return self.arg % other def __rmod__(self,other): return other % self.arg def __cmp__(self, other): return cmp(self.arg, other) candidates = [2, 2L, 4.0, 2+0j, [1], (2,), None, MethodNumber(2), CoerceNumber(2)] infix_binops = [ '+', '-', '*', '**', '%', '//', '/' ] TE = TypeError # b = both normal and augmented give same result list # s = single result lists for normal and augmented # e = equals other results # result lists: ['+', '-', '*', '**', '%', '//', ('classic /', 'new /')] # ^^^^^^^^^^^^^^^^^^^^^^ # 2-tuple if results differ # else only one value infix_results = { # 2 (0,0): ('b', [4, 0, 4, 4, 0, 1, (1, 1.0)]), (0,1): ('e', (0,0)), (0,2): ('b', [6.0, -2.0, 8.0, 16.0, 2.0, 0.0, 0.5]), (0,3): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]), (0,4): ('b', [TE, TE, [1, 1], TE, TE, TE, TE]), (0,5): ('b', [TE, TE, (2, 2), TE, TE, TE, TE]), (0,6): ('b', [TE, TE, TE, TE, TE, TE, TE]), (0,7): ('e', (0,0)), (0,8): ('e', (0,0)), # 2L (1,0): ('e', (0,0)), (1,1): ('e', (0,1)), (1,2): ('e', (0,2)), (1,3): ('e', (0,3)), (1,4): ('e', (0,4)), (1,5): ('e', (0,5)), (1,6): ('e', (0,6)), (1,7): ('e', (0,7)), (1,8): ('e', (0,8)), # 4.0 (2,0): ('b', [6.0, 2.0, 8.0, 16.0, 0.0, 2.0, 2.0]), (2,1): ('e', (2,0)), (2,2): ('b', [8.0, 0.0, 16.0, 256.0, 0.0, 1.0, 1.0]), (2,3): ('b', [6+0j, 2+0j, 8+0j, 16+0j, 0+0j, 2+0j, 2+0j]), (2,4): ('b', [TE, TE, TE, TE, TE, TE, TE]), (2,5): ('e', (2,4)), (2,6): ('e', (2,4)), (2,7): ('e', (2,0)), (2,8): ('e', (2,0)), # (2+0j) (3,0): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]), (3,1): ('e', (3,0)), (3,2): ('b', [6+0j, -2+0j, 8+0j, 16+0j, 2+0j, 0+0j, 0.5+0j]), (3,3): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]), (3,4): ('b', [TE, TE, TE, TE, TE, TE, TE]), (3,5): ('e', (3,4)), (3,6): ('e', (3,4)), (3,7): ('e', (3,0)), (3,8): ('e', (3,0)), # [1] (4,0): ('b', [TE, TE, [1, 1], TE, TE, TE, TE]), (4,1): ('e', (4,0)), (4,2): ('b', [TE, TE, TE, TE, TE, TE, TE]), (4,3): ('b', [TE, TE, TE, TE, TE, TE, TE]), (4,4): ('b', [[1, 1], TE, TE, TE, TE, 
TE, TE]), (4,5): ('s', [TE, TE, TE, TE, TE, TE, TE], [[1, 2], TE, TE, TE, TE, TE, TE]), (4,6): ('b', [TE, TE, TE, TE, TE, TE, TE]), (4,7): ('e', (4,0)), (4,8): ('e', (4,0)), # (2,) (5,0): ('b', [TE, TE, (2, 2), TE, TE, TE, TE]), (5,1): ('e', (5,0)), (5,2): ('b', [TE, TE, TE, TE, TE, TE, TE]), (5,3): ('e', (5,2)), (5,4): ('e', (5,2)), (5,5): ('b', [(2, 2), TE, TE, TE, TE, TE, TE]), (5,6): ('b', [TE, TE, TE, TE, TE, TE, TE]), (5,7): ('e', (5,0)), (5,8): ('e', (5,0)), # None (6,0): ('b', [TE, TE, TE, TE, TE, TE, TE]), (6,1): ('e', (6,0)), (6,2): ('e', (6,0)), (6,3): ('e', (6,0)), (6,4): ('e', (6,0)), (6,5): ('e', (6,0)), (6,6): ('e', (6,0)), (6,7): ('e', (6,0)), (6,8): ('e', (6,0)), # MethodNumber(2) (7,0): ('e', (0,0)), (7,1): ('e', (0,1)), (7,2): ('e', (0,2)), (7,3): ('e', (0,3)), (7,4): ('e', (0,4)), (7,5): ('e', (0,5)), (7,6): ('e', (0,6)), (7,7): ('e', (0,7)), (7,8): ('e', (0,8)), # CoerceNumber(2) (8,0): ('e', (0,0)), (8,1): ('e', (0,1)), (8,2): ('e', (0,2)), (8,3): ('e', (0,3)), (8,4): ('e', (0,4)), (8,5): ('e', (0,5)), (8,6): ('e', (0,6)), (8,7): ('e', (0,7)), (8,8): ('e', (0,8)), } def process_infix_results(): for key in sorted(infix_results): val = infix_results[key] if val[0] == 'e': infix_results[key] = infix_results[val[1]] else: if val[0] == 's': res = (val[1], val[2]) elif val[0] == 'b': res = (val[1], val[1]) for i in range(1): if isinstance(res[i][6], tuple): if 1/2 == 0: # testing with classic (floor) division res[i][6] = res[i][6][0] else: # testing with -Qnew res[i][6] = res[i][6][1] infix_results[key] = res with check_warnings(("classic (int|long) division", DeprecationWarning), quiet=True): process_infix_results() # now infix_results has two lists of results for every pairing. prefix_binops = [ 'divmod' ] prefix_results = [ [(1,0), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1,0)], [(1L,0L), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1L,0L)], [(2.0,0.0), (2.0,0.0), (1.0,0.0), ((2+0j),0j), TE, TE, TE, TE, (2.0,0.0)], [((1+0j),0j), ((1+0j),0j), (0j,(2+0j)), ((1+0j),0j), TE, TE, TE, TE, ((1+0j),0j)], [TE, TE, TE, TE, TE, TE, TE, TE, TE], [TE, TE, TE, TE, TE, TE, TE, TE, TE], [TE, TE, TE, TE, TE, TE, TE, TE, TE], [TE, TE, TE, TE, TE, TE, TE, TE, TE], [(1,0), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1,0)] ] def format_float(value): if abs(value) < 0.01: return '0.0' else: return '%.1f' % value # avoid testing platform fp quirks def format_result(value): if isinstance(value, complex): return '(%s + %sj)' % (format_float(value.real), format_float(value.imag)) elif isinstance(value, float): return format_float(value) return str(value) class CoercionTest(unittest.TestCase): def test_infix_binops(self): for ia, a in enumerate(candidates): for ib, b in enumerate(candidates): results = infix_results[(ia, ib)] for op, res, ires in zip(infix_binops, results[0], results[1]): if res is TE: self.assertRaises(TypeError, eval, 'a %s b' % op, {'a': a, 'b': b}) else: self.assertEqual(format_result(res), format_result(eval('a %s b' % op)), '%s %s %s == %s failed' % (a, op, b, res)) try: z = copy.copy(a) except copy.Error: z = a # assume it has no inplace ops if ires is TE: try: exec 'z %s= b' % op except TypeError: pass else: self.fail("TypeError not raised") else: exec('z %s= b' % op) self.assertEqual(ires, z) def test_prefix_binops(self): for ia, a in enumerate(candidates): for ib, b in enumerate(candidates): for op in prefix_binops: res = prefix_results[ia][ib] if res is TE: self.assertRaises(TypeError, eval, '%s(a, b)' % op, {'a': a, 'b': b}) else: 
self.assertEqual(format_result(res), format_result(eval('%s(a, b)' % op)), '%s(%s, %s) == %s failed' % (op, a, b, res)) def test_cmptypes(self): # Built-in tp_compare slots expect their arguments to have the # same type, but a user-defined __coerce__ doesn't have to obey. # SF #980352 evil_coercer = CoerceTo(42) # Make sure these don't crash any more self.assertNotEqual(cmp(u'fish', evil_coercer), 0) self.assertNotEqual(cmp(slice(1), evil_coercer), 0) # ...but that this still works class WackyComparer(object): def __cmp__(slf, other): self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other) return 0 __hash__ = None # Invalid cmp makes this unhashable self.assertEqual(cmp(WackyComparer(), evil_coercer), 0) # ...and classic classes too, since that code path is a little different class ClassicWackyComparer: def __cmp__(slf, other): self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other) return 0 self.assertEqual(cmp(ClassicWackyComparer(), evil_coercer), 0) def test_infinite_rec_classic_classes(self): # if __coerce__() returns its arguments reversed it causes an infinite # recursion for classic classes. class Tester: def __coerce__(self, other): return other, self exc = TestFailed("__coerce__() returning its arguments reverse " "should raise RuntimeError") try: Tester() + 1 except (RuntimeError, TypeError): return except: raise exc else: raise exc def test_main(): with check_warnings(("complex divmod.., // and % are deprecated", DeprecationWarning), ("classic (int|long) division", DeprecationWarning), quiet=True): run_unittest(CoercionTest) if __name__ == "__main__": test_main()
mit
-3,327,962,997,517,565,400
4,264,625,607,889,480,700
31.755747
86
0.431178
false
garg10may/youtube-dl
youtube_dl/extractor/dcn.py
34
2915
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urllib_parse, compat_urllib_request, ) from ..utils import ( int_or_none, parse_iso8601, ) class DCNIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/.+|show/\d+/.+?)/(?P<id>\d+)' _TEST = { 'url': 'http://www.dcndigital.ae/#/show/199074/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375/6887', 'info_dict': { 'id': '17375', 'ext': 'mp4', 'title': 'رحلة العمر : الحلقة 1', 'description': 'md5:0156e935d870acb8ef0a66d24070c6d6', 'thumbnail': 're:^https?://.*\.jpg$', 'duration': 2041, 'timestamp': 1227504126, 'upload_date': '20081124', }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) request = compat_urllib_request.Request( 'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id, headers={'Origin': 'http://www.dcndigital.ae'}) video = self._download_json(request, video_id) title = video.get('title_en') or video['title_ar'] webpage = self._download_webpage( 'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' + compat_urllib_parse.urlencode({ 'id': video['id'], 'user_id': video['user_id'], 'signature': video['signature'], 'countries': 'Q0M=', 'filter': 'DENY', }), video_id) m3u8_url = self._html_search_regex(r'file:\s*"([^"]+)', webpage, 'm3u8 url') formats = self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') rtsp_url = self._search_regex( r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False) if rtsp_url: formats.append({ 'url': rtsp_url, 'format_id': 'rtsp', }) self._sort_formats(formats) img = video.get('img') thumbnail = 'http://admin.mangomolo.com/analytics/%s' % img if img else None duration = int_or_none(video.get('duration')) description = video.get('description_en') or video.get('description_ar') timestamp = parse_iso8601(video.get('create_time') or video.get('update_time'), ' ') return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'timestamp': timestamp, 'formats': formats, }
unlicense
2,170,134,336,058,652,200
7,752,495,345,292,330,000
33.52381
162
0.522759
false
ingadhoc/odoo
openerp/osv/orm.py
199
6222
import simplejson from lxml import etree from ..exceptions import except_orm from ..models import ( MetaModel, BaseModel, Model, TransientModel, AbstractModel, MAGIC_COLUMNS, LOG_ACCESS_COLUMNS, ) from openerp.tools.safe_eval import safe_eval as eval # extra definitions for backward compatibility browse_record_list = BaseModel class browse_record(object): """ Pseudo-class for testing record instances """ class __metaclass__(type): def __instancecheck__(self, inst): return isinstance(inst, BaseModel) and len(inst) <= 1 class browse_null(object): """ Pseudo-class for testing null instances """ class __metaclass__(type): def __instancecheck__(self, inst): return isinstance(inst, BaseModel) and not inst def transfer_field_to_modifiers(field, modifiers): default_values = {} state_exceptions = {} for attr in ('invisible', 'readonly', 'required'): state_exceptions[attr] = [] default_values[attr] = bool(field.get(attr)) for state, modifs in (field.get("states",{})).items(): for modif in modifs: if default_values[modif[0]] != modif[1]: state_exceptions[modif[0]].append(state) for attr, default_value in default_values.items(): if state_exceptions[attr]: modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])] else: modifiers[attr] = default_value # Don't deal with groups, it is done by check_group(). # Need the context to evaluate the invisible attribute on tree views. # For non-tree views, the context shouldn't be given. def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False): if node.get('attrs'): modifiers.update(eval(node.get('attrs'))) if node.get('states'): if 'invisible' in modifiers and isinstance(modifiers['invisible'], list): # TODO combine with AND or OR, use implicit AND for now. modifiers['invisible'].append(('state', 'not in', node.get('states').split(','))) else: modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))] for a in ('invisible', 'readonly', 'required'): if node.get(a): v = bool(eval(node.get(a), {'context': context or {}})) if in_tree_view and a == 'invisible': # Invisible in a tree view has a specific meaning, make it a # new key in the modifiers attribute. modifiers['tree_invisible'] = v elif v or (a not in modifiers or not isinstance(modifiers[a], list)): # Don't set the attribute to False if a dynamic value was # provided (i.e. a domain from attrs or states). modifiers[a] = v def simplify_modifiers(modifiers): for a in ('invisible', 'readonly', 'required'): if a in modifiers and not modifiers[a]: del modifiers[a] def transfer_modifiers_to_node(modifiers, node): if modifiers: simplify_modifiers(modifiers) node.set('modifiers', simplejson.dumps(modifiers)) def setup_modifiers(node, field=None, context=None, in_tree_view=False): """ Processes node attributes and field descriptors to generate the ``modifiers`` node attribute and set it on the provided node. Alters its first argument in-place. :param node: ``field`` node from an OpenERP view :type node: lxml.etree._Element :param dict field: field descriptor corresponding to the provided node :param dict context: execution context used to evaluate node attributes :param bool in_tree_view: triggers the ``tree_invisible`` code path (separate from ``invisible``): in tree view there are two levels of invisibility, cell content (a column is present but the cell itself is not displayed) with ``invisible`` and column invisibility (the whole column is hidden) with ``tree_invisible``. 
:returns: nothing """ modifiers = {} if field is not None: transfer_field_to_modifiers(field, modifiers) transfer_node_to_modifiers( node, modifiers, context=context, in_tree_view=in_tree_view) transfer_modifiers_to_node(modifiers, node) def test_modifiers(what, expected): modifiers = {} if isinstance(what, basestring): node = etree.fromstring(what) transfer_node_to_modifiers(node, modifiers) simplify_modifiers(modifiers) json = simplejson.dumps(modifiers) assert json == expected, "%s != %s" % (json, expected) elif isinstance(what, dict): transfer_field_to_modifiers(what, modifiers) simplify_modifiers(modifiers) json = simplejson.dumps(modifiers) assert json == expected, "%s != %s" % (json, expected) # To use this test: # import openerp # openerp.osv.orm.modifiers_tests() def modifiers_tests(): test_modifiers('<field name="a"/>', '{}') test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}') test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}') test_modifiers('<field name="a" required="1"/>', '{"required": true}') test_modifiers('<field name="a" invisible="0"/>', '{}') test_modifiers('<field name="a" readonly="0"/>', '{}') test_modifiers('<field name="a" required="0"/>', '{}') test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}') test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}') test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}') # The dictionary is supposed to be the result of fields_get(). test_modifiers({}, '{}') test_modifiers({"invisible": True}, '{"invisible": true}') test_modifiers({"invisible": False}, '{}')
agpl-3.0
6,245,757,846,319,378,000
-4,067,341,734,857,294,300
40.758389
138
0.61395
false
jwlawson/tensorflow
tensorflow/contrib/py2tf/api_test.py
3
2115
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for api module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.py2tf import api from tensorflow.contrib.py2tf import config from tensorflow.contrib.py2tf.pyct import parser from tensorflow.python.framework import constant_op from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class ApiTest(test.TestCase): def test_to_graph_basic(self): def test_fn(x, s): while math_ops.reduce_sum(x) > s: x //= 2 return x config.DEFAULT_UNCOMPILED_MODULES.add((math_ops.__name__,)) config.COMPILED_IMPORT_STATEMENTS = ( 'from tensorflow.python.ops ' 'import control_flow_ops as tf', ) compiled_fn = api.to_graph(test_fn) with self.test_session() as sess: x = compiled_fn(constant_op.constant([4, 8]), 4) self.assertListEqual([1, 2], sess.run(x).tolist()) def test_to_code_basic(self): def test_fn(x, s): while math_ops.reduce_sum(x) > s: x /= 2 return x config.DEFAULT_UNCOMPILED_MODULES.add((math_ops.__name__,)) compiled_code = api.to_code(test_fn) # Just check for some key words and that it is parseable Python code. self.assertRegexpMatches(compiled_code, 'tf\\.while_loop') self.assertIsNotNone(parser.parse_str(compiled_code)) if __name__ == '__main__': test.main()
apache-2.0
-7,683,720,184,177,094,000
4,150,182,507,494,591,000
32.571429
80
0.675177
false
timduru/platform-external-chromium_org
tools/symsrc/pefile.py
187
139621
# -*- coding: Latin-1 -*- """pefile, Portable Executable reader module All the PE file basic structures are available with their default names as attributes of the instance returned. Processed elements such as the import table are made available with lowercase names, to differentiate them from the upper case basic structure names. pefile has been tested against the limits of valid PE headers, that is, malware. Lots of packed malware attempt to abuse the format way beyond its standard use. To the best of my knowledge most of the abuses are handled gracefully. Copyright (c) 2005, 2006, 2007, 2008 Ero Carrera <ero@dkbza.org> All rights reserved. For detailed copyright information see the file COPYING in the root of the distribution archive. """ __author__ = 'Ero Carrera' __version__ = '1.2.9.1' __contact__ = 'ero@dkbza.org' import os import struct import time import math import re import exceptions import string import array sha1, sha256, sha512, md5 = None, None, None, None try: import hashlib sha1 = hashlib.sha1 sha256 = hashlib.sha256 sha512 = hashlib.sha512 md5 = hashlib.md5 except ImportError: try: import sha sha1 = sha.new except ImportError: pass try: import md5 md5 = md5.new except ImportError: pass fast_load = False IMAGE_DOS_SIGNATURE = 0x5A4D IMAGE_OS2_SIGNATURE = 0x454E IMAGE_OS2_SIGNATURE_LE = 0x454C IMAGE_VXD_SIGNATURE = 0x454C IMAGE_NT_SIGNATURE = 0x00004550 IMAGE_NUMBEROF_DIRECTORY_ENTRIES= 16 IMAGE_ORDINAL_FLAG = 0x80000000L IMAGE_ORDINAL_FLAG64 = 0x8000000000000000L OPTIONAL_HEADER_MAGIC_PE = 0x10b OPTIONAL_HEADER_MAGIC_PE_PLUS = 0x20b directory_entry_types = [ ('IMAGE_DIRECTORY_ENTRY_EXPORT', 0), ('IMAGE_DIRECTORY_ENTRY_IMPORT', 1), ('IMAGE_DIRECTORY_ENTRY_RESOURCE', 2), ('IMAGE_DIRECTORY_ENTRY_EXCEPTION', 3), ('IMAGE_DIRECTORY_ENTRY_SECURITY', 4), ('IMAGE_DIRECTORY_ENTRY_BASERELOC', 5), ('IMAGE_DIRECTORY_ENTRY_DEBUG', 6), ('IMAGE_DIRECTORY_ENTRY_COPYRIGHT', 7), ('IMAGE_DIRECTORY_ENTRY_GLOBALPTR', 8), ('IMAGE_DIRECTORY_ENTRY_TLS', 9), ('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', 10), ('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', 11), ('IMAGE_DIRECTORY_ENTRY_IAT', 12), ('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', 13), ('IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR',14), ('IMAGE_DIRECTORY_ENTRY_RESERVED', 15) ] DIRECTORY_ENTRY = dict([(e[1], e[0]) for e in directory_entry_types]+directory_entry_types) image_characteristics = [ ('IMAGE_FILE_RELOCS_STRIPPED', 0x0001), ('IMAGE_FILE_EXECUTABLE_IMAGE', 0x0002), ('IMAGE_FILE_LINE_NUMS_STRIPPED', 0x0004), ('IMAGE_FILE_LOCAL_SYMS_STRIPPED', 0x0008), ('IMAGE_FILE_AGGRESIVE_WS_TRIM', 0x0010), ('IMAGE_FILE_LARGE_ADDRESS_AWARE', 0x0020), ('IMAGE_FILE_16BIT_MACHINE', 0x0040), ('IMAGE_FILE_BYTES_REVERSED_LO', 0x0080), ('IMAGE_FILE_32BIT_MACHINE', 0x0100), ('IMAGE_FILE_DEBUG_STRIPPED', 0x0200), ('IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP', 0x0400), ('IMAGE_FILE_NET_RUN_FROM_SWAP', 0x0800), ('IMAGE_FILE_SYSTEM', 0x1000), ('IMAGE_FILE_DLL', 0x2000), ('IMAGE_FILE_UP_SYSTEM_ONLY', 0x4000), ('IMAGE_FILE_BYTES_REVERSED_HI', 0x8000) ] IMAGE_CHARACTERISTICS = dict([(e[1], e[0]) for e in image_characteristics]+image_characteristics) section_characteristics = [ ('IMAGE_SCN_CNT_CODE', 0x00000020), ('IMAGE_SCN_CNT_INITIALIZED_DATA', 0x00000040), ('IMAGE_SCN_CNT_UNINITIALIZED_DATA', 0x00000080), ('IMAGE_SCN_LNK_OTHER', 0x00000100), ('IMAGE_SCN_LNK_INFO', 0x00000200), ('IMAGE_SCN_LNK_REMOVE', 0x00000800), ('IMAGE_SCN_LNK_COMDAT', 0x00001000), ('IMAGE_SCN_MEM_FARDATA', 0x00008000), ('IMAGE_SCN_MEM_PURGEABLE', 0x00020000), ('IMAGE_SCN_MEM_16BIT', 0x00020000), ('IMAGE_SCN_MEM_LOCKED', 
0x00040000), ('IMAGE_SCN_MEM_PRELOAD', 0x00080000), ('IMAGE_SCN_ALIGN_1BYTES', 0x00100000), ('IMAGE_SCN_ALIGN_2BYTES', 0x00200000), ('IMAGE_SCN_ALIGN_4BYTES', 0x00300000), ('IMAGE_SCN_ALIGN_8BYTES', 0x00400000), ('IMAGE_SCN_ALIGN_16BYTES', 0x00500000), ('IMAGE_SCN_ALIGN_32BYTES', 0x00600000), ('IMAGE_SCN_ALIGN_64BYTES', 0x00700000), ('IMAGE_SCN_ALIGN_128BYTES', 0x00800000), ('IMAGE_SCN_ALIGN_256BYTES', 0x00900000), ('IMAGE_SCN_ALIGN_512BYTES', 0x00A00000), ('IMAGE_SCN_ALIGN_1024BYTES', 0x00B00000), ('IMAGE_SCN_ALIGN_2048BYTES', 0x00C00000), ('IMAGE_SCN_ALIGN_4096BYTES', 0x00D00000), ('IMAGE_SCN_ALIGN_8192BYTES', 0x00E00000), ('IMAGE_SCN_ALIGN_MASK', 0x00F00000), ('IMAGE_SCN_LNK_NRELOC_OVFL', 0x01000000), ('IMAGE_SCN_MEM_DISCARDABLE', 0x02000000), ('IMAGE_SCN_MEM_NOT_CACHED', 0x04000000), ('IMAGE_SCN_MEM_NOT_PAGED', 0x08000000), ('IMAGE_SCN_MEM_SHARED', 0x10000000), ('IMAGE_SCN_MEM_EXECUTE', 0x20000000), ('IMAGE_SCN_MEM_READ', 0x40000000), ('IMAGE_SCN_MEM_WRITE', 0x80000000L) ] SECTION_CHARACTERISTICS = dict([(e[1], e[0]) for e in section_characteristics]+section_characteristics) debug_types = [ ('IMAGE_DEBUG_TYPE_UNKNOWN', 0), ('IMAGE_DEBUG_TYPE_COFF', 1), ('IMAGE_DEBUG_TYPE_CODEVIEW', 2), ('IMAGE_DEBUG_TYPE_FPO', 3), ('IMAGE_DEBUG_TYPE_MISC', 4), ('IMAGE_DEBUG_TYPE_EXCEPTION', 5), ('IMAGE_DEBUG_TYPE_FIXUP', 6), ('IMAGE_DEBUG_TYPE_OMAP_TO_SRC', 7), ('IMAGE_DEBUG_TYPE_OMAP_FROM_SRC', 8), ('IMAGE_DEBUG_TYPE_BORLAND', 9), ('IMAGE_DEBUG_TYPE_RESERVED10', 10) ] DEBUG_TYPE = dict([(e[1], e[0]) for e in debug_types]+debug_types) subsystem_types = [ ('IMAGE_SUBSYSTEM_UNKNOWN', 0), ('IMAGE_SUBSYSTEM_NATIVE', 1), ('IMAGE_SUBSYSTEM_WINDOWS_GUI', 2), ('IMAGE_SUBSYSTEM_WINDOWS_CUI', 3), ('IMAGE_SUBSYSTEM_OS2_CUI', 5), ('IMAGE_SUBSYSTEM_POSIX_CUI', 7), ('IMAGE_SUBSYSTEM_WINDOWS_CE_GUI', 9), ('IMAGE_SUBSYSTEM_EFI_APPLICATION', 10), ('IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER', 11), ('IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER', 12), ('IMAGE_SUBSYSTEM_EFI_ROM', 13), ('IMAGE_SUBSYSTEM_XBOX', 14)] SUBSYSTEM_TYPE = dict([(e[1], e[0]) for e in subsystem_types]+subsystem_types) machine_types = [ ('IMAGE_FILE_MACHINE_UNKNOWN', 0), ('IMAGE_FILE_MACHINE_AM33', 0x1d3), ('IMAGE_FILE_MACHINE_AMD64', 0x8664), ('IMAGE_FILE_MACHINE_ARM', 0x1c0), ('IMAGE_FILE_MACHINE_EBC', 0xebc), ('IMAGE_FILE_MACHINE_I386', 0x14c), ('IMAGE_FILE_MACHINE_IA64', 0x200), ('IMAGE_FILE_MACHINE_MR32', 0x9041), ('IMAGE_FILE_MACHINE_MIPS16', 0x266), ('IMAGE_FILE_MACHINE_MIPSFPU', 0x366), ('IMAGE_FILE_MACHINE_MIPSFPU16',0x466), ('IMAGE_FILE_MACHINE_POWERPC', 0x1f0), ('IMAGE_FILE_MACHINE_POWERPCFP',0x1f1), ('IMAGE_FILE_MACHINE_R4000', 0x166), ('IMAGE_FILE_MACHINE_SH3', 0x1a2), ('IMAGE_FILE_MACHINE_SH3DSP', 0x1a3), ('IMAGE_FILE_MACHINE_SH4', 0x1a6), ('IMAGE_FILE_MACHINE_SH5', 0x1a8), ('IMAGE_FILE_MACHINE_THUMB', 0x1c2), ('IMAGE_FILE_MACHINE_WCEMIPSV2',0x169), ] MACHINE_TYPE = dict([(e[1], e[0]) for e in machine_types]+machine_types) relocation_types = [ ('IMAGE_REL_BASED_ABSOLUTE', 0), ('IMAGE_REL_BASED_HIGH', 1), ('IMAGE_REL_BASED_LOW', 2), ('IMAGE_REL_BASED_HIGHLOW', 3), ('IMAGE_REL_BASED_HIGHADJ', 4), ('IMAGE_REL_BASED_MIPS_JMPADDR', 5), ('IMAGE_REL_BASED_SECTION', 6), ('IMAGE_REL_BASED_REL', 7), ('IMAGE_REL_BASED_MIPS_JMPADDR16', 9), ('IMAGE_REL_BASED_IA64_IMM64', 9), ('IMAGE_REL_BASED_DIR64', 10), ('IMAGE_REL_BASED_HIGH3ADJ', 11) ] RELOCATION_TYPE = dict([(e[1], e[0]) for e in relocation_types]+relocation_types) dll_characteristics = [ ('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0001', 0x0001), ('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0002', 0x0002), 
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0004', 0x0004), ('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0008', 0x0008), ('IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE', 0x0040), ('IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY', 0x0080), ('IMAGE_DLL_CHARACTERISTICS_NX_COMPAT', 0x0100), ('IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION', 0x0200), ('IMAGE_DLL_CHARACTERISTICS_NO_SEH', 0x0400), ('IMAGE_DLL_CHARACTERISTICS_NO_BIND', 0x0800), ('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x1000', 0x1000), ('IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER', 0x2000), ('IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE', 0x8000) ] DLL_CHARACTERISTICS = dict([(e[1], e[0]) for e in dll_characteristics]+dll_characteristics) # Resource types resource_type = [ ('RT_CURSOR', 1), ('RT_BITMAP', 2), ('RT_ICON', 3), ('RT_MENU', 4), ('RT_DIALOG', 5), ('RT_STRING', 6), ('RT_FONTDIR', 7), ('RT_FONT', 8), ('RT_ACCELERATOR', 9), ('RT_RCDATA', 10), ('RT_MESSAGETABLE', 11), ('RT_GROUP_CURSOR', 12), ('RT_GROUP_ICON', 14), ('RT_VERSION', 16), ('RT_DLGINCLUDE', 17), ('RT_PLUGPLAY', 19), ('RT_VXD', 20), ('RT_ANICURSOR', 21), ('RT_ANIICON', 22), ('RT_HTML', 23), ('RT_MANIFEST', 24) ] RESOURCE_TYPE = dict([(e[1], e[0]) for e in resource_type]+resource_type) # Language definitions lang = [ ('LANG_NEUTRAL', 0x00), ('LANG_INVARIANT', 0x7f), ('LANG_AFRIKAANS', 0x36), ('LANG_ALBANIAN', 0x1c), ('LANG_ARABIC', 0x01), ('LANG_ARMENIAN', 0x2b), ('LANG_ASSAMESE', 0x4d), ('LANG_AZERI', 0x2c), ('LANG_BASQUE', 0x2d), ('LANG_BELARUSIAN', 0x23), ('LANG_BENGALI', 0x45), ('LANG_BULGARIAN', 0x02), ('LANG_CATALAN', 0x03), ('LANG_CHINESE', 0x04), ('LANG_CROATIAN', 0x1a), ('LANG_CZECH', 0x05), ('LANG_DANISH', 0x06), ('LANG_DIVEHI', 0x65), ('LANG_DUTCH', 0x13), ('LANG_ENGLISH', 0x09), ('LANG_ESTONIAN', 0x25), ('LANG_FAEROESE', 0x38), ('LANG_FARSI', 0x29), ('LANG_FINNISH', 0x0b), ('LANG_FRENCH', 0x0c), ('LANG_GALICIAN', 0x56), ('LANG_GEORGIAN', 0x37), ('LANG_GERMAN', 0x07), ('LANG_GREEK', 0x08), ('LANG_GUJARATI', 0x47), ('LANG_HEBREW', 0x0d), ('LANG_HINDI', 0x39), ('LANG_HUNGARIAN', 0x0e), ('LANG_ICELANDIC', 0x0f), ('LANG_INDONESIAN', 0x21), ('LANG_ITALIAN', 0x10), ('LANG_JAPANESE', 0x11), ('LANG_KANNADA', 0x4b), ('LANG_KASHMIRI', 0x60), ('LANG_KAZAK', 0x3f), ('LANG_KONKANI', 0x57), ('LANG_KOREAN', 0x12), ('LANG_KYRGYZ', 0x40), ('LANG_LATVIAN', 0x26), ('LANG_LITHUANIAN', 0x27), ('LANG_MACEDONIAN', 0x2f), ('LANG_MALAY', 0x3e), ('LANG_MALAYALAM', 0x4c), ('LANG_MANIPURI', 0x58), ('LANG_MARATHI', 0x4e), ('LANG_MONGOLIAN', 0x50), ('LANG_NEPALI', 0x61), ('LANG_NORWEGIAN', 0x14), ('LANG_ORIYA', 0x48), ('LANG_POLISH', 0x15), ('LANG_PORTUGUESE', 0x16), ('LANG_PUNJABI', 0x46), ('LANG_ROMANIAN', 0x18), ('LANG_RUSSIAN', 0x19), ('LANG_SANSKRIT', 0x4f), ('LANG_SERBIAN', 0x1a), ('LANG_SINDHI', 0x59), ('LANG_SLOVAK', 0x1b), ('LANG_SLOVENIAN', 0x24), ('LANG_SPANISH', 0x0a), ('LANG_SWAHILI', 0x41), ('LANG_SWEDISH', 0x1d), ('LANG_SYRIAC', 0x5a), ('LANG_TAMIL', 0x49), ('LANG_TATAR', 0x44), ('LANG_TELUGU', 0x4a), ('LANG_THAI', 0x1e), ('LANG_TURKISH', 0x1f), ('LANG_UKRAINIAN', 0x22), ('LANG_URDU', 0x20), ('LANG_UZBEK', 0x43), ('LANG_VIETNAMESE', 0x2a), ('LANG_GAELIC', 0x3c), ('LANG_MALTESE', 0x3a), ('LANG_MAORI', 0x28), ('LANG_RHAETO_ROMANCE',0x17), ('LANG_SAAMI', 0x3b), ('LANG_SORBIAN', 0x2e), ('LANG_SUTU', 0x30), ('LANG_TSONGA', 0x31), ('LANG_TSWANA', 0x32), ('LANG_VENDA', 0x33), ('LANG_XHOSA', 0x34), ('LANG_ZULU', 0x35), ('LANG_ESPERANTO', 0x8f), ('LANG_WALON', 0x90), ('LANG_CORNISH', 0x91), ('LANG_WELSH', 0x92), ('LANG_BRETON', 0x93) ] LANG = dict(lang+[(e[1], e[0]) for e in lang]) # Sublanguage definitions 
sublang = [ ('SUBLANG_NEUTRAL', 0x00), ('SUBLANG_DEFAULT', 0x01), ('SUBLANG_SYS_DEFAULT', 0x02), ('SUBLANG_ARABIC_SAUDI_ARABIA', 0x01), ('SUBLANG_ARABIC_IRAQ', 0x02), ('SUBLANG_ARABIC_EGYPT', 0x03), ('SUBLANG_ARABIC_LIBYA', 0x04), ('SUBLANG_ARABIC_ALGERIA', 0x05), ('SUBLANG_ARABIC_MOROCCO', 0x06), ('SUBLANG_ARABIC_TUNISIA', 0x07), ('SUBLANG_ARABIC_OMAN', 0x08), ('SUBLANG_ARABIC_YEMEN', 0x09), ('SUBLANG_ARABIC_SYRIA', 0x0a), ('SUBLANG_ARABIC_JORDAN', 0x0b), ('SUBLANG_ARABIC_LEBANON', 0x0c), ('SUBLANG_ARABIC_KUWAIT', 0x0d), ('SUBLANG_ARABIC_UAE', 0x0e), ('SUBLANG_ARABIC_BAHRAIN', 0x0f), ('SUBLANG_ARABIC_QATAR', 0x10), ('SUBLANG_AZERI_LATIN', 0x01), ('SUBLANG_AZERI_CYRILLIC', 0x02), ('SUBLANG_CHINESE_TRADITIONAL', 0x01), ('SUBLANG_CHINESE_SIMPLIFIED', 0x02), ('SUBLANG_CHINESE_HONGKONG', 0x03), ('SUBLANG_CHINESE_SINGAPORE', 0x04), ('SUBLANG_CHINESE_MACAU', 0x05), ('SUBLANG_DUTCH', 0x01), ('SUBLANG_DUTCH_BELGIAN', 0x02), ('SUBLANG_ENGLISH_US', 0x01), ('SUBLANG_ENGLISH_UK', 0x02), ('SUBLANG_ENGLISH_AUS', 0x03), ('SUBLANG_ENGLISH_CAN', 0x04), ('SUBLANG_ENGLISH_NZ', 0x05), ('SUBLANG_ENGLISH_EIRE', 0x06), ('SUBLANG_ENGLISH_SOUTH_AFRICA', 0x07), ('SUBLANG_ENGLISH_JAMAICA', 0x08), ('SUBLANG_ENGLISH_CARIBBEAN', 0x09), ('SUBLANG_ENGLISH_BELIZE', 0x0a), ('SUBLANG_ENGLISH_TRINIDAD', 0x0b), ('SUBLANG_ENGLISH_ZIMBABWE', 0x0c), ('SUBLANG_ENGLISH_PHILIPPINES', 0x0d), ('SUBLANG_FRENCH', 0x01), ('SUBLANG_FRENCH_BELGIAN', 0x02), ('SUBLANG_FRENCH_CANADIAN', 0x03), ('SUBLANG_FRENCH_SWISS', 0x04), ('SUBLANG_FRENCH_LUXEMBOURG', 0x05), ('SUBLANG_FRENCH_MONACO', 0x06), ('SUBLANG_GERMAN', 0x01), ('SUBLANG_GERMAN_SWISS', 0x02), ('SUBLANG_GERMAN_AUSTRIAN', 0x03), ('SUBLANG_GERMAN_LUXEMBOURG', 0x04), ('SUBLANG_GERMAN_LIECHTENSTEIN', 0x05), ('SUBLANG_ITALIAN', 0x01), ('SUBLANG_ITALIAN_SWISS', 0x02), ('SUBLANG_KASHMIRI_SASIA', 0x02), ('SUBLANG_KASHMIRI_INDIA', 0x02), ('SUBLANG_KOREAN', 0x01), ('SUBLANG_LITHUANIAN', 0x01), ('SUBLANG_MALAY_MALAYSIA', 0x01), ('SUBLANG_MALAY_BRUNEI_DARUSSALAM', 0x02), ('SUBLANG_NEPALI_INDIA', 0x02), ('SUBLANG_NORWEGIAN_BOKMAL', 0x01), ('SUBLANG_NORWEGIAN_NYNORSK', 0x02), ('SUBLANG_PORTUGUESE', 0x02), ('SUBLANG_PORTUGUESE_BRAZILIAN', 0x01), ('SUBLANG_SERBIAN_LATIN', 0x02), ('SUBLANG_SERBIAN_CYRILLIC', 0x03), ('SUBLANG_SPANISH', 0x01), ('SUBLANG_SPANISH_MEXICAN', 0x02), ('SUBLANG_SPANISH_MODERN', 0x03), ('SUBLANG_SPANISH_GUATEMALA', 0x04), ('SUBLANG_SPANISH_COSTA_RICA', 0x05), ('SUBLANG_SPANISH_PANAMA', 0x06), ('SUBLANG_SPANISH_DOMINICAN_REPUBLIC', 0x07), ('SUBLANG_SPANISH_VENEZUELA', 0x08), ('SUBLANG_SPANISH_COLOMBIA', 0x09), ('SUBLANG_SPANISH_PERU', 0x0a), ('SUBLANG_SPANISH_ARGENTINA', 0x0b), ('SUBLANG_SPANISH_ECUADOR', 0x0c), ('SUBLANG_SPANISH_CHILE', 0x0d), ('SUBLANG_SPANISH_URUGUAY', 0x0e), ('SUBLANG_SPANISH_PARAGUAY', 0x0f), ('SUBLANG_SPANISH_BOLIVIA', 0x10), ('SUBLANG_SPANISH_EL_SALVADOR', 0x11), ('SUBLANG_SPANISH_HONDURAS', 0x12), ('SUBLANG_SPANISH_NICARAGUA', 0x13), ('SUBLANG_SPANISH_PUERTO_RICO', 0x14), ('SUBLANG_SWEDISH', 0x01), ('SUBLANG_SWEDISH_FINLAND', 0x02), ('SUBLANG_URDU_PAKISTAN', 0x01), ('SUBLANG_URDU_INDIA', 0x02), ('SUBLANG_UZBEK_LATIN', 0x01), ('SUBLANG_UZBEK_CYRILLIC', 0x02), ('SUBLANG_DUTCH_SURINAM', 0x03), ('SUBLANG_ROMANIAN', 0x01), ('SUBLANG_ROMANIAN_MOLDAVIA', 0x02), ('SUBLANG_RUSSIAN', 0x01), ('SUBLANG_RUSSIAN_MOLDAVIA', 0x02), ('SUBLANG_CROATIAN', 0x01), ('SUBLANG_LITHUANIAN_CLASSIC', 0x02), ('SUBLANG_GAELIC', 0x01), ('SUBLANG_GAELIC_SCOTTISH', 0x02), ('SUBLANG_GAELIC_MANX', 0x03) ] SUBLANG = dict(sublang+[(e[1], e[0]) for e in sublang]) class 
UnicodeStringWrapperPostProcessor: """This class attemps to help the process of identifying strings that might be plain Unicode or Pascal. A list of strings will be wrapped on it with the hope the overlappings will help make the decission about their type.""" def __init__(self, pe, rva_ptr): self.pe = pe self.rva_ptr = rva_ptr self.string = None def get_rva(self): """Get the RVA of the string.""" return self.rva_ptr def __str__(self): """Return the escaped ASCII representation of the string.""" def convert_char(char): if char in string.printable: return char else: return r'\x%02x' % ord(char) if self.string: return ''.join([convert_char(c) for c in self.string]) return '' def invalidate(self): """Make this instance None, to express it's no known string type.""" self = None def render_pascal_16(self): self.string = self.pe.get_string_u_at_rva( self.rva_ptr+2, max_length=self.__get_pascal_16_length()) def ask_pascal_16(self, next_rva_ptr): """The next RVA is taken to be the one immediately following this one. Such RVA could indicate the natural end of the string and will be checked with the possible length contained in the first word. """ length = self.__get_pascal_16_length() if length == (next_rva_ptr - (self.rva_ptr+2)) / 2: self.length = length return True return False def __get_pascal_16_length(self): return self.__get_word_value_at_rva(self.rva_ptr) def __get_word_value_at_rva(self, rva): try: data = self.pe.get_data(self.rva_ptr, 2) except PEFormatError, e: return False if len(data)<2: return False return struct.unpack('<H', data)[0] #def render_pascal_8(self): # """""" def ask_unicode_16(self, next_rva_ptr): """The next RVA is taken to be the one immediately following this one. Such RVA could indicate the natural end of the string and will be checked to see if there's a Unicode NULL character there. """ if self.__get_word_value_at_rva(next_rva_ptr-2) == 0: self.length = next_rva_ptr - self.rva_ptr return True return False def render_unicode_16(self): """""" self.string = self.pe.get_string_u_at_rva(self.rva_ptr) class PEFormatError(Exception): """Generic PE format error exception.""" def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class Dump: """Convenience class for dumping the PE information.""" def __init__(self): self.text = '' def add_lines(self, txt, indent=0): """Adds a list of lines. The list can be indented with the optional argument 'indent'. """ for line in txt: self.add_line(line, indent) def add_line(self, txt, indent=0): """Adds a line. The line can be indented with the optional argument 'indent'. """ self.add(txt+'\n', indent) def add(self, txt, indent=0): """Adds some text, no newline will be appended. The text can be indented with the optional argument 'indent'. """ if isinstance(txt, unicode): s = [] for c in txt: try: s.append(str(c)) except UnicodeEncodeError, e: s.append(repr(c)) txt = ''.join(s) self.text += ' '*indent+txt def add_header(self, txt): """Adds a header element.""" self.add_line('-'*10+txt+'-'*10+'\n') def add_newline(self): """Adds a newline.""" self.text += '\n' def get_text(self): """Get the text in its current state.""" return self.text class Structure: """Prepare structure object to extract members from data. Format is a list containing definitions for the elements of the structure. 
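
    For illustration only (a minimal sketch, not additional functionality):
    the IMAGE_DATA_DIRECTORY entries used later in this module are declared
    as

        ('IMAGE_DATA_DIRECTORY', ('L,VirtualAddress', 'L,Size'))

    and unpacking eight bytes of raw data through such a definition exposes
    the named members as attributes:

        data_dir_format = ('IMAGE_DATA_DIRECTORY',
            ('L,VirtualAddress', 'L,Size'))
        dir_entry = Structure(data_dir_format)
        dir_entry.__unpack__(some_data[:dir_entry.sizeof()])
        # dir_entry.VirtualAddress and dir_entry.Size now hold the values

    'some_data' is just a placeholder for a buffer read from a PE file.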
""" def __init__(self, format, name=None, file_offset=None): # Format is forced little endian, for big endian non Intel platforms self.__format__ = '<' self.__keys__ = [] # self.values = {} self.__format_length__ = 0 self.__set_format__(format[1]) self._all_zeroes = False self.__unpacked_data_elms__ = None self.__file_offset__ = file_offset if name: self.name = name else: self.name = format[0] def __get_format__(self): return self.__format__ def get_file_offset(self): return self.__file_offset__ def set_file_offset(self, offset): self.__file_offset__ = offset def all_zeroes(self): """Returns true is the unpacked data is all zeroes.""" return self._all_zeroes def __set_format__(self, format): for elm in format: if ',' in elm: elm_type, elm_name = elm.split(',', 1) self.__format__ += elm_type elm_names = elm_name.split(',') names = [] for elm_name in elm_names: if elm_name in self.__keys__: search_list = [x[:len(elm_name)] for x in self.__keys__] occ_count = search_list.count(elm_name) elm_name = elm_name+'_'+str(occ_count) names.append(elm_name) # Some PE header structures have unions on them, so a certain # value might have different names, so each key has a list of # all the possible members referring to the data. self.__keys__.append(names) self.__format_length__ = struct.calcsize(self.__format__) def sizeof(self): """Return size of the structure.""" return self.__format_length__ def __unpack__(self, data): if len(data)>self.__format_length__: data = data[:self.__format_length__] # OC Patch: # Some malware have incorrect header lengths. # Fail gracefully if this occurs # Buggy malware: a29b0118af8b7408444df81701ad5a7f # elif len(data)<self.__format_length__: raise PEFormatError('Data length less than expected header length.') if data.count(chr(0)) == len(data): self._all_zeroes = True self.__unpacked_data_elms__ = struct.unpack(self.__format__, data) for i in xrange(len(self.__unpacked_data_elms__)): for key in self.__keys__[i]: # self.values[key] = self.__unpacked_data_elms__[i] setattr(self, key, self.__unpacked_data_elms__[i]) def __pack__(self): new_values = [] for i in xrange(len(self.__unpacked_data_elms__)): for key in self.__keys__[i]: new_val = getattr(self, key) old_val = self.__unpacked_data_elms__[i] # In the case of Unions, when the first changed value # is picked the loop is exited if new_val != old_val: break new_values.append(new_val) return struct.pack(self.__format__, *new_values) def __str__(self): return '\n'.join( self.dump() ) def __repr__(self): return '<Structure: %s>' % (' '.join( [' '.join(s.split()) for s in self.dump()] )) def dump(self, indentation=0): """Returns a string representation of the structure.""" dump = [] dump.append('[%s]' % self.name) # Refer to the __set_format__ method for an explanation # of the following construct. for keys in self.__keys__: for key in keys: val = getattr(self, key) if isinstance(val, int) or isinstance(val, long): val_str = '0x%-8X' % (val) if key == 'TimeDateStamp' or key == 'dwTimeStamp': try: val_str += ' [%s UTC]' % time.asctime(time.gmtime(val)) except exceptions.ValueError, e: val_str += ' [INVALID TIME]' else: val_str = ''.join(filter(lambda c:c != '\0', str(val))) dump.append('%-30s %s' % (key+':', val_str)) return dump class SectionStructure(Structure): """Convenience section handling class.""" def get_data(self, start, length=None): """Get data chunk from a section. Allows to query data from the section by passing the addresses where the PE file would be loaded by default. 
It is then possible to retrieve code and data by its real addresses as it would be if loaded. """ offset = start - self.VirtualAddress if length: end = offset+length else: end = len(self.data) return self.data[offset:end] def get_rva_from_offset(self, offset): return offset - self.PointerToRawData + self.VirtualAddress def get_offset_from_rva(self, rva): return (rva - self.VirtualAddress) + self.PointerToRawData def contains_offset(self, offset): """Check whether the section contains the file offset provided.""" if not self.PointerToRawData: # bss and other sections containing only uninitialized data must have 0 # and do not take space in the file return False return self.PointerToRawData <= offset < self.VirtualAddress + self.SizeOfRawData def contains_rva(self, rva): """Check whether the section contains the address provided.""" # PECOFF documentation v8 says: # The total size of the section when loaded into memory. # If this value is greater than SizeOfRawData, the section is zero-padded. # This field is valid only for executable images and should be set to zero # for object files. if len(self.data) < self.SizeOfRawData: size = self.Misc_VirtualSize else: size = max(self.SizeOfRawData, self.Misc_VirtualSize) return self.VirtualAddress <= rva < self.VirtualAddress + size def contains(self, rva): #print "DEPRECATION WARNING: you should use contains_rva() instead of contains()" return self.contains_rva(rva) def set_data(self, data): """Set the data belonging to the section.""" self.data = data def get_entropy(self): """Calculate and return the entropy for the section.""" return self.entropy_H( self.data ) def get_hash_sha1(self): """Get the SHA-1 hex-digest of the section's data.""" if sha1 is not None: return sha1( self.data ).hexdigest() def get_hash_sha256(self): """Get the SHA-256 hex-digest of the section's data.""" if sha256 is not None: return sha256( self.data ).hexdigest() def get_hash_sha512(self): """Get the SHA-512 hex-digest of the section's data.""" if sha512 is not None: return sha512( self.data ).hexdigest() def get_hash_md5(self): """Get the MD5 hex-digest of the section's data.""" if md5 is not None: return md5( self.data ).hexdigest() def entropy_H(self, data): """Calculate the entropy of a chunk of data.""" if len(data) == 0: return 0.0 occurences = array.array('L', [0]*256) for x in data: occurences[ord(x)] += 1 entropy = 0 for x in occurences: if x: p_x = float(x) / len(data) entropy -= p_x*math.log(p_x, 2) return entropy class DataContainer: """Generic data container.""" def __init__(self, **args): for key, value in args.items(): setattr(self, key, value) class ImportDescData(DataContainer): """Holds import descriptor information. dll: name of the imported DLL imports: list of imported symbols (ImportData instances) struct: IMAGE_IMPORT_DESCRIPTOR sctruture """ class ImportData(DataContainer): """Holds imported symbol's information. ordinal: Ordinal of the symbol name: Name of the symbol bound: If the symbol is bound, this contains the address. """ class ExportDirData(DataContainer): """Holds export directory information. struct: IMAGE_EXPORT_DIRECTORY structure symbols: list of exported symbols (ExportData instances) """ class ExportData(DataContainer): """Holds exported symbols' information. ordinal: ordinal of the symbol address: address of the symbol name: name of the symbol (None if the symbol is exported by ordinal only) forwarder: if the symbol is forwarded it will contain the name of the target symbol, None otherwise. 
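
    As an illustrative sketch of how these import/export containers are
    typically consumed (assuming 'pe' is a fully loaded pefile.PE instance
    and the file actually has the corresponding directories):

        for entry in pe.DIRECTORY_ENTRY_IMPORT:
            print entry.dll
            for imp in entry.imports:
                print '\t', imp.name

        for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
            print exp.ordinal, exp.name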
""" class ResourceDirData(DataContainer): """Holds resource directory information. struct: IMAGE_RESOURCE_DIRECTORY structure entries: list of entries (ResourceDirEntryData instances) """ class ResourceDirEntryData(DataContainer): """Holds resource directory entry data. struct: IMAGE_RESOURCE_DIRECTORY_ENTRY structure name: If the resource is identified by name this attribute will contain the name string. None otherwise. If identified by id, the id is availabe at 'struct.Id' id: the id, also in struct.Id directory: If this entry has a lower level directory this attribute will point to the ResourceDirData instance representing it. data: If this entry has no futher lower directories and points to the actual resource data, this attribute will reference the corresponding ResourceDataEntryData instance. (Either of the 'directory' or 'data' attribute will exist, but not both.) """ class ResourceDataEntryData(DataContainer): """Holds resource data entry information. struct: IMAGE_RESOURCE_DATA_ENTRY structure lang: Primary language ID sublang: Sublanguage ID """ class DebugData(DataContainer): """Holds debug information. struct: IMAGE_DEBUG_DIRECTORY structure """ class BaseRelocationData(DataContainer): """Holds base relocation information. struct: IMAGE_BASE_RELOCATION structure entries: list of relocation data (RelocationData instances) """ class RelocationData(DataContainer): """Holds relocation information. type: Type of relocation The type string is can be obtained by RELOCATION_TYPE[type] rva: RVA of the relocation """ class TlsData(DataContainer): """Holds TLS information. struct: IMAGE_TLS_DIRECTORY structure """ class BoundImportDescData(DataContainer): """Holds bound import descriptor data. This directory entry will provide with information on the DLLs this PE files has been bound to (if bound at all). The structure will contain the name and timestamp of the DLL at the time of binding so that the loader can know whether it differs from the one currently present in the system and must, therefore, re-bind the PE's imports. struct: IMAGE_BOUND_IMPORT_DESCRIPTOR structure name: DLL name entries: list of entries (BoundImportRefData instances) the entries will exist if this DLL has forwarded symbols. If so, the destination DLL will have an entry in this list. """ class BoundImportRefData(DataContainer): """Holds bound import forwader reference data. Contains the same information as the bound descriptor but for forwarded DLLs, if any. struct: IMAGE_BOUND_FORWARDER_REF structure name: dll name """ class PE: """A Portable Executable representation. This class provides access to most of the information in a PE file. It expects to be supplied the name of the file to load or PE data to process and an optional argument 'fast_load' (False by default) which controls whether to load all the directories information, which can be quite time consuming. pe = pefile.PE('module.dll') pe = pefile.PE(name='module.dll') would load 'module.dll' and process it. If the data would be already available in a buffer the same could be achieved with: pe = pefile.PE(data=module_dll_data) The "fast_load" can be set to a default by setting its value in the module itself by means,for instance, of a "pefile.fast_load = True". That will make all the subsequent instances not to load the whole PE structure. The "full_load" method can be used to parse the missing data at a later stage. 
Basic headers information will be available in the attributes: DOS_HEADER NT_HEADERS FILE_HEADER OPTIONAL_HEADER All of them will contain among their attrbitues the members of the corresponding structures as defined in WINNT.H The raw data corresponding to the header (from the beginning of the file up to the start of the first section) will be avaiable in the instance's attribute 'header' as a string. The sections will be available as a list in the 'sections' attribute. Each entry will contain as attributes all the structure's members. Directory entries will be available as attributes (if they exist): (no other entries are processed at this point) DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances) DIRECTORY_ENTRY_EXPORT (ExportDirData instance) DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance) DIRECTORY_ENTRY_DEBUG (list of DebugData instances) DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances) DIRECTORY_ENTRY_TLS DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportData instances) The following dictionary attributes provide ways of mapping different constants. They will accept the numeric value and return the string representation and the opposite, feed in the string and get the numeric constant: DIRECTORY_ENTRY IMAGE_CHARACTERISTICS SECTION_CHARACTERISTICS DEBUG_TYPE SUBSYSTEM_TYPE MACHINE_TYPE RELOCATION_TYPE RESOURCE_TYPE LANG SUBLANG """ # # Format specifications for PE structures. # __IMAGE_DOS_HEADER_format__ = ('IMAGE_DOS_HEADER', ('H,e_magic', 'H,e_cblp', 'H,e_cp', 'H,e_crlc', 'H,e_cparhdr', 'H,e_minalloc', 'H,e_maxalloc', 'H,e_ss', 'H,e_sp', 'H,e_csum', 'H,e_ip', 'H,e_cs', 'H,e_lfarlc', 'H,e_ovno', '8s,e_res', 'H,e_oemid', 'H,e_oeminfo', '20s,e_res2', 'L,e_lfanew')) __IMAGE_FILE_HEADER_format__ = ('IMAGE_FILE_HEADER', ('H,Machine', 'H,NumberOfSections', 'L,TimeDateStamp', 'L,PointerToSymbolTable', 'L,NumberOfSymbols', 'H,SizeOfOptionalHeader', 'H,Characteristics')) __IMAGE_DATA_DIRECTORY_format__ = ('IMAGE_DATA_DIRECTORY', ('L,VirtualAddress', 'L,Size')) __IMAGE_OPTIONAL_HEADER_format__ = ('IMAGE_OPTIONAL_HEADER', ('H,Magic', 'B,MajorLinkerVersion', 'B,MinorLinkerVersion', 'L,SizeOfCode', 'L,SizeOfInitializedData', 'L,SizeOfUninitializedData', 'L,AddressOfEntryPoint', 'L,BaseOfCode', 'L,BaseOfData', 'L,ImageBase', 'L,SectionAlignment', 'L,FileAlignment', 'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion', 'H,MajorImageVersion', 'H,MinorImageVersion', 'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion', 'L,Reserved1', 'L,SizeOfImage', 'L,SizeOfHeaders', 'L,CheckSum', 'H,Subsystem', 'H,DllCharacteristics', 'L,SizeOfStackReserve', 'L,SizeOfStackCommit', 'L,SizeOfHeapReserve', 'L,SizeOfHeapCommit', 'L,LoaderFlags', 'L,NumberOfRvaAndSizes' )) __IMAGE_OPTIONAL_HEADER64_format__ = ('IMAGE_OPTIONAL_HEADER64', ('H,Magic', 'B,MajorLinkerVersion', 'B,MinorLinkerVersion', 'L,SizeOfCode', 'L,SizeOfInitializedData', 'L,SizeOfUninitializedData', 'L,AddressOfEntryPoint', 'L,BaseOfCode', 'Q,ImageBase', 'L,SectionAlignment', 'L,FileAlignment', 'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion', 'H,MajorImageVersion', 'H,MinorImageVersion', 'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion', 'L,Reserved1', 'L,SizeOfImage', 'L,SizeOfHeaders', 'L,CheckSum', 'H,Subsystem', 'H,DllCharacteristics', 'Q,SizeOfStackReserve', 'Q,SizeOfStackCommit', 'Q,SizeOfHeapReserve', 'Q,SizeOfHeapCommit', 'L,LoaderFlags', 'L,NumberOfRvaAndSizes' )) __IMAGE_NT_HEADERS_format__ = ('IMAGE_NT_HEADERS', ('L,Signature',)) __IMAGE_SECTION_HEADER_format__ = 
('IMAGE_SECTION_HEADER', ('8s,Name', 'L,Misc,Misc_PhysicalAddress,Misc_VirtualSize', 'L,VirtualAddress', 'L,SizeOfRawData', 'L,PointerToRawData', 'L,PointerToRelocations', 'L,PointerToLinenumbers', 'H,NumberOfRelocations', 'H,NumberOfLinenumbers', 'L,Characteristics')) __IMAGE_DELAY_IMPORT_DESCRIPTOR_format__ = ('IMAGE_DELAY_IMPORT_DESCRIPTOR', ('L,grAttrs', 'L,szName', 'L,phmod', 'L,pIAT', 'L,pINT', 'L,pBoundIAT', 'L,pUnloadIAT', 'L,dwTimeStamp')) __IMAGE_IMPORT_DESCRIPTOR_format__ = ('IMAGE_IMPORT_DESCRIPTOR', ('L,OriginalFirstThunk,Characteristics', 'L,TimeDateStamp', 'L,ForwarderChain', 'L,Name', 'L,FirstThunk')) __IMAGE_EXPORT_DIRECTORY_format__ = ('IMAGE_EXPORT_DIRECTORY', ('L,Characteristics', 'L,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'L,Name', 'L,Base', 'L,NumberOfFunctions', 'L,NumberOfNames', 'L,AddressOfFunctions', 'L,AddressOfNames', 'L,AddressOfNameOrdinals')) __IMAGE_RESOURCE_DIRECTORY_format__ = ('IMAGE_RESOURCE_DIRECTORY', ('L,Characteristics', 'L,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'H,NumberOfNamedEntries', 'H,NumberOfIdEntries')) __IMAGE_RESOURCE_DIRECTORY_ENTRY_format__ = ('IMAGE_RESOURCE_DIRECTORY_ENTRY', ('L,Name', 'L,OffsetToData')) __IMAGE_RESOURCE_DATA_ENTRY_format__ = ('IMAGE_RESOURCE_DATA_ENTRY', ('L,OffsetToData', 'L,Size', 'L,CodePage', 'L,Reserved')) __VS_VERSIONINFO_format__ = ( 'VS_VERSIONINFO', ('H,Length', 'H,ValueLength', 'H,Type' )) __VS_FIXEDFILEINFO_format__ = ( 'VS_FIXEDFILEINFO', ('L,Signature', 'L,StrucVersion', 'L,FileVersionMS', 'L,FileVersionLS', 'L,ProductVersionMS', 'L,ProductVersionLS', 'L,FileFlagsMask', 'L,FileFlags', 'L,FileOS', 'L,FileType', 'L,FileSubtype', 'L,FileDateMS', 'L,FileDateLS')) __StringFileInfo_format__ = ( 'StringFileInfo', ('H,Length', 'H,ValueLength', 'H,Type' )) __StringTable_format__ = ( 'StringTable', ('H,Length', 'H,ValueLength', 'H,Type' )) __String_format__ = ( 'String', ('H,Length', 'H,ValueLength', 'H,Type' )) __Var_format__ = ( 'Var', ('H,Length', 'H,ValueLength', 'H,Type' )) __IMAGE_THUNK_DATA_format__ = ('IMAGE_THUNK_DATA', ('L,ForwarderString,Function,Ordinal,AddressOfData',)) __IMAGE_THUNK_DATA64_format__ = ('IMAGE_THUNK_DATA', ('Q,ForwarderString,Function,Ordinal,AddressOfData',)) __IMAGE_DEBUG_DIRECTORY_format__ = ('IMAGE_DEBUG_DIRECTORY', ('L,Characteristics', 'L,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'L,Type', 'L,SizeOfData', 'L,AddressOfRawData', 'L,PointerToRawData')) __IMAGE_BASE_RELOCATION_format__ = ('IMAGE_BASE_RELOCATION', ('L,VirtualAddress', 'L,SizeOfBlock') ) __IMAGE_TLS_DIRECTORY_format__ = ('IMAGE_TLS_DIRECTORY', ('L,StartAddressOfRawData', 'L,EndAddressOfRawData', 'L,AddressOfIndex', 'L,AddressOfCallBacks', 'L,SizeOfZeroFill', 'L,Characteristics' ) ) __IMAGE_TLS_DIRECTORY64_format__ = ('IMAGE_TLS_DIRECTORY', ('Q,StartAddressOfRawData', 'Q,EndAddressOfRawData', 'Q,AddressOfIndex', 'Q,AddressOfCallBacks', 'L,SizeOfZeroFill', 'L,Characteristics' ) ) __IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ = ('IMAGE_BOUND_IMPORT_DESCRIPTOR', ('L,TimeDateStamp', 'H,OffsetModuleName', 'H,NumberOfModuleForwarderRefs')) __IMAGE_BOUND_FORWARDER_REF_format__ = ('IMAGE_BOUND_FORWARDER_REF', ('L,TimeDateStamp', 'H,OffsetModuleName', 'H,Reserved') ) def __init__(self, name=None, data=None, fast_load=None): self.sections = [] self.__warnings = [] self.PE_TYPE = None if not name and not data: return # This list will keep track of all the structures created. 
# That will allow for an easy iteration through the list # in order to save the modifications made self.__structures__ = [] if not fast_load: fast_load = globals()['fast_load'] self.__parse__(name, data, fast_load) def __unpack_data__(self, format, data, file_offset): """Apply structure format to raw data. Returns and unpacked structure object if successful, None otherwise. """ structure = Structure(format, file_offset=file_offset) #if len(data) < structure.sizeof(): # return None try: structure.__unpack__(data) except PEFormatError, err: self.__warnings.append( 'Corrupt header "%s" at file offset %d. Exception: %s' % ( format[0], file_offset, str(err)) ) return None self.__structures__.append(structure) return structure def __parse__(self, fname, data, fast_load): """Parse a Portable Executable file. Loads a PE file, parsing all its structures and making them available through the instance's attributes. """ if fname: fd = file(fname, 'rb') self.__data__ = fd.read() fd.close() elif data: self.__data__ = data self.DOS_HEADER = self.__unpack_data__( self.__IMAGE_DOS_HEADER_format__, self.__data__, file_offset=0) if not self.DOS_HEADER or self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE: raise PEFormatError('DOS Header magic not found.') # OC Patch: # Check for sane value in e_lfanew # if self.DOS_HEADER.e_lfanew > len(self.__data__): raise PEFormatError('Invalid e_lfanew value, probably not a PE file') nt_headers_offset = self.DOS_HEADER.e_lfanew self.NT_HEADERS = self.__unpack_data__( self.__IMAGE_NT_HEADERS_format__, self.__data__[nt_headers_offset:], file_offset = nt_headers_offset) # We better check the signature right here, before the file screws # around with sections: # OC Patch: # Some malware will cause the Signature value to not exist at all if not self.NT_HEADERS or not self.NT_HEADERS.Signature: raise PEFormatError('NT Headers not found.') if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE: raise PEFormatError('Invalid NT Headers signature.') self.FILE_HEADER = self.__unpack_data__( self.__IMAGE_FILE_HEADER_format__, self.__data__[nt_headers_offset+4:], file_offset = nt_headers_offset+4) image_flags = self.retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_') if not self.FILE_HEADER: raise PEFormatError('File Header missing') # Set the image's flags according the the Characteristics member self.set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags) optional_header_offset = \ nt_headers_offset+4+self.FILE_HEADER.sizeof() # Note: location of sections can be controlled from PE header: sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER_format__, self.__data__[optional_header_offset:], file_offset = optional_header_offset) # According to solardesigner's findings for his # Tiny PE project, the optional header does not # need fields beyond "Subsystem" in order to be # loadable by the Windows loader (given that zeroes # are acceptable values and the header is loaded # in a zeroed memory page) # If trying to parse a full Optional Header fails # we try to parse it again with some 0 padding # MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69 if ( self.OPTIONAL_HEADER is None and len(self.__data__[optional_header_offset:]) >= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ): # Add enough zeroes to make up for the unused fields # padding_length = 128 # Create padding # padded_data = self.__data__[optional_header_offset:] + ( '\0' * padding_length) self.OPTIONAL_HEADER = 
self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER_format__, padded_data, file_offset = optional_header_offset) # Check the Magic in the OPTIONAL_HEADER and set the PE file # type accordingly # if self.OPTIONAL_HEADER is not None: if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE: self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE elif self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS: self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER64_format__, self.__data__[optional_header_offset:], file_offset = optional_header_offset) # Again, as explained above, we try to parse # a reduced form of the Optional Header which # is still valid despite not including all # structure members # MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69+4 if ( self.OPTIONAL_HEADER is None and len(self.__data__[optional_header_offset:]) >= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ): padding_length = 128 padded_data = self.__data__[optional_header_offset:] + ( '\0' * padding_length) self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER64_format__, padded_data, file_offset = optional_header_offset) if not self.FILE_HEADER: raise PEFormatError('File Header missing') # OC Patch: # Die gracefully if there is no OPTIONAL_HEADER field # 975440f5ad5e2e4a92c4d9a5f22f75c1 if self.PE_TYPE is None or self.OPTIONAL_HEADER is None: raise PEFormatError("No Optional Header found, invalid PE32 or PE32+ file") dll_characteristics_flags = self.retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_') # Set the Dll Characteristics flags according the the DllCharacteristics member self.set_flags( self.OPTIONAL_HEADER, self.OPTIONAL_HEADER.DllCharacteristics, dll_characteristics_flags) self.OPTIONAL_HEADER.DATA_DIRECTORY = [] #offset = (optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader) offset = (optional_header_offset + self.OPTIONAL_HEADER.sizeof()) self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER # The NumberOfRvaAndSizes is sanitized to stay within # reasonable limits so can be casted to an int # if self.OPTIONAL_HEADER.NumberOfRvaAndSizes > 0x10: self.__warnings.append( 'Suspicious NumberOfRvaAndSizes in the Optional Header. ' + 'Normal values are never larger than 0x10, the value is: 0x%x' % self.OPTIONAL_HEADER.NumberOfRvaAndSizes ) for i in xrange(int(0x7fffffffL & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)): if len(self.__data__[offset:]) == 0: break if len(self.__data__[offset:]) < 8: data = self.__data__[offset:]+'\0'*8 else: data = self.__data__[offset:] dir_entry = self.__unpack_data__( self.__IMAGE_DATA_DIRECTORY_format__, data, file_offset = offset) if dir_entry is None: break # Would fail if missing an entry # 1d4937b2fa4d84ad1bce0309857e70ca offending sample try: dir_entry.name = DIRECTORY_ENTRY[i] except (KeyError, AttributeError): break offset += dir_entry.sizeof() self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry) # If the offset goes outside the optional header, # the loop is broken, regardless of how many directories # NumberOfRvaAndSizes says there are # # We assume a normally sized optional header, hence that we do # a sizeof() instead of reading SizeOfOptionalHeader. 
# Then we add a default number of drectories times their size, # if we go beyond that, we assume the number of directories # is wrong and stop processing if offset >= (optional_header_offset + self.OPTIONAL_HEADER.sizeof() + 8*16) : break offset = self.parse_sections(sections_offset) # OC Patch: # There could be a problem if there are no raw data sections # greater than 0 # fc91013eb72529da005110a3403541b6 example # Should this throw an exception in the minimum header offset # can't be found? # rawDataPointers = [ s.PointerToRawData for s in self.sections if s.PointerToRawData>0] if len(rawDataPointers) > 0: lowest_section_offset = min(rawDataPointers) else: lowest_section_offset = None if not lowest_section_offset or lowest_section_offset<offset: self.header = self.__data__[:offset] else: self.header = self.__data__[:lowest_section_offset] # Check whether the entry point lies within a section # if self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None: # Check whether the entry point lies within the file # ep_offset = self.get_offset_from_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) if ep_offset > len(self.__data__): self.__warnings.append( 'Possibly corrupt file. AddressOfEntryPoint lies outside the file. ' + 'AddressOfEntryPoint: 0x%x' % self.OPTIONAL_HEADER.AddressOfEntryPoint ) else: self.__warnings.append( 'AddressOfEntryPoint lies outside the sections\' boundaries. ' + 'AddressOfEntryPoint: 0x%x' % self.OPTIONAL_HEADER.AddressOfEntryPoint ) if not fast_load: self.parse_data_directories() def get_warnings(self): """Return the list of warnings. Non-critical problems found when parsing the PE file are appended to a list of warnings. This method returns the full list. """ return self.__warnings def show_warnings(self): """Print the list of warnings. Non-critical problems found when parsing the PE file are appended to a list of warnings. This method prints the full list to standard output. """ for warning in self.__warnings: print '>', warning def full_load(self): """Process the data directories. This mathod will load the data directories which might not have been loaded if the "fast_load" option was used. """ self.parse_data_directories() def write(self, filename=None): """Write the PE file. This function will process all headers and components of the PE file and include all changes made (by just assigning to attributes in the PE objects) and write the changes back to a file whose name is provided as an argument. The filename is optional. The data to be written to the file will be returned as a 'str' object. 
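
    A minimal sketch (the attribute change and the output name are only
    examples):

        pe.FILE_HEADER.TimeDateStamp = 0
        new_data = pe.write(filename='patched.dll')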
""" file_data = list(self.__data__) for struct in self.__structures__: struct_data = list(struct.__pack__()) offset = struct.get_file_offset() file_data[offset:offset+len(struct_data)] = struct_data if hasattr(self, 'VS_VERSIONINFO'): if hasattr(self, 'FileInfo'): for entry in self.FileInfo: if hasattr(entry, 'StringTable'): for st_entry in entry.StringTable: for key, entry in st_entry.entries.items(): offsets = st_entry.entries_offsets[key] lengths = st_entry.entries_lengths[key] if len( entry ) > lengths[1]: uc = zip( list(entry[:lengths[1]]), ['\0'] * lengths[1] ) l = list() map(l.extend, uc) file_data[ offsets[1] : offsets[1] + lengths[1]*2 ] = l else: uc = zip( list(entry), ['\0'] * len(entry) ) l = list() map(l.extend, uc) file_data[ offsets[1] : offsets[1] + len(entry)*2 ] = l remainder = lengths[1] - len(entry) file_data[ offsets[1] + len(entry)*2 : offsets[1] + lengths[1]*2 ] = [ u'\0' ] * remainder*2 new_file_data = ''.join( [ chr(ord(c)) for c in file_data ] ) if filename: f = file(filename, 'wb+') f.write(new_file_data) f.close() return new_file_data def parse_sections(self, offset): """Fetch the PE file sections. The sections will be readily available in the "sections" attribute. Its attributes will contain all the section information plus "data" a buffer containing the section's data. The "Characteristics" member will be processed and attributes representing the section characteristics (with the 'IMAGE_SCN_' string trimmed from the constant's names) will be added to the section instance. Refer to the SectionStructure class for additional info. """ self.sections = [] for i in xrange(self.FILE_HEADER.NumberOfSections): section = SectionStructure(self.__IMAGE_SECTION_HEADER_format__) if not section: break section_offset = offset + section.sizeof() * i section.set_file_offset(section_offset) section.__unpack__(self.__data__[section_offset:]) self.__structures__.append(section) if section.SizeOfRawData > len(self.__data__): self.__warnings.append( ('Error parsing section %d. ' % i) + 'SizeOfRawData is larger than file.') if section.PointerToRawData > len(self.__data__): self.__warnings.append( ('Error parsing section %d. ' % i) + 'PointerToRawData points beyond the end of the file.') if section.Misc_VirtualSize > 0x10000000: self.__warnings.append( ('Suspicious value found parsing section %d. ' % i) + 'VirtualSize is extremely large > 256MiB.') if section.VirtualAddress > 0x10000000: self.__warnings.append( ('Suspicious value found parsing section %d. ' % i) + 'VirtualAddress is beyond 0x10000000.') # # Some packer used a non-aligned PointerToRawData in the sections, # which causes several common tools not to load the section data # properly as they blindly read from the indicated offset. # It seems that Windows will round the offset down to the largest # offset multiple of FileAlignment which is smaller than # PointerToRawData. The following code will do the same. # #alignment = self.OPTIONAL_HEADER.FileAlignment section_data_start = section.PointerToRawData if ( self.OPTIONAL_HEADER.FileAlignment != 0 and (section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0): self.__warnings.append( ('Error parsing section %d. ' % i) + 'Suspicious value for FileAlignment in the Optional Header. 
' + 'Normally the PointerToRawData entry of the sections\' structures ' + 'is a multiple of FileAlignment, this might imply the file ' + 'is trying to confuse tools which parse this incorrectly') section_data_end = section_data_start+section.SizeOfRawData section.set_data(self.__data__[section_data_start:section_data_end]) section_flags = self.retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_') # Set the section's flags according the the Characteristics member self.set_flags(section, section.Characteristics, section_flags) if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ): self.__warnings.append( ('Suspicious flags set for section %d. ' % i) + 'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set.' + 'This might indicate a packed executable.') self.sections.append(section) if self.FILE_HEADER.NumberOfSections > 0 and self.sections: return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections else: return offset def retrieve_flags(self, flag_dict, flag_filter): """Read the flags from a dictionary and return them in a usable form. Will return a list of (flag, value) for all flags in "flag_dict" matching the filter "flag_filter". """ return [(f[0], f[1]) for f in flag_dict.items() if isinstance(f[0], str) and f[0].startswith(flag_filter)] def set_flags(self, obj, flag_field, flags): """Will process the flags and set attributes in the object accordingly. The object "obj" will gain attritutes named after the flags provided in "flags" and valued True/False, matching the results of applyin each flag value from "flags" to flag_field. """ for flag in flags: if flag[1] & flag_field: setattr(obj, flag[0], True) else: setattr(obj, flag[0], False) def parse_data_directories(self): """Parse and process the PE file's data directories.""" directory_parsing = ( ('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory), ('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory), ('IMAGE_DIRECTORY_ENTRY_RESOURCE', self.parse_resources_directory), ('IMAGE_DIRECTORY_ENTRY_DEBUG', self.parse_debug_directory), ('IMAGE_DIRECTORY_ENTRY_BASERELOC', self.parse_relocations_directory), ('IMAGE_DIRECTORY_ENTRY_TLS', self.parse_directory_tls), ('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory), ('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports) ) for entry in directory_parsing: # OC Patch: # try: dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[ DIRECTORY_ENTRY[entry[0]]] except IndexError: break if dir_entry.VirtualAddress: value = entry[1](dir_entry.VirtualAddress, dir_entry.Size) if value: setattr(self, entry[0][6:], value) def parse_directory_bound_imports(self, rva, size): """""" bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__) bnd_descr_size = bnd_descr.sizeof() start = rva bound_imports = [] while True: bnd_descr = self.__unpack_data__( self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__, self.__data__[rva:rva+bnd_descr_size], file_offset = rva) if bnd_descr is None: # If can't parse directory then silently return. # This directory does not necesarily have to be valid to # still have a valid PE file self.__warnings.append( 'The Bound Imports directory exists but can\'t be parsed.') return if bnd_descr.all_zeroes(): break rva += bnd_descr.sizeof() forwarder_refs = [] for idx in xrange(bnd_descr.NumberOfModuleForwarderRefs): # Both structures IMAGE_BOUND_IMPORT_DESCRIPTOR and # IMAGE_BOUND_FORWARDER_REF have the same size. 
bnd_frwd_ref = self.__unpack_data__( self.__IMAGE_BOUND_FORWARDER_REF_format__, self.__data__[rva:rva+bnd_descr_size], file_offset = rva) # OC Patch: if not bnd_frwd_ref: raise PEFormatError( "IMAGE_BOUND_FORWARDER_REF cannot be read") rva += bnd_frwd_ref.sizeof() name_str = self.get_string_from_data( start+bnd_frwd_ref.OffsetModuleName, self.__data__) if not name_str: break forwarder_refs.append(BoundImportRefData( struct = bnd_frwd_ref, name = name_str)) name_str = self.get_string_from_data( start+bnd_descr.OffsetModuleName, self.__data__) if not name_str: break bound_imports.append( BoundImportDescData( struct = bnd_descr, name = name_str, entries = forwarder_refs)) return bound_imports def parse_directory_tls(self, rva, size): """""" if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE: format = self.__IMAGE_TLS_DIRECTORY_format__ elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: format = self.__IMAGE_TLS_DIRECTORY64_format__ tls_struct = self.__unpack_data__( format, self.get_data(rva), file_offset = self.get_offset_from_rva(rva)) if not tls_struct: return None return TlsData( struct = tls_struct ) def parse_relocations_directory(self, rva, size): """""" rlc = Structure(self.__IMAGE_BASE_RELOCATION_format__) rlc_size = rlc.sizeof() end = rva+size relocations = [] while rva<end: # OC Patch: # Malware that has bad rva entries will cause an error. # Just continue on after an exception # try: rlc = self.__unpack_data__( self.__IMAGE_BASE_RELOCATION_format__, self.get_data(rva, rlc_size), file_offset = self.get_offset_from_rva(rva) ) except PEFormatError: self.__warnings.append( 'Invalid relocation information. Can\'t read ' + 'data at RVA: 0x%x' % rva) rlc = None if not rlc: break reloc_entries = self.parse_relocations( rva+rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock-rlc_size) relocations.append( BaseRelocationData( struct = rlc, entries = reloc_entries)) if not rlc.SizeOfBlock: break rva += rlc.SizeOfBlock return relocations def parse_relocations(self, data_rva, rva, size): """""" data = self.get_data(data_rva, size) entries = [] for idx in xrange(len(data)/2): word = struct.unpack('<H', data[idx*2:(idx+1)*2])[0] reloc_type = (word>>12) reloc_offset = (word&0x0fff) entries.append( RelocationData( type = reloc_type, rva = reloc_offset+rva)) return entries def parse_debug_directory(self, rva, size): """""" dbg = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__) dbg_size = dbg.sizeof() debug = [] for idx in xrange(size/dbg_size): try: data = self.get_data(rva+dbg_size*idx, dbg_size) except PEFormatError, e: self.__warnings.append( 'Invalid debug information. Can\'t read ' + 'data at RVA: 0x%x' % rva) return None dbg = self.__unpack_data__( self.__IMAGE_DEBUG_DIRECTORY_format__, data, file_offset = self.get_offset_from_rva(rva+dbg_size*idx)) if not dbg: return None debug.append( DebugData( struct = dbg)) return debug def parse_resources_directory(self, rva, size=0, base_rva = None, level = 0): """Parse the resources directory. Given the rva of the resources directory, it will process all its entries. The root will have the corresponding member of its structure, IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the entries in the directory. Those entries will have, correspondingly, all the structure's members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one, "directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure representing upper layers of the tree. This one will also have an 'entries' attribute, pointing to the 3rd, and last, level. Another directory with more entries. 
Those last entries will have a new atribute (both 'leaf' or 'data_entry' can be used to access it). This structure finally points to the resource data. All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY, are available as its attributes. """ # OC Patch: original_rva = rva if base_rva is None: base_rva = rva resources_section = self.get_section_by_rva(rva) try: # If the RVA is invalid all would blow up. Some EXEs seem to be # specially nasty and have an invalid RVA. data = self.get_data(rva) except PEFormatError, e: self.__warnings.append( 'Invalid resources directory. Can\'t read ' + 'directory data at RVA: 0x%x' % rva) return None # Get the resource directory structure, that is, the header # of the table preceding the actual entries # resource_dir = self.__unpack_data__( self.__IMAGE_RESOURCE_DIRECTORY_format__, data, file_offset = self.get_offset_from_rva(rva) ) if resource_dir is None: # If can't parse resources directory then silently return. # This directory does not necesarily have to be valid to # still have a valid PE file self.__warnings.append( 'Invalid resources directory. Can\'t parse ' + 'directory data at RVA: 0x%x' % rva) return None dir_entries = [] # Advance the rva to the positon immediately following the directory # table header and pointing to the first entry in the table # rva += resource_dir.sizeof() number_of_entries = ( resource_dir.NumberOfNamedEntries + resource_dir.NumberOfIdEntries ) strings_to_postprocess = list() for idx in xrange(number_of_entries): res = self.parse_resource_entry(rva) if res is None: self.__warnings.append( 'Error parsing the resources directory, ' + 'Entry %d is invalid, RVA = 0x%x. ' % (idx, rva) ) break entry_name = None entry_id = None # If all named entries have been processed, only Id ones # remain if idx >= resource_dir.NumberOfNamedEntries: entry_id = res.Name else: ustr_offset = base_rva+res.NameOffset try: #entry_name = self.get_string_u_at_rva(ustr_offset, max_length=16) entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset) strings_to_postprocess.append(entry_name) except PEFormatError, excp: self.__warnings.append( 'Error parsing the resources directory, ' + 'attempting to read entry name. ' + 'Can\'t read unicode string at offset 0x%x' % (ustr_offset) ) if res.DataIsDirectory: # OC Patch: # # One trick malware can do is to recursively reference # the next directory. This causes hilarity to ensue when # trying to parse everything correctly. # If the original RVA given to this function is equal to # the next one to parse, we assume that it's a trick. # Instead of raising a PEFormatError this would skip some # reasonable data so we just break. 
# # 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample if original_rva == (base_rva + res.OffsetToDirectory): break else: entry_directory = self.parse_resources_directory( base_rva+res.OffsetToDirectory, base_rva=base_rva, level = level+1) if not entry_directory: break dir_entries.append( ResourceDirEntryData( struct = res, name = entry_name, id = entry_id, directory = entry_directory)) else: struct = self.parse_resource_data_entry( base_rva + res.OffsetToDirectory) if struct: entry_data = ResourceDataEntryData( struct = struct, lang = res.Name & 0xff, sublang = (res.Name>>8) & 0xff) dir_entries.append( ResourceDirEntryData( struct = res, name = entry_name, id = entry_id, data = entry_data)) else: break # Check if this entry contains version information # if level == 0 and res.Id == RESOURCE_TYPE['RT_VERSION']: if len(dir_entries)>0: last_entry = dir_entries[-1] rt_version_struct = None try: rt_version_struct = last_entry.directory.entries[0].directory.entries[0].data.struct except: # Maybe a malformed directory structure...? # Lets ignore it pass if rt_version_struct is not None: self.parse_version_information(rt_version_struct) rva += res.sizeof() string_rvas = [s.get_rva() for s in strings_to_postprocess] string_rvas.sort() for idx, s in enumerate(strings_to_postprocess): s.render_pascal_16() resource_directory_data = ResourceDirData( struct = resource_dir, entries = dir_entries) return resource_directory_data def parse_resource_data_entry(self, rva): """Parse a data entry from the resources directory.""" try: # If the RVA is invalid all would blow up. Some EXEs seem to be # specially nasty and have an invalid RVA. data = self.get_data(rva) except PEFormatError, excp: self.__warnings.append( 'Error parsing a resource directory data entry, ' + 'the RVA is invalid: 0x%x' % ( rva ) ) return None data_entry = self.__unpack_data__( self.__IMAGE_RESOURCE_DATA_ENTRY_format__, data, file_offset = self.get_offset_from_rva(rva) ) return data_entry def parse_resource_entry(self, rva): """Parse a directory entry from the resources directory.""" resource = self.__unpack_data__( self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, self.get_data(rva), file_offset = self.get_offset_from_rva(rva) ) if resource is None: return None #resource.NameIsString = (resource.Name & 0x80000000L) >> 31 resource.NameOffset = resource.Name & 0x7FFFFFFFL resource.__pad = resource.Name & 0xFFFF0000L resource.Id = resource.Name & 0x0000FFFFL resource.DataIsDirectory = (resource.OffsetToData & 0x80000000L) >> 31 resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFFL return resource def parse_version_information(self, version_struct): """Parse version information structure. The date will be made available in three attributes of the PE object. VS_VERSIONINFO will contain the first three fields of the main structure: 'Length', 'ValueLength', and 'Type' VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes: 'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS', 'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags', 'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS' FileInfo is a list of all StringFileInfo and VarFileInfo structures. StringFileInfo structures will have a list as an attribute named 'StringTable' containing all the StringTable structures. Each of those structures contains a dictionary 'entries' with all the key/value version information string pairs. 
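
    As a rough sketch, those string pairs could be listed with (assuming
    'pe' is a fully loaded instance of a file carrying version information):

        for fileinfo in pe.FileInfo:
            if hasattr(fileinfo, 'StringTable'):
                for st in fileinfo.StringTable:
                    for key, value in st.entries.items():
                        print key, value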
VarFileInfo structures will have a list as an attribute named 'Var' containing all Var structures. Each Var structure will have a dictionary as an attribute named 'entry' which will contain the name and value of the Var. """ # Retrieve the data for the version info resource # start_offset = self.get_offset_from_rva( version_struct.OffsetToData ) raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ] # Map the main structure and the subsequent string # versioninfo_struct = self.__unpack_data__( self.__VS_VERSIONINFO_format__, raw_data, file_offset = start_offset ) if versioninfo_struct is None: return ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof() try: versioninfo_string = self.get_string_u_at_rva( ustr_offset ) except PEFormatError, excp: self.__warnings.append( 'Error parsing the version information, ' + 'attempting to read VS_VERSION_INFO string. Can\'t ' + 'read unicode string at offset 0x%x' % ( ustr_offset ) ) versioninfo_string = None # If the structure does not contain the expected name, it's assumed to be invalid # if versioninfo_string != u'VS_VERSION_INFO': self.__warnings.append('Invalid VS_VERSION_INFO block') return # Set the PE object's VS_VERSIONINFO to this one # self.VS_VERSIONINFO = versioninfo_struct # The the Key attribute to point to the unicode string identifying the structure # self.VS_VERSIONINFO.Key = versioninfo_string # Process the fixed version information, get the offset and structure # fixedfileinfo_offset = self.dword_align( versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1), version_struct.OffsetToData) fixedfileinfo_struct = self.__unpack_data__( self.__VS_FIXEDFILEINFO_format__, raw_data[fixedfileinfo_offset:], file_offset = start_offset+fixedfileinfo_offset ) if not fixedfileinfo_struct: return # Set the PE object's VS_FIXEDFILEINFO to this one # self.VS_FIXEDFILEINFO = fixedfileinfo_struct # Start parsing all the StringFileInfo and VarFileInfo structures # # Get the first one # stringfileinfo_offset = self.dword_align( fixedfileinfo_offset + fixedfileinfo_struct.sizeof(), version_struct.OffsetToData) original_stringfileinfo_offset = stringfileinfo_offset # Set the PE object's attribute that will contain them all. # self.FileInfo = list() while True: # Process the StringFileInfo/VarFileInfo struct # stringfileinfo_struct = self.__unpack_data__( self.__StringFileInfo_format__, raw_data[stringfileinfo_offset:], file_offset = start_offset+stringfileinfo_offset ) if stringfileinfo_struct is None: self.__warnings.append( 'Error parsing StringFileInfo/VarFileInfo struct' ) return None # Get the subsequent string defining the structure. # ustr_offset = ( version_struct.OffsetToData + stringfileinfo_offset + versioninfo_struct.sizeof() ) try: stringfileinfo_string = self.get_string_u_at_rva( ustr_offset ) except PEFormatError, excp: self.__warnings.append( 'Error parsing the version information, ' + 'attempting to read StringFileInfo string. 
Can\'t ' + 'read unicode string at offset 0x%x' % ( ustr_offset ) ) break # Set such string as the Key attribute # stringfileinfo_struct.Key = stringfileinfo_string # Append the structure to the PE object's list # self.FileInfo.append(stringfileinfo_struct) # Parse a StringFileInfo entry # if stringfileinfo_string == u'StringFileInfo': if stringfileinfo_struct.Type == 1 and stringfileinfo_struct.ValueLength == 0: stringtable_offset = self.dword_align( stringfileinfo_offset + stringfileinfo_struct.sizeof() + 2*(len(stringfileinfo_string)+1), version_struct.OffsetToData) stringfileinfo_struct.StringTable = list() # Process the String Table entries # while True: stringtable_struct = self.__unpack_data__( self.__StringTable_format__, raw_data[stringtable_offset:], file_offset = start_offset+stringtable_offset ) if not stringtable_struct: break ustr_offset = ( version_struct.OffsetToData + stringtable_offset + stringtable_struct.sizeof() ) try: stringtable_string = self.get_string_u_at_rva( ustr_offset ) except PEFormatError, excp: self.__warnings.append( 'Error parsing the version information, ' + 'attempting to read StringTable string. Can\'t ' + 'read unicode string at offset 0x%x' % ( ustr_offset ) ) break stringtable_struct.LangID = stringtable_string stringtable_struct.entries = dict() stringtable_struct.entries_offsets = dict() stringtable_struct.entries_lengths = dict() stringfileinfo_struct.StringTable.append(stringtable_struct) entry_offset = self.dword_align( stringtable_offset + stringtable_struct.sizeof() + 2*(len(stringtable_string)+1), version_struct.OffsetToData) # Process all entries in the string table # while entry_offset < stringtable_offset + stringtable_struct.Length: string_struct = self.__unpack_data__( self.__String_format__, raw_data[entry_offset:], file_offset = start_offset+entry_offset ) if not string_struct: break ustr_offset = ( version_struct.OffsetToData + entry_offset + string_struct.sizeof() ) try: key = self.get_string_u_at_rva( ustr_offset ) key_offset = self.get_offset_from_rva( ustr_offset ) except PEFormatError, excp: self.__warnings.append( 'Error parsing the version information, ' + 'attempting to read StringTable Key string. Can\'t ' + 'read unicode string at offset 0x%x' % ( ustr_offset ) ) break value_offset = self.dword_align( 2*(len(key)+1) + entry_offset + string_struct.sizeof(), version_struct.OffsetToData) ustr_offset = version_struct.OffsetToData + value_offset try: value = self.get_string_u_at_rva( ustr_offset, max_length = string_struct.ValueLength ) value_offset = self.get_offset_from_rva( ustr_offset ) except PEFormatError, excp: self.__warnings.append( 'Error parsing the version information, ' + 'attempting to read StringTable Value string. 
' + 'Can\'t read unicode string at offset 0x%x' % ( ustr_offset ) ) break if string_struct.Length == 0: entry_offset = stringtable_offset + stringtable_struct.Length else: entry_offset = self.dword_align( string_struct.Length+entry_offset, version_struct.OffsetToData) key_as_char = [] for c in key: if ord(c)>128: key_as_char.append('\\x%02x' %ord(c)) else: key_as_char.append(c) key_as_char = ''.join(key_as_char) setattr(stringtable_struct, key_as_char, value) stringtable_struct.entries[key] = value stringtable_struct.entries_offsets[key] = (key_offset, value_offset) stringtable_struct.entries_lengths[key] = (len(key), len(value)) stringtable_offset = self.dword_align( stringtable_struct.Length + stringtable_offset, version_struct.OffsetToData) if stringtable_offset >= stringfileinfo_struct.Length: break # Parse a VarFileInfo entry # elif stringfileinfo_string == u'VarFileInfo': varfileinfo_struct = stringfileinfo_struct varfileinfo_struct.name = 'VarFileInfo' if varfileinfo_struct.Type == 1 and varfileinfo_struct.ValueLength == 0: var_offset = self.dword_align( stringfileinfo_offset + varfileinfo_struct.sizeof() + 2*(len(stringfileinfo_string)+1), version_struct.OffsetToData) varfileinfo_struct.Var = list() # Process all entries # while True: var_struct = self.__unpack_data__( self.__Var_format__, raw_data[var_offset:], file_offset = start_offset+var_offset ) if not var_struct: break ustr_offset = ( version_struct.OffsetToData + var_offset + var_struct.sizeof() ) try: var_string = self.get_string_u_at_rva( ustr_offset ) except PEFormatError, excp: self.__warnings.append( 'Error parsing the version information, ' + 'attempting to read VarFileInfo Var string. ' + 'Can\'t read unicode string at offset 0x%x' % (ustr_offset)) break varfileinfo_struct.Var.append(var_struct) varword_offset = self.dword_align( 2*(len(var_string)+1) + var_offset + var_struct.sizeof(), version_struct.OffsetToData) orig_varword_offset = varword_offset while varword_offset < orig_varword_offset + var_struct.ValueLength: word1 = self.get_word_from_data( raw_data[varword_offset:varword_offset+2], 0) word2 = self.get_word_from_data( raw_data[varword_offset+2:varword_offset+4], 0) varword_offset += 4 var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)} var_offset = self.dword_align( var_offset+var_struct.Length, version_struct.OffsetToData) if var_offset <= var_offset+var_struct.Length: break # Increment and align the offset # stringfileinfo_offset = self.dword_align( stringfileinfo_struct.Length+stringfileinfo_offset, version_struct.OffsetToData) # Check if all the StringFileInfo and VarFileInfo items have been processed # if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length: break def parse_export_directory(self, rva, size): """Parse the export directory. Given the rva of the export directory, it will process all its entries. The exports will be made available through a list "exports" containing a tuple with the following elements: (ordinal, symbol_address, symbol_name) And also through a dicionary "exports_by_ordinal" whose keys will be the ordinals and the values tuples of the from: (symbol_address, symbol_name) The symbol addresses are relative, not absolute. 
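
    As a sketch, forwarded symbols (if any) can be spotted through the
    DIRECTORY_ENTRY_EXPORT attribute that this method populates:

        for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
            if exp.forwarder is not None:
                print exp.name, 'is forwarded to', exp.forwarder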
""" try: export_dir = self.__unpack_data__( self.__IMAGE_EXPORT_DIRECTORY_format__, self.get_data(rva), file_offset = self.get_offset_from_rva(rva) ) except PEFormatError: self.__warnings.append( 'Error parsing export directory at RVA: 0x%x' % ( rva ) ) return if not export_dir: return try: address_of_names = self.get_data( export_dir.AddressOfNames, export_dir.NumberOfNames*4) address_of_name_ordinals = self.get_data( export_dir.AddressOfNameOrdinals, export_dir.NumberOfNames*4) address_of_functions = self.get_data( export_dir.AddressOfFunctions, export_dir.NumberOfFunctions*4) except PEFormatError: self.__warnings.append( 'Error parsing export directory at RVA: 0x%x' % ( rva ) ) return exports = [] for i in xrange(export_dir.NumberOfNames): symbol_name = self.get_string_at_rva( self.get_dword_from_data(address_of_names, i)) symbol_ordinal = self.get_word_from_data( address_of_name_ordinals, i) if symbol_ordinal*4<len(address_of_functions): symbol_address = self.get_dword_from_data( address_of_functions, symbol_ordinal) else: # Corrupt? a bad pointer... we assume it's all # useless, no exports return None # If the funcion's rva points within the export directory # it will point to a string with the forwarded symbol's string # instead of pointing the the function start address. if symbol_address>=rva and symbol_address<rva+size: forwarder_str = self.get_string_at_rva(symbol_address) else: forwarder_str = None exports.append( ExportData( ordinal = export_dir.Base+symbol_ordinal, address = symbol_address, name = symbol_name, forwarder = forwarder_str)) ordinals = [exp.ordinal for exp in exports] for idx in xrange(export_dir.NumberOfFunctions): if not idx+export_dir.Base in ordinals: symbol_address = self.get_dword_from_data( address_of_functions, idx) # # Checking for forwarder again. # if symbol_address>=rva and symbol_address<rva+size: forwarder_str = self.get_string_at_rva(symbol_address) else: forwarder_str = None exports.append( ExportData( ordinal = export_dir.Base+idx, address = symbol_address, name = None, forwarder = forwarder_str)) return ExportDirData( struct = export_dir, symbols = exports) def dword_align(self, offset, base): offset += base return (offset+3) - ((offset+3)%4) - base def parse_delay_import_directory(self, rva, size): """Walk and parse the delay import directory.""" import_descs = [] while True: try: # If the RVA is invalid all would blow up. Some PEs seem to be # specially nasty and have an invalid RVA. data = self.get_data(rva) except PEFormatError, e: self.__warnings.append( 'Error parsing the Delay import directory at RVA: 0x%x' % ( rva ) ) break import_desc = self.__unpack_data__( self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__, data, file_offset = self.get_offset_from_rva(rva) ) # If the structure is all zeores, we reached the end of the list if not import_desc or import_desc.all_zeroes(): break rva += import_desc.sizeof() try: import_data = self.parse_imports( import_desc.pINT, import_desc.pIAT, None) except PEFormatError, e: self.__warnings.append( 'Error parsing the Delay import directory. ' + 'Invalid import data at RVA: 0x%x' % ( rva ) ) break if not import_data: continue dll = self.get_string_at_rva(import_desc.szName) if dll: import_descs.append( ImportDescData( struct = import_desc, imports = import_data, dll = dll)) return import_descs def parse_import_directory(self, rva, size): """Walk and parse the import directory.""" import_descs = [] while True: try: # If the RVA is invalid all would blow up. 
Some EXEs seem to be # specially nasty and have an invalid RVA. data = self.get_data(rva) except PEFormatError, e: self.__warnings.append( 'Error parsing the Import directory at RVA: 0x%x' % ( rva ) ) break import_desc = self.__unpack_data__( self.__IMAGE_IMPORT_DESCRIPTOR_format__, data, file_offset = self.get_offset_from_rva(rva) ) # If the structure is all zeores, we reached the end of the list if not import_desc or import_desc.all_zeroes(): break rva += import_desc.sizeof() try: import_data = self.parse_imports( import_desc.OriginalFirstThunk, import_desc.FirstThunk, import_desc.ForwarderChain) except PEFormatError, excp: self.__warnings.append( 'Error parsing the Import directory. ' + 'Invalid Import data at RVA: 0x%x' % ( rva ) ) break #raise excp if not import_data: continue dll = self.get_string_at_rva(import_desc.Name) if dll: import_descs.append( ImportDescData( struct = import_desc, imports = import_data, dll = dll)) return import_descs def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain): """Parse the imported symbols. It will fill a list, which will be avalable as the dictionary attribute "imports". Its keys will be the DLL names and the values all the symbols imported from that object. """ imported_symbols = [] imports_section = self.get_section_by_rva(first_thunk) if not imports_section: raise PEFormatError, 'Invalid/corrupt imports.' # Import Lookup Table. Contains ordinals or pointers to strings. ilt = self.get_import_table(original_first_thunk) # Import Address Table. May have identical content to ILT if # PE file is not bounded, Will contain the address of the # imported symbols once the binary is loaded or if it is already # bound. iat = self.get_import_table(first_thunk) # OC Patch: # Would crash if iat or ilt had None type if not iat and not ilt: raise PEFormatError( 'Invalid Import Table information. 
' + 'Both ILT and IAT appear to be broken.') if not iat and ilt: table = ilt elif iat and not ilt: table = iat elif ilt and ((len(ilt) and len(iat)==0) or (len(ilt) == len(iat))): table = ilt elif (ilt and len(ilt))==0 and (iat and len(iat)): table = iat else: return None for idx in xrange(len(table)): imp_ord = None imp_hint = None imp_name = None hint_name_table_rva = None if table[idx].AddressOfData: if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE: ordinal_flag = IMAGE_ORDINAL_FLAG elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: ordinal_flag = IMAGE_ORDINAL_FLAG64 # If imported by ordinal, we will append the ordinal number # if table[idx].AddressOfData & ordinal_flag: import_by_ordinal = True imp_ord = table[idx].AddressOfData & 0xffff imp_name = None else: import_by_ordinal = False try: hint_name_table_rva = table[idx].AddressOfData & 0x7fffffff data = self.get_data(hint_name_table_rva, 2) # Get the Hint imp_hint = self.get_word_from_data(data, 0) imp_name = self.get_string_at_rva(table[idx].AddressOfData+2) except PEFormatError, e: pass imp_address = first_thunk+self.OPTIONAL_HEADER.ImageBase+idx*4 if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData: imp_bound = iat[idx].AddressOfData else: imp_bound = None if imp_name != '' and (imp_ord or imp_name): imported_symbols.append( ImportData( import_by_ordinal = import_by_ordinal, ordinal = imp_ord, hint = imp_hint, name = imp_name, bound = imp_bound, address = imp_address, hint_name_table_rva = hint_name_table_rva)) return imported_symbols def get_import_table(self, rva): table = [] while True and rva: try: data = self.get_data(rva) except PEFormatError, e: self.__warnings.append( 'Error parsing the import table. ' + 'Invalid data at RVA: 0x%x' % ( rva ) ) return None if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE: format = self.__IMAGE_THUNK_DATA_format__ elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: format = self.__IMAGE_THUNK_DATA64_format__ thunk_data = self.__unpack_data__( format, data, file_offset=self.get_offset_from_rva(rva) ) if not thunk_data or thunk_data.all_zeroes(): break rva += thunk_data.sizeof() table.append(thunk_data) return table def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None): """Returns the data corresponding to the memory layout of the PE file. The data includes the PE header and the sections loaded at offsets corresponding to their relative virtual addresses. (the VirtualAddress section header member). Any offset in this data corresponds to the absolute memory address ImageBase+offset. The optional argument 'max_virtual_address' provides with means of limiting which section are processed. Any section with their VirtualAddress beyond this value will be skipped. Normally, sections with values beyond this range are just there to confuse tools. It's a common trick to see in packed executables. If the 'ImageBase' optional argument is supplied, the file's relocations will be applied to the image by calling the 'relocate_image()' method. """ # Collect all sections in one code block data = self.header for section in self.sections: # Miscellanous integrity tests. # Some packer will set these to bogus values to # make tools go nuts. 
# if section.Misc_VirtualSize == 0 or section.SizeOfRawData == 0: continue if section.SizeOfRawData > len(self.__data__): continue if section.PointerToRawData > len(self.__data__): continue if section.VirtualAddress >= max_virtual_address: continue padding_length = section.VirtualAddress - len(data) if padding_length>0: data += '\0'*padding_length elif padding_length<0: data = data[:padding_length] data += section.data return data def get_data(self, rva, length=None): """Get data regardless of the section where it lies on. Given a rva and the size of the chunk to retrieve, this method will find the section where the data lies and return the data. """ s = self.get_section_by_rva(rva) if not s: if rva<len(self.header): if length: end = rva+length else: end = None return self.header[rva:end] raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?' return s.get_data(rva, length) def get_rva_from_offset(self, offset): """Get the rva corresponding to this file offset. """ s = self.get_section_by_offset(offset) if not s: raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset) return s.get_rva_from_offset(offset) def get_offset_from_rva(self, rva): """Get the file offset corresponding to this rva. Given a rva , this method will find the section where the data lies and return the offset within the file. """ s = self.get_section_by_rva(rva) if not s: raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?' return s.get_offset_from_rva(rva) def get_string_at_rva(self, rva): """Get an ASCII string located at the given address.""" s = self.get_section_by_rva(rva) if not s: if rva<len(self.header): return self.get_string_from_data(rva, self.header) return None return self.get_string_from_data(rva-s.VirtualAddress, s.data) def get_string_from_data(self, offset, data): """Get an ASCII string from within the data.""" # OC Patch b = None try: b = data[offset] except IndexError: return '' s = '' while ord(b): s += b offset += 1 try: b = data[offset] except IndexError: break return s def get_string_u_at_rva(self, rva, max_length = 2**16): """Get an Unicode string located at the given address.""" try: # If the RVA is invalid all would blow up. Some EXEs seem to be # specially nasty and have an invalid RVA. 
data = self.get_data(rva, 2) except PEFormatError, e: return None #length = struct.unpack('<H', data)[0] s = u'' for idx in xrange(max_length): try: uchr = struct.unpack('<H', self.get_data(rva+2*idx, 2))[0] except struct.error: break if unichr(uchr) == u'\0': break s += unichr(uchr) return s def get_section_by_offset(self, offset): """Get the section containing the given file offset.""" sections = [s for s in self.sections if s.contains_offset(offset)] if sections: return sections[0] return None def get_section_by_rva(self, rva): """Get the section containing the given address.""" sections = [s for s in self.sections if s.contains_rva(rva)] if sections: return sections[0] return None def __str__(self): return self.dump_info() def print_info(self): """Print all the PE header information in a human readable from.""" print self.dump_info() def dump_info(self, dump=None): """Dump all the PE header information into human readable string.""" if dump is None: dump = Dump() warnings = self.get_warnings() if warnings: dump.add_header('Parsing Warnings') for warning in warnings: dump.add_line(warning) dump.add_newline() dump.add_header('DOS_HEADER') dump.add_lines(self.DOS_HEADER.dump()) dump.add_newline() dump.add_header('NT_HEADERS') dump.add_lines(self.NT_HEADERS.dump()) dump.add_newline() dump.add_header('FILE_HEADER') dump.add_lines(self.FILE_HEADER.dump()) image_flags = self.retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_') dump.add('Flags: ') flags = [] for flag in image_flags: if getattr(self.FILE_HEADER, flag[0]): flags.append(flag[0]) dump.add_line(', '.join(flags)) dump.add_newline() if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None: dump.add_header('OPTIONAL_HEADER') dump.add_lines(self.OPTIONAL_HEADER.dump()) dll_characteristics_flags = self.retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_') dump.add('DllCharacteristics: ') flags = [] for flag in dll_characteristics_flags: if getattr(self.OPTIONAL_HEADER, flag[0]): flags.append(flag[0]) dump.add_line(', '.join(flags)) dump.add_newline() dump.add_header('PE Sections') section_flags = self.retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_') for section in self.sections: dump.add_lines(section.dump()) dump.add('Flags: ') flags = [] for flag in section_flags: if getattr(section, flag[0]): flags.append(flag[0]) dump.add_line(', '.join(flags)) dump.add_line('Entropy: %f (Min=0.0, Max=8.0)' % section.get_entropy() ) if md5 is not None: dump.add_line('MD5 hash: %s' % section.get_hash_md5() ) if sha1 is not None: dump.add_line('SHA-1 hash: %s' % section.get_hash_sha1() ) if sha256 is not None: dump.add_line('SHA-256 hash: %s' % section.get_hash_sha256() ) if sha512 is not None: dump.add_line('SHA-512 hash: %s' % section.get_hash_sha512() ) dump.add_newline() if (hasattr(self, 'OPTIONAL_HEADER') and hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ): dump.add_header('Directories') for idx in xrange(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)): directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx] dump.add_lines(directory.dump()) dump.add_newline() if hasattr(self, 'VS_VERSIONINFO'): dump.add_header('Version Information') dump.add_lines(self.VS_VERSIONINFO.dump()) dump.add_newline() if hasattr(self, 'VS_FIXEDFILEINFO'): dump.add_lines(self.VS_FIXEDFILEINFO.dump()) dump.add_newline() if hasattr(self, 'FileInfo'): for entry in self.FileInfo: dump.add_lines(entry.dump()) dump.add_newline() if hasattr(entry, 'StringTable'): for st_entry in entry.StringTable: [dump.add_line(' '+line) for line in st_entry.dump()] 
dump.add_line(' LangID: '+st_entry.LangID) dump.add_newline() for str_entry in st_entry.entries.items(): dump.add_line(' '+str_entry[0]+': '+str_entry[1]) dump.add_newline() elif hasattr(entry, 'Var'): for var_entry in entry.Var: if hasattr(var_entry, 'entry'): [dump.add_line(' '+line) for line in var_entry.dump()] dump.add_line( ' ' + var_entry.entry.keys()[0] + ': ' + var_entry.entry.values()[0]) dump.add_newline() if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'): dump.add_header('Exported symbols') dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump()) dump.add_newline() dump.add_line('%-10s %-10s %s' % ('Ordinal', 'RVA', 'Name')) for export in self.DIRECTORY_ENTRY_EXPORT.symbols: dump.add('%-10d 0x%08Xh %s' % ( export.ordinal, export.address, export.name)) if export.forwarder: dump.add_line(' forwarder: %s' % export.forwarder) else: dump.add_newline() dump.add_newline() if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'): dump.add_header('Imported symbols') for module in self.DIRECTORY_ENTRY_IMPORT: dump.add_lines(module.struct.dump()) dump.add_newline() for symbol in module.imports: if symbol.import_by_ordinal is True: dump.add('%s Ordinal[%s] (Imported by Ordinal)' % ( module.dll, str(symbol.ordinal))) else: dump.add('%s.%s Hint[%s]' % ( module.dll, symbol.name, str(symbol.hint))) if symbol.bound: dump.add_line(' Bound: 0x%08X' % (symbol.bound)) else: dump.add_newline() dump.add_newline() if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'): dump.add_header('Bound imports') for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT: dump.add_lines(bound_imp_desc.struct.dump()) dump.add_line('DLL: %s' % bound_imp_desc.name) dump.add_newline() for bound_imp_ref in bound_imp_desc.entries: dump.add_lines(bound_imp_ref.struct.dump(), 4) dump.add_line('DLL: %s' % bound_imp_ref.name, 4) dump.add_newline() if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'): dump.add_header('Delay Imported symbols') for module in self.DIRECTORY_ENTRY_DELAY_IMPORT: dump.add_lines(module.struct.dump()) dump.add_newline() for symbol in module.imports: if symbol.import_by_ordinal is True: dump.add('%s Ordinal[%s] (Imported by Ordinal)' % ( module.dll, str(symbol.ordinal))) else: dump.add('%s.%s Hint[%s]' % ( module.dll, symbol.name, str(symbol.hint))) if symbol.bound: dump.add_line(' Bound: 0x%08X' % (symbol.bound)) else: dump.add_newline() dump.add_newline() if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'): dump.add_header('Resource directory') dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump()) for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries: if resource_type.name is not None: dump.add_line('Name: [%s]' % resource_type.name, 2) else: dump.add_line('Id: [0x%X] (%s)' % ( resource_type.struct.Id, RESOURCE_TYPE.get( resource_type.struct.Id, '-')), 2) dump.add_lines(resource_type.struct.dump(), 2) if hasattr(resource_type, 'directory'): dump.add_lines(resource_type.directory.struct.dump(), 4) for resource_id in resource_type.directory.entries: if resource_id.name is not None: dump.add_line('Name: [%s]' % resource_id.name, 6) else: dump.add_line('Id: [0x%X]' % resource_id.struct.Id, 6) dump.add_lines(resource_id.struct.dump(), 6) if hasattr(resource_id, 'directory'): dump.add_lines(resource_id.directory.struct.dump(), 8) for resource_lang in resource_id.directory.entries: # dump.add_line('\\--- LANG [%d,%d][%s]' % ( # resource_lang.data.lang, # resource_lang.data.sublang, # LANG[resource_lang.data.lang]), 8) dump.add_lines(resource_lang.struct.dump(), 10) dump.add_lines(resource_lang.data.struct.dump(), 12) 
dump.add_newline() dump.add_newline() if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and self.DIRECTORY_ENTRY_TLS and self.DIRECTORY_ENTRY_TLS.struct ): dump.add_header('TLS') dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump()) dump.add_newline() if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'): dump.add_header('Debug information') for dbg in self.DIRECTORY_ENTRY_DEBUG: dump.add_lines(dbg.struct.dump()) try: dump.add_line('Type: '+DEBUG_TYPE[dbg.struct.Type]) except KeyError: dump.add_line('Type: 0x%x(Unknown)' % dbg.struct.Type) dump.add_newline() if hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'): dump.add_header('Base relocations') for base_reloc in self.DIRECTORY_ENTRY_BASERELOC: dump.add_lines(base_reloc.struct.dump()) for reloc in base_reloc.entries: try: dump.add_line('%08Xh %s' % ( reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4) except KeyError: dump.add_line('0x%08X 0x%x(Unknown)' % ( reloc.rva, reloc.type), 4) dump.add_newline() return dump.get_text() # OC Patch def get_physical_by_rva(self, rva): """Gets the physical address in the PE file from an RVA value.""" try: return self.get_offset_from_rva(rva) except Exception: return None ## # Double-Word get/set ## def get_data_from_dword(self, dword): """Return a four byte string representing the double word value. (little endian).""" return struct.pack('<L', dword) def get_dword_from_data(self, data, offset): """Convert four bytes of data to a double word (little endian) 'offset' is assumed to index into a dword array. So setting it to N will return a dword out of the data sarting at offset N*4. Returns None if the data can't be turned into a double word. """ if (offset+1)*4 > len(data): return None return struct.unpack('<L', data[offset*4:(offset+1)*4])[0] def get_dword_at_rva(self, rva): """Return the double word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset. """ try: return self.get_dword_from_data(self.get_data(rva)[:4], 0) except PEFormatError: return None def get_dword_from_offset(self, offset): """Return the double word value at the given file offset. (little endian)""" if offset+4 > len(self.__data__): return None return self.get_dword_from_data(self.__data__[offset:offset+4], 0) def set_dword_at_rva(self, rva, dword): """Set the double word value at the file offset corresponding to the given RVA.""" return self.set_bytes_at_rva(rva, self.get_data_from_dword(dword)) def set_dword_at_offset(self, offset, dword): """Set the double word value at the given file offset.""" return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword)) ## # Word get/set ## def get_data_from_word(self, word): """Return a two byte string representing the word value. (little endian).""" return struct.pack('<H', word) def get_word_from_data(self, data, offset): """Convert two bytes of data to a word (little endian) 'offset' is assumed to index into a word array. So setting it to N will return a dword out of the data sarting at offset N*2. Returns None if the data can't be turned into a word. """ if (offset+1)*2 > len(data): return None return struct.unpack('<H', data[offset*2:(offset+1)*2])[0] def get_word_at_rva(self, rva): """Return the word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset. """ try: return self.get_word_from_data(self.get_data(rva)[:2], 0) except PEFormatError: return None def get_word_from_offset(self, offset): """Return the word value at the given file offset. 
(little endian)""" if offset+2 > len(self.__data__): return None return self.get_word_from_data(self.__data__[offset:offset+2], 0) def set_word_at_rva(self, rva, word): """Set the word value at the file offset corresponding to the given RVA.""" return self.set_bytes_at_rva(rva, self.get_data_from_word(word)) def set_word_at_offset(self, offset, word): """Set the word value at the given file offset.""" return self.set_bytes_at_offset(offset, self.get_data_from_word(word)) ## # Quad-Word get/set ## def get_data_from_qword(self, word): """Return a eight byte string representing the quad-word value. (little endian).""" return struct.pack('<Q', word) def get_qword_from_data(self, data, offset): """Convert eight bytes of data to a word (little endian) 'offset' is assumed to index into a word array. So setting it to N will return a dword out of the data sarting at offset N*8. Returns None if the data can't be turned into a quad word. """ if (offset+1)*8 > len(data): return None return struct.unpack('<Q', data[offset*8:(offset+1)*8])[0] def get_qword_at_rva(self, rva): """Return the quad-word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset. """ try: return self.get_qword_from_data(self.get_data(rva)[:8], 0) except PEFormatError: return None def get_qword_from_offset(self, offset): """Return the quad-word value at the given file offset. (little endian)""" if offset+8 > len(self.__data__): return None return self.get_qword_from_data(self.__data__[offset:offset+8], 0) def set_qword_at_rva(self, rva, qword): """Set the quad-word value at the file offset corresponding to the given RVA.""" return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword)) def set_qword_at_offset(self, offset, qword): """Set the quad-word value at the given file offset.""" return self.set_bytes_at_offset(offset, self.get_data_from_qword(qword)) ## # Set bytes ## def set_bytes_at_rva(self, rva, data): """Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA. Return True if successful, False otherwise. It can fail if the offset is outside the file's boundaries. """ offset = self.get_physical_by_rva(rva) if not offset: raise False return self.set_bytes_at_offset(offset, data) def set_bytes_at_offset(self, offset, data): """Overwrite the bytes at the given file offset with the given string. Return True if successful, False otherwise. It can fail if the offset is outside the file's boundaries. """ if not isinstance(data, str): raise TypeError('data should be of type: str') if offset >= 0 and offset < len(self.__data__): self.__data__ = ( self.__data__[:offset] + data + self.__data__[offset+len(data):] ) else: return False # Refresh the section's data with the modified information # for section in self.sections: section_data_start = section.PointerToRawData section_data_end = section_data_start+section.SizeOfRawData section.data = self.__data__[section_data_start:section_data_end] return True def relocate_image(self, new_ImageBase): """Apply the relocation information to the image using the provided new image base. This method will apply the relocation information to the image. Given the new base, all the relocations will be processed and both the raw data and the section's data will be fixed accordingly. The resulting image can be retrieved as well through the method: get_memory_mapped_image() In order to get something that would more closely match what could be found in memory once the Windows loader finished its work. 
""" relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase for reloc in self.DIRECTORY_ENTRY_BASERELOC: virtual_address = reloc.struct.VirtualAddress size_of_block = reloc.struct.SizeOfBlock # We iterate with an index because if the relocation is of type # IMAGE_REL_BASED_HIGHADJ we need to also process the next entry # at once and skip it for the next interation # entry_idx = 0 while entry_idx<len(reloc.entries): entry = reloc.entries[entry_idx] entry_idx += 1 if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']: # Nothing to do for this type of relocation pass elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']: # Fix the high 16bits of a relocation # # Add high 16bits of relocation_difference to the # 16bit value at RVA=entry.rva self.set_word_at_rva( entry.rva, ( self.get_word_at_rva(entry.rva) + relocation_difference>>16)&0xffff ) elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']: # Fix the low 16bits of a relocation # # Add low 16 bits of relocation_difference to the 16bit value # at RVA=entry.rva self.set_word_at_rva( entry.rva, ( self.get_word_at_rva(entry.rva) + relocation_difference)&0xffff) elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']: # Handle all high and low parts of a 32bit relocation # # Add relocation_difference to the value at RVA=entry.rva self.set_dword_at_rva( entry.rva, self.get_dword_at_rva(entry.rva)+relocation_difference) elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']: # Fix the high 16bits of a relocation and adjust # # Add high 16bits of relocation_difference to the 32bit value # composed from the (16bit value at RVA=entry.rva)<<16 plus # the 16bit value at the next relocation entry. # # If the next entry is beyond the array's limits, # abort... the table is corrupt # if entry_idx == len(reloc.entries): break next_entry = reloc.entries[entry_idx] entry_idx += 1 self.set_word_at_rva( entry.rva, ((self.get_word_at_rva(entry.rva)<<16) + next_entry.rva + relocation_difference & 0xffff0000) >> 16 ) elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']: # Apply the difference to the 64bit value at the offset # RVA=entry.rva self.set_qword_at_rva( entry.rva, self.get_qword_at_rva(entry.rva) + relocation_difference) def verify_checksum(self): return self.OPTIONAL_HEADER.CheckSum == self.generate_checksum() def generate_checksum(self): # Get the offset to the CheckSum field in the OptionalHeader # checksum_offset = self.OPTIONAL_HEADER.__file_offset__ + 0x40 # 64 checksum = 0 for i in range( len(self.__data__) / 4 ): # Skip the checksum field # if i == checksum_offset / 4: continue dword = struct.unpack('L', self.__data__[ i*4 : i*4+4 ])[0] checksum = (checksum & 0xffffffff) + dword + (checksum>>32) if checksum > 2**32: checksum = (checksum & 0xffffffff) + (checksum >> 32) checksum = (checksum & 0xffff) + (checksum >> 16) checksum = (checksum) + (checksum >> 16) checksum = checksum & 0xffff return checksum + len(self.__data__)
bsd-3-clause
-1,072,663,433,863,285,100
-1,727,239,611,794,571,800
36.441942
106
0.516334
false
joelddiaz/openshift-tools
openshift/installer/vendored/openshift-ansible-3.4.40/filter_plugins/oo_filters.py
9
43540
#!/usr/bin/python # -*- coding: utf-8 -*- # vim: expandtab:tabstop=4:shiftwidth=4 """ Custom filters for use in openshift-ansible """ from ansible import errors from collections import Mapping from distutils.util import strtobool from distutils.version import LooseVersion from operator import itemgetter import OpenSSL.crypto import os import pdb import pkg_resources import re import json import yaml from ansible.parsing.yaml.dumper import AnsibleDumper from urlparse import urlparse try: # ansible-2.2 # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2, # ansible.module_utils._text.to_text should be used instead. from ansible.module_utils._text import to_text except ImportError: # ansible-2.1 from ansible.utils.unicode import to_unicode as to_text # Disabling too-many-public-methods, since filter methods are necessarily # public # pylint: disable=too-many-public-methods class FilterModule(object): """ Custom ansible filters """ @staticmethod def oo_pdb(arg): """ This pops you into a pdb instance where arg is the data passed in from the filter. Ex: "{{ hostvars | oo_pdb }}" """ pdb.set_trace() return arg @staticmethod def get_attr(data, attribute=None): """ This looks up dictionary attributes of the form a.b.c and returns the value. If the key isn't present, None is returned. Ex: data = {'a': {'b': {'c': 5}}} attribute = "a.b.c" returns 5 """ if not attribute: raise errors.AnsibleFilterError("|failed expects attribute to be set") ptr = data for attr in attribute.split('.'): if attr in ptr: ptr = ptr[attr] else: ptr = None break return ptr @staticmethod def oo_flatten(data): """ This filter plugin will flatten a list of lists """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to flatten a List") return [item for sublist in data for item in sublist] @staticmethod def oo_merge_dicts(first_dict, second_dict): """ Merge two dictionaries where second_dict values take precedence. Ex: first_dict={'a': 1, 'b': 2} second_dict={'b': 3, 'c': 4} returns {'a': 1, 'b': 3, 'c': 4} """ if not isinstance(first_dict, dict) or not isinstance(second_dict, dict): raise errors.AnsibleFilterError("|failed expects to merge two dicts") merged = first_dict.copy() merged.update(second_dict) return merged @staticmethod def oo_merge_hostvars(hostvars, variables, inventory_hostname): """ Merge host and play variables. When ansible version is greater than or equal to 2.0.0, merge hostvars[inventory_hostname] with variables (ansible vars) otherwise merge hostvars with hostvars['inventory_hostname']. 
Ex: hostvars={'master1.example.com': {'openshift_variable': '3'}, 'openshift_other_variable': '7'} variables={'openshift_other_variable': '6'} inventory_hostname='master1.example.com' returns {'openshift_variable': '3', 'openshift_other_variable': '7'} hostvars=<ansible.vars.hostvars.HostVars object> (Mapping) variables={'openshift_other_variable': '6'} inventory_hostname='master1.example.com' returns {'openshift_variable': '3', 'openshift_other_variable': '6'} """ if not isinstance(hostvars, Mapping): raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object") if not isinstance(variables, dict): raise errors.AnsibleFilterError("|failed expects variables is a dictionary") if not isinstance(inventory_hostname, basestring): raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string") # pylint: disable=no-member ansible_version = pkg_resources.get_distribution("ansible").version merged_hostvars = {} if LooseVersion(ansible_version) >= LooseVersion('2.0.0'): merged_hostvars = FilterModule.oo_merge_dicts(hostvars[inventory_hostname], variables) else: merged_hostvars = FilterModule.oo_merge_dicts(hostvars[inventory_hostname], hostvars) return merged_hostvars @staticmethod def oo_collect(data, attribute=None, filters=None): """ This takes a list of dict and collects all attributes specified into a list. If filter is specified then we will include all items that match _ALL_ of filters. If a dict entry is missing the key in a filter it will be excluded from the match. Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return {'a':2, 'z': 'z'}, # True, return {'a':3, 'z': 'z'}, # True, return {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z'] ] attribute = 'a' filters = {'z': 'z'} returns [1, 2, 3] """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to filter on a List") if not attribute: raise errors.AnsibleFilterError("|failed expects attribute to be set") if filters is not None: if not isinstance(filters, dict): raise errors.AnsibleFilterError("|failed expects filter to be a" " dict") retval = [FilterModule.get_attr(d, attribute) for d in data if ( all([d.get(key, None) == filters[key] for key in filters]))] else: retval = [FilterModule.get_attr(d, attribute) for d in data] retval = [val for val in retval if val != None] return retval @staticmethod def oo_select_keys_from_list(data, keys): """ This returns a list, which contains the value portions for the keys Ex: data = { 'a':1, 'b':2, 'c':3 } keys = ['a', 'c'] returns [1, 3] """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to filter on a list") if not isinstance(keys, list): raise errors.AnsibleFilterError("|failed expects first param is a list") # Gather up the values for the list of keys passed in retval = [FilterModule.oo_select_keys(item, keys) for item in data] return FilterModule.oo_flatten(retval) @staticmethod def oo_select_keys(data, keys): """ This returns a list, which contains the value portions for the keys Ex: data = { 'a':1, 'b':2, 'c':3 } keys = ['a', 'c'] returns [1, 3] """ if not isinstance(data, Mapping): raise errors.AnsibleFilterError("|failed expects to filter on a dict or object") if not isinstance(keys, list): raise errors.AnsibleFilterError("|failed expects first param is a list") # Gather up the values for the list of keys passed in retval = [data[key] for key in keys if key in data] return retval @staticmethod def oo_prepend_strings_in_list(data, prepend): """ This takes a list of strings and prepends a string to 
each item in the list Ex: data = ['cart', 'tree'] prepend = 'apple-' returns ['apple-cart', 'apple-tree'] """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects first param is a list") if not all(isinstance(x, basestring) for x in data): raise errors.AnsibleFilterError("|failed expects first param is a list" " of strings") retval = [prepend + s for s in data] return retval @staticmethod def oo_combine_key_value(data, joiner='='): """Take a list of dict in the form of { 'key': 'value'} and arrange them as a list of strings ['key=value'] """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects first param is a list") rval = [] for item in data: rval.append("%s%s%s" % (item['key'], joiner, item['value'])) return rval @staticmethod def oo_combine_dict(data, in_joiner='=', out_joiner=' '): """Take a dict in the form of { 'key': 'value', 'key': 'value' } and arrange them as a string 'key=value key=value' """ if not isinstance(data, dict): raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data)))) return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()]) @staticmethod def oo_ami_selector(data, image_name): """ This takes a list of amis and an image name and attempts to return the latest ami. """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects first param is a list") if not data: return None else: if image_name is None or not image_name.endswith('_*'): ami = sorted(data, key=itemgetter('name'), reverse=True)[0] return ami['ami_id'] else: ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data] ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0] return ami['ami_id'] @staticmethod def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False): """ This takes a dictionary of volume definitions and returns a valid ec2 volume definition based on the host_type and the values in the dictionary. The dictionary should look similar to this: { 'master': { 'root': { 'volume_size': 10, 'device_type': 'gp2', 'iops': 500 }, 'docker': { 'volume_size': 40, 'device_type': 'gp2', 'iops': 500, 'ephemeral': 'true' } }, 'node': { 'root': { 'volume_size': 10, 'device_type': 'io1', 'iops': 1000 }, 'docker': { 'volume_size': 40, 'device_type': 'gp2', 'iops': 500, 'ephemeral': 'true' } } } """ if not isinstance(data, dict): raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. 
Type: %s" % (str(data), str(type(data)))) if host_type not in ['master', 'node', 'etcd']: raise errors.AnsibleFilterError("|failed expects etcd, master or node" " as the host type") root_vol = data[host_type]['root'] root_vol['device_name'] = '/dev/sda1' root_vol['delete_on_termination'] = True if root_vol['device_type'] != 'io1': root_vol.pop('iops', None) if host_type in ['master', 'node'] and 'docker' in data[host_type]: docker_vol = data[host_type]['docker'] docker_vol['device_name'] = '/dev/xvdb' docker_vol['delete_on_termination'] = True if docker_vol['device_type'] != 'io1': docker_vol.pop('iops', None) if docker_ephemeral: docker_vol.pop('device_type', None) docker_vol.pop('delete_on_termination', None) docker_vol['ephemeral'] = 'ephemeral0' return [root_vol, docker_vol] elif host_type == 'etcd' and 'etcd' in data[host_type]: etcd_vol = data[host_type]['etcd'] etcd_vol['device_name'] = '/dev/xvdb' etcd_vol['delete_on_termination'] = True if etcd_vol['device_type'] != 'io1': etcd_vol.pop('iops', None) return [root_vol, etcd_vol] return [root_vol] @staticmethod def oo_split(string, separator=','): """ This splits the input string into a list. If the input string is already a list we will return it as is. """ if isinstance(string, list): return string return string.split(separator) @staticmethod def oo_haproxy_backend_masters(hosts, port): """ This takes an array of dicts and returns an array of dicts to be used as a backend for the haproxy role """ servers = [] for idx, host_info in enumerate(hosts): server = dict(name="master%s" % idx) server_ip = host_info['openshift']['common']['ip'] server['address'] = "%s:%s" % (server_ip, port) server['opts'] = 'check' servers.append(server) return servers @staticmethod def oo_filter_list(data, filter_attr=None): """ This returns a list, which contains all items where filter_attr evaluates to true Ex: data = [ { a: 1, b: True }, { a: 3, b: False }, { a: 5, b: True } ] filter_attr = 'b' returns [ { a: 1, b: True }, { a: 5, b: True } ] """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to filter on a list") if not isinstance(filter_attr, basestring): raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode") # Gather up the values for the list of keys passed in return [x for x in data if filter_attr in x and x[filter_attr]] @staticmethod def oo_nodes_with_label(nodes, label, value=None): """ Filters a list of nodes by label and value (if provided) It handles labels that are in the following variables by priority: openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels'] Examples: data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}}, 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}, 'c': {'openshift_node_labels': {'size': 'S'}}] label = 'color' returns = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}}, 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}] data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}}, 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}, 'c': {'openshift_node_labels': {'size': 'S'}}] label = 'color' value = 'green' returns = ['b': {'labels': {'color': 'green', 'size': 'L'}}] Args: nodes (list[dict]): list of node to node variables label (str): label to filter `nodes` by value (Optional[str]): value of `label` to filter by Defaults to None. 
Returns: list[dict]: nodes filtered by label and value (if provided) """ if not isinstance(nodes, list): raise errors.AnsibleFilterError("failed expects to filter on a list") if not isinstance(label, basestring): raise errors.AnsibleFilterError("failed expects label to be a string") if value is not None and not isinstance(value, basestring): raise errors.AnsibleFilterError("failed expects value to be a string") def label_filter(node): """ filter function for testing if node should be returned """ if not isinstance(node, dict): raise errors.AnsibleFilterError("failed expects to filter on a list of dicts") if 'openshift_node_labels' in node: labels = node['openshift_node_labels'] elif 'cli_openshift_node_labels' in node: labels = node['cli_openshift_node_labels'] elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']: labels = node['openshift']['node']['labels'] else: return False if isinstance(labels, basestring): labels = yaml.safe_load(labels) if not isinstance(labels, dict): raise errors.AnsibleFilterError( "failed expected node labels to be a dict or serializable to a dict" ) return label in labels and (value is None or labels[label] == value) return [n for n in nodes if label_filter(n)] @staticmethod def oo_parse_heat_stack_outputs(data): """ Formats the HEAT stack output into a usable form The goal is to transform something like this: +---------------+-------------------------------------------------+ | Property | Value | +---------------+-------------------------------------------------+ | capabilities | [] | | | creation_time | 2015-06-26T12:26:26Z | | | description | OpenShift cluster | | | … | … | | outputs | [ | | | { | | | "output_value": "value_A" | | | "description": "This is the value of Key_A" | | | "output_key": "Key_A" | | | }, | | | { | | | "output_value": [ | | | "value_B1", | | | "value_B2" | | | ], | | | "description": "This is the value of Key_B" | | | "output_key": "Key_B" | | | }, | | | ] | | parameters | { | | … | … | +---------------+-------------------------------------------------+ into something like this: { "Key_A": "value_A", "Key_B": [ "value_B1", "value_B2" ] } """ # Extract the “outputs” JSON snippet from the pretty-printed array in_outputs = False outputs = '' line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|') for line in data['stdout_lines']: match = line_regex.match(line) if match: if match.group(1) == 'outputs': in_outputs = True elif match.group(1) != '': in_outputs = False if in_outputs: outputs += match.group(2) outputs = json.loads(outputs) # Revamp the “outputs” to put it in the form of a “Key: value” map revamped_outputs = {} for output in outputs: revamped_outputs[output['output_key']] = output['output_value'] return revamped_outputs @staticmethod # pylint: disable=too-many-branches def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames): """ Parses names from list of certificate hashes. 
Ex: certificates = [{ "certfile": "/root/custom1.crt", "keyfile": "/root/custom1.key", "cafile": "/root/custom-ca1.crt" }, { "certfile": "custom2.crt", "keyfile": "custom2.key", "cafile": "custom-ca2.crt" }] returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt", "keyfile": "/etc/origin/master/named_certificates/custom1.key", "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt", "names": [ "public-master-host.com", "other-master-host.com" ] }, { "certfile": "/etc/origin/master/named_certificates/custom2.crt", "keyfile": "/etc/origin/master/named_certificates/custom2.key", "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt", "names": [ "some-hostname.com" ] }] """ if not isinstance(named_certs_dir, basestring): raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode") if not isinstance(internal_hostnames, list): raise errors.AnsibleFilterError("|failed expects internal_hostnames is list") for certificate in certificates: if 'names' in certificate.keys(): continue else: certificate['names'] = [] if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']): raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" % (certificate['certfile'], certificate['keyfile'])) try: st_cert = open(certificate['certfile'], 'rt').read() cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert) certificate['names'].append(str(cert.get_subject().commonName.decode())) for i in range(cert.get_extension_count()): if cert.get_extension(i).get_short_name() == 'subjectAltName': for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '): certificate['names'].append(name) except: raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] + "please specify certificate names in host inventory")) certificate['names'] = list(set(certificate['names'])) if 'cafile' not in certificate: certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames] if not certificate['names']: raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] + "detected a collision with internal hostname, please specify " + "certificate names in host inventory")) for certificate in certificates: # Update paths for configuration certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile'])) certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile'])) if 'cafile' in certificate: certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile'])) return certificates @staticmethod def oo_pretty_print_cluster(data, prefix='tag_'): """ Read a subset of hostvars and build a summary of the cluster in the following layout: "c_id": { "master": { "default": [ { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" } ] "node": { "infra": [ { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" } ], "compute": [ { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" }, ... ] } """ def _get_tag_value(tags, key): """ Extract values of a map implemented as a set. 
Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' } key = 'bar' returns 'value2' """ for tag in tags: if tag[:len(prefix)+len(key)] == prefix + key: return tag[len(prefix)+len(key)+1:] raise KeyError(key) def _add_host(clusters, clusterid, host_type, sub_host_type, host): """ Add a new host in the clusters data structure """ if clusterid not in clusters: clusters[clusterid] = {} if host_type not in clusters[clusterid]: clusters[clusterid][host_type] = {} if sub_host_type not in clusters[clusterid][host_type]: clusters[clusterid][host_type][sub_host_type] = [] clusters[clusterid][host_type][sub_host_type].append(host) clusters = {} for host in data: try: _add_host(clusters=clusters, clusterid=_get_tag_value(host['group_names'], 'clusterid'), host_type=_get_tag_value(host['group_names'], 'host-type'), sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'), host={'name': host['inventory_hostname'], 'public IP': host['ansible_ssh_host'], 'private IP': host['ansible_default_ipv4']['address']}) except KeyError: pass return clusters @staticmethod def oo_generate_secret(num_bytes): """ generate a session secret """ if not isinstance(num_bytes, int): raise errors.AnsibleFilterError("|failed expects num_bytes is int") secret = os.urandom(num_bytes) return secret.encode('base-64').strip() @staticmethod def to_padded_yaml(data, level=0, indent=2, **kw): """ returns a yaml snippet padded to match the indent level you specify """ if data in [None, ""]: return "" try: transformed = yaml.dump(data, indent=indent, allow_unicode=True, default_flow_style=False, Dumper=AnsibleDumper, **kw) padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()]) return to_text("\n{0}".format(padded)) except Exception as my_e: raise errors.AnsibleFilterError('Failed to convert: %s' % my_e) @staticmethod def oo_openshift_env(hostvars): ''' Return facts which begin with "openshift_" and translate legacy facts to their openshift_env counterparts. Ex: hostvars = {'openshift_fact': 42, 'theyre_taking_the_hobbits_to': 'isengard'} returns = {'openshift_fact': 42} ''' if not issubclass(type(hostvars), dict): raise errors.AnsibleFilterError("|failed expects hostvars is a dict") facts = {} regex = re.compile('^openshift_.*') for key in hostvars: if regex.match(key): facts[key] = hostvars[key] migrations = {'openshift_router_selector': 'openshift_hosted_router_selector', 'openshift_registry_selector': 'openshift_hosted_registry_selector'} for old_fact, new_fact in migrations.iteritems(): if old_fact in facts and new_fact not in facts: facts[new_fact] = facts[old_fact] return facts @staticmethod # pylint: disable=too-many-branches def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): """ Generate list of persistent volumes based on oo_openshift_env storage options set in host variables. 
""" if not issubclass(type(hostvars), dict): raise errors.AnsibleFilterError("|failed expects hostvars is a dict") if not issubclass(type(groups), dict): raise errors.AnsibleFilterError("|failed expects groups is a dict") if persistent_volumes != None and not issubclass(type(persistent_volumes), list): raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list") if persistent_volumes == None: persistent_volumes = [] if 'hosted' in hostvars['openshift']: for component in hostvars['openshift']['hosted']: if 'storage' in hostvars['openshift']['hosted'][component]: params = hostvars['openshift']['hosted'][component]['storage'] kind = params['kind'] create_pv = params['create_pv'] if kind != None and create_pv: if kind == 'nfs': host = params['host'] if host == None: if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: host = groups['oo_nfs_to_config'][0] else: raise errors.AnsibleFilterError("|failed no storage host detected") directory = params['nfs']['directory'] volume = params['volume']['name'] path = directory + '/' + volume size = params['volume']['size'] access_modes = params['access_modes'] persistent_volume = dict( name="{0}-volume".format(volume), capacity=size, access_modes=access_modes, storage=dict( nfs=dict( server=host, path=path))) persistent_volumes.append(persistent_volume) elif kind == 'openstack': volume = params['volume']['name'] size = params['volume']['size'] access_modes = params['access_modes'] filesystem = params['openstack']['filesystem'] volume_id = params['openstack']['volumeID'] persistent_volume = dict( name="{0}-volume".format(volume), capacity=size, access_modes=access_modes, storage=dict( cinder=dict( fsType=filesystem, volumeID=volume_id))) persistent_volumes.append(persistent_volume) elif not (kind == 'object' or kind == 'dynamic'): msg = "|failed invalid storage kind '{0}' for component '{1}'".format( kind, component) raise errors.AnsibleFilterError(msg) return persistent_volumes @staticmethod def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None): """ Generate list of persistent volume claims based on oo_openshift_env storage options set in host variables. """ if not issubclass(type(hostvars), dict): raise errors.AnsibleFilterError("|failed expects hostvars is a dict") if persistent_volume_claims != None and not issubclass(type(persistent_volume_claims), list): raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list") if persistent_volume_claims == None: persistent_volume_claims = [] if 'hosted' in hostvars['openshift']: for component in hostvars['openshift']['hosted']: if 'storage' in hostvars['openshift']['hosted'][component]: params = hostvars['openshift']['hosted'][component]['storage'] kind = params['kind'] create_pv = params['create_pv'] create_pvc = params['create_pvc'] if kind not in [None, 'object'] and create_pv and create_pvc: volume = params['volume']['name'] size = params['volume']['size'] access_modes = params['access_modes'] persistent_volume_claim = dict( name="{0}-claim".format(volume), capacity=size, access_modes=access_modes) persistent_volume_claims.append(persistent_volume_claim) return persistent_volume_claims @staticmethod def oo_31_rpm_rename_conversion(rpms, openshift_version=None): """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms names with proper version (if provided) If 3.1 rpms are passed in they will only be augmented with the correct version. This is important for hosts that are running both Masters and Nodes. 
""" if not isinstance(rpms, list): raise errors.AnsibleFilterError("failed expects to filter on a list") if openshift_version is not None and not isinstance(openshift_version, basestring): raise errors.AnsibleFilterError("failed expects openshift_version to be a string") rpms_31 = [] for rpm in rpms: if not 'atomic' in rpm: rpm = rpm.replace("openshift", "atomic-openshift") if openshift_version: rpm = rpm + openshift_version rpms_31.append(rpm) return rpms_31 @staticmethod def oo_pods_match_component(pods, deployment_type, component): """ Filters a list of Pods and returns the ones matching the deployment_type and component """ if not isinstance(pods, list): raise errors.AnsibleFilterError("failed expects to filter on a list") if not isinstance(deployment_type, basestring): raise errors.AnsibleFilterError("failed expects deployment_type to be a string") if not isinstance(component, basestring): raise errors.AnsibleFilterError("failed expects component to be a string") image_prefix = 'openshift/origin-' if deployment_type in ['enterprise', 'online', 'openshift-enterprise']: image_prefix = 'openshift3/ose-' elif deployment_type == 'atomic-enterprise': image_prefix = 'aep3_beta/aep-' matching_pods = [] image_regex = image_prefix + component + r'.*' for pod in pods: for container in pod['spec']['containers']: if re.search(image_regex, container['image']): matching_pods.append(pod) break # stop here, don't add a pod more than once return matching_pods @staticmethod def oo_get_hosts_from_hostvars(hostvars, hosts): """ Return a list of hosts from hostvars """ retval = [] for host in hosts: try: retval.append(hostvars[host]) except errors.AnsibleError as _: # host does not exist pass return retval @staticmethod def oo_image_tag_to_rpm_version(version, include_dash=False): """ Convert an image tag string to an RPM version if necessary Empty strings and strings that are already in rpm version format are ignored. Also remove non semantic version components. Ex. v3.2.0.10 -> -3.2.0.10 v1.2.0-rc1 -> -1.2.0 """ if not isinstance(version, basestring): raise errors.AnsibleFilterError("|failed expects a string or unicode") if version.startswith("v"): version = version[1:] # Strip release from requested version, we no longer support this. 
version = version.split('-')[0] if include_dash and version and not version.startswith("-"): version = "-" + version return version @staticmethod def oo_hostname_from_url(url): """ Returns the hostname contained in a URL Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com """ if not isinstance(url, basestring): raise errors.AnsibleFilterError("|failed expects a string or unicode") parse_result = urlparse(url) if parse_result.netloc != '': return parse_result.netloc else: # netloc wasn't parsed, assume url was missing scheme and path return parse_result.path @staticmethod def oo_openshift_loadbalancer_frontends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None): loadbalancer_frontends = [{'name': 'atomic-openshift-api', 'mode': 'tcp', 'options': ['tcplog'], 'binds': ["*:{0}".format(api_port)], 'default_backend': 'atomic-openshift-api'}] if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None: loadbalancer_frontends.append({'name': 'nuage-monitor', 'mode': 'tcp', 'options': ['tcplog'], 'binds': ["*:{0}".format(nuage_rest_port)], 'default_backend': 'nuage-monitor'}) return loadbalancer_frontends @staticmethod def oo_openshift_loadbalancer_backends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None): loadbalancer_backends = [{'name': 'atomic-openshift-api', 'mode': 'tcp', 'option': 'tcplog', 'balance': 'source', 'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, api_port)}] if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None: loadbalancer_backends.append({'name': 'nuage-monitor', 'mode': 'tcp', 'option': 'tcplog', 'balance': 'source', 'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)}) return loadbalancer_backends @staticmethod def oo_chomp_commit_offset(version): """Chomp any "+git.foo" commit offset string from the given `version` and return the modified version string. Ex: - chomp_commit_offset(None) => None - chomp_commit_offset(1337) => "1337" - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15" - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15" - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0" """ if version is None: return version else: # Stringify, just in case it's a Number type. Split by '+' and # return the first split. No concerns about strings without a # '+', .split() returns an array of the original string. 
return str(version).split('+')[0] def filters(self): """ returns a mapping of filters to methods """ return { "oo_select_keys": self.oo_select_keys, "oo_select_keys_from_list": self.oo_select_keys_from_list, "oo_chomp_commit_offset": self.oo_chomp_commit_offset, "oo_collect": self.oo_collect, "oo_flatten": self.oo_flatten, "oo_pdb": self.oo_pdb, "oo_prepend_strings_in_list": self.oo_prepend_strings_in_list, "oo_ami_selector": self.oo_ami_selector, "oo_ec2_volume_definition": self.oo_ec2_volume_definition, "oo_combine_key_value": self.oo_combine_key_value, "oo_combine_dict": self.oo_combine_dict, "oo_split": self.oo_split, "oo_filter_list": self.oo_filter_list, "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs, "oo_parse_named_certificates": self.oo_parse_named_certificates, "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters, "oo_pretty_print_cluster": self.oo_pretty_print_cluster, "oo_generate_secret": self.oo_generate_secret, "to_padded_yaml": self.to_padded_yaml, "oo_nodes_with_label": self.oo_nodes_with_label, "oo_openshift_env": self.oo_openshift_env, "oo_persistent_volumes": self.oo_persistent_volumes, "oo_persistent_volume_claims": self.oo_persistent_volume_claims, "oo_31_rpm_rename_conversion": self.oo_31_rpm_rename_conversion, "oo_pods_match_component": self.oo_pods_match_component, "oo_get_hosts_from_hostvars": self.oo_get_hosts_from_hostvars, "oo_image_tag_to_rpm_version": self.oo_image_tag_to_rpm_version, "oo_merge_dicts": self.oo_merge_dicts, "oo_hostname_from_url": self.oo_hostname_from_url, "oo_merge_hostvars": self.oo_merge_hostvars, "oo_openshift_loadbalancer_frontends": self.oo_openshift_loadbalancer_frontends, "oo_openshift_loadbalancer_backends": self.oo_openshift_loadbalancer_backends }
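
# A small sanity-check sketch of the pure static filters above; they can be
# exercised without Ansible. This assumes Python 2 (the module relies on
# `basestring`) and that the file's own imports are intact.
if __name__ == '__main__':
    print FilterModule.oo_image_tag_to_rpm_version('v3.2.0.10', include_dash=True)     # -3.2.0.10
    print FilterModule.oo_chomp_commit_offset('v3.4.0.15+git.derp')                    # v3.4.0.15
    print FilterModule.oo_hostname_from_url('https://ose3-master.example.com/v1/api')  # ose3-master.example.com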
apache-2.0
-5,080,609,986,156,319,000
-6,757,774,718,124,507,000
44.955649
153
0.51489
false
dc3-plaso/plaso
tests/storage/fake_storage.py
1
6205
#!/usr/bin/python # -*- coding: utf-8 -*- """Tests for the fake storage.""" import unittest from plaso.containers import errors from plaso.containers import event_sources from plaso.containers import reports from plaso.containers import sessions from plaso.containers import tasks from plaso.lib import definitions from plaso.storage import fake_storage from plaso.storage import zip_file from tests import test_lib as shared_test_lib from tests.storage import test_lib class FakeStorageWriterTest(test_lib.StorageTestCase): """Tests for the fake storage writer object.""" def testAddAnalysisReport(self): """Tests the AddAnalysisReport function.""" session = sessions.Session() analysis_report = reports.AnalysisReport( plugin_name=u'test', text=u'test report') storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() storage_writer.AddAnalysisReport(analysis_report) storage_writer.Close() with self.assertRaises(IOError): storage_writer.AddAnalysisReport(analysis_report) def testAddError(self): """Tests the AddError function.""" session = sessions.Session() extraction_error = errors.ExtractionError( message=u'Test extraction error') storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() storage_writer.AddError(extraction_error) storage_writer.Close() with self.assertRaises(IOError): storage_writer.AddError(extraction_error) def testAddEvent(self): """Tests the AddEvent function.""" session = sessions.Session() test_events = self._CreateTestEvents() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() event = None for event in test_events: storage_writer.AddEvent(event) storage_writer.Close() with self.assertRaises(IOError): storage_writer.AddEvent(event) def testAddEventSource(self): """Tests the AddEventSource function.""" session = sessions.Session() event_source = event_sources.EventSource() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() storage_writer.AddEventSource(event_source) storage_writer.Close() with self.assertRaises(IOError): storage_writer.AddEventSource(event_source) def testAddEventTag(self): """Tests the AddEventTag function.""" session = sessions.Session() test_events = self._CreateTestEvents() event_tags = self._CreateTestEventTags() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() for event in test_events: storage_writer.AddEvent(event) event_tag = None for event_tag in event_tags: storage_writer.AddEventTag(event_tag) storage_writer.Close() with self.assertRaises(IOError): storage_writer.AddEventTag(event_tag) def testOpenClose(self): """Tests the Open and Close functions.""" session = sessions.Session() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() storage_writer.Close() storage_writer.Open() storage_writer.Close() storage_writer = fake_storage.FakeStorageWriter( session, storage_type=definitions.STORAGE_TYPE_TASK) storage_writer.Open() storage_writer.Close() storage_writer.Open() with self.assertRaises(IOError): storage_writer.Open() storage_writer.Close() with self.assertRaises(IOError): storage_writer.Close() # TODO: add test for GetEvents. # TODO: add test for GetFirstWrittenEventSource and # GetNextWrittenEventSource. 
@shared_test_lib.skipUnlessHasTestFile([u'psort_test.json.plaso']) @shared_test_lib.skipUnlessHasTestFile([u'pinfo_test.json.plaso']) def testMergeFromStorage(self): """Tests the MergeFromStorage function.""" session = sessions.Session() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() test_file = self._GetTestFilePath([u'psort_test.json.plaso']) storage_reader = zip_file.ZIPStorageFileReader(test_file) storage_writer.MergeFromStorage(storage_reader) test_file = self._GetTestFilePath([u'pinfo_test.json.plaso']) storage_reader = zip_file.ZIPStorageFileReader(test_file) storage_writer.MergeFromStorage(storage_reader) storage_writer.Close() # TODO: add test for GetNextEventSource. def testWriteSessionStartAndCompletion(self): """Tests the WriteSessionStart and WriteSessionCompletion functions.""" session = sessions.Session() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() storage_writer.WriteSessionStart() storage_writer.WriteSessionCompletion() storage_writer.Close() with self.assertRaises(IOError): storage_writer.WriteSessionStart() with self.assertRaises(IOError): storage_writer.WriteSessionCompletion() storage_writer = fake_storage.FakeStorageWriter( session, storage_type=definitions.STORAGE_TYPE_TASK) storage_writer.Open() with self.assertRaises(IOError): storage_writer.WriteSessionStart() with self.assertRaises(IOError): storage_writer.WriteSessionCompletion() storage_writer.Close() def testWriteTaskStartAndCompletion(self): """Tests the WriteTaskStart and WriteTaskCompletion functions.""" session = sessions.Session() task = tasks.Task(session_identifier=session.identifier) storage_writer = fake_storage.FakeStorageWriter( session, storage_type=definitions.STORAGE_TYPE_TASK, task=task) storage_writer.Open() storage_writer.WriteTaskStart() storage_writer.WriteTaskCompletion() storage_writer.Close() with self.assertRaises(IOError): storage_writer.WriteTaskStart() with self.assertRaises(IOError): storage_writer.WriteTaskCompletion() storage_writer = fake_storage.FakeStorageWriter(session) storage_writer.Open() with self.assertRaises(IOError): storage_writer.WriteTaskStart() with self.assertRaises(IOError): storage_writer.WriteTaskCompletion() storage_writer.Close() if __name__ == '__main__': unittest.main()
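
def _example_usage():
  """A minimal sketch, not exercised by the tests above, of driving the fake
  storage writer directly. It only uses calls the tests already cover; real
  event data would have to come from a parser or the shared test helpers."""
  session = sessions.Session()
  storage_writer = fake_storage.FakeStorageWriter(session)
  storage_writer.Open()
  storage_writer.WriteSessionStart()
  storage_writer.AddEventSource(event_sources.EventSource())
  storage_writer.WriteSessionCompletion()
  storage_writer.Close()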
apache-2.0
-808,795,116,673,635,200
5,027,735,125,345,044,000
27.204545
75
0.72361
false
lastr2d2/lastchat
src/lib/web/db.py
16
42213
""" Database API (part of web.py) """ __all__ = [ "UnknownParamstyle", "UnknownDB", "TransactionError", "sqllist", "sqlors", "reparam", "sqlquote", "SQLQuery", "SQLParam", "sqlparam", "SQLLiteral", "sqlliteral", "database", 'DB', ] import time, os, urllib try: import datetime except ImportError: datetime = None try: set except NameError: from sets import Set as set from utils import threadeddict, storage, iters, iterbetter, safestr, safeunicode try: # db module can work independent of web.py from webapi import debug, config except: import sys debug = sys.stderr config = storage() class UnknownDB(Exception): """raised for unsupported dbms""" pass class _ItplError(ValueError): def __init__(self, text, pos): ValueError.__init__(self) self.text = text self.pos = pos def __str__(self): return "unfinished expression in %s at char %d" % ( repr(self.text), self.pos) class TransactionError(Exception): pass class UnknownParamstyle(Exception): """ raised for unsupported db paramstyles (currently supported: qmark, numeric, format, pyformat) """ pass class SQLParam(object): """ Parameter in SQLQuery. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")]) >>> q <sql: "SELECT * FROM test WHERE name='joe'"> >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.values() ['joe'] """ __slots__ = ["value"] def __init__(self, value): self.value = value def get_marker(self, paramstyle='pyformat'): if paramstyle == 'qmark': return '?' elif paramstyle == 'numeric': return ':1' elif paramstyle is None or paramstyle in ['format', 'pyformat']: return '%s' raise UnknownParamstyle, paramstyle def sqlquery(self): return SQLQuery([self]) def __add__(self, other): return self.sqlquery() + other def __radd__(self, other): return other + self.sqlquery() def __str__(self): return str(self.value) def __repr__(self): return '<param: %s>' % repr(self.value) sqlparam = SQLParam class SQLQuery(object): """ You can pass this sort of thing as a clause in any db function. Otherwise, you can pass a dictionary to the keyword argument `vars` and the function will call reparam for you. Internally, consists of `items`, which is a list of strings and SQLParams, which get concatenated to produce the actual query. """ __slots__ = ["items"] # tested in sqlquote's docstring def __init__(self, items=None): r"""Creates a new SQLQuery. 
>>> SQLQuery("x") <sql: 'x'> >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)]) >>> q <sql: 'SELECT * FROM test WHERE x=1'> >>> q.query(), q.values() ('SELECT * FROM test WHERE x=%s', [1]) >>> SQLQuery(SQLParam(1)) <sql: '1'> """ if items is None: self.items = [] elif isinstance(items, list): self.items = items elif isinstance(items, SQLParam): self.items = [items] elif isinstance(items, SQLQuery): self.items = list(items.items) else: self.items = [items] # Take care of SQLLiterals for i, item in enumerate(self.items): if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral): self.items[i] = item.value.v def append(self, value): self.items.append(value) def __add__(self, other): if isinstance(other, basestring): items = [other] elif isinstance(other, SQLQuery): items = other.items else: return NotImplemented return SQLQuery(self.items + items) def __radd__(self, other): if isinstance(other, basestring): items = [other] else: return NotImplemented return SQLQuery(items + self.items) def __iadd__(self, other): if isinstance(other, (basestring, SQLParam)): self.items.append(other) elif isinstance(other, SQLQuery): self.items.extend(other.items) else: return NotImplemented return self def __len__(self): return len(self.query()) def query(self, paramstyle=None): """ Returns the query part of the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.query(paramstyle='qmark') 'SELECT * FROM test WHERE name=?' """ s = [] for x in self.items: if isinstance(x, SQLParam): x = x.get_marker(paramstyle) s.append(safestr(x)) else: x = safestr(x) # automatically escape % characters in the query # For backward compatability, ignore escaping when the query looks already escaped if paramstyle in ['format', 'pyformat']: if '%' in x and '%%' not in x: x = x.replace('%', '%%') s.append(x) return "".join(s) def values(self): """ Returns the values of the parameters used in the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.values() ['joe'] """ return [i.value for i in self.items if isinstance(i, SQLParam)] def join(items, sep=' ', prefix=None, suffix=None, target=None): """ Joins multiple queries. >>> SQLQuery.join(['a', 'b'], ', ') <sql: 'a, b'> Optinally, prefix and suffix arguments can be provided. >>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')') <sql: '(a, b)'> If target argument is provided, the items are appended to target instead of creating a new SQLQuery. """ if target is None: target = SQLQuery() target_items = target.items if prefix: target_items.append(prefix) for i, item in enumerate(items): if i != 0: target_items.append(sep) if isinstance(item, SQLQuery): target_items.extend(item.items) else: target_items.append(item) if suffix: target_items.append(suffix) return target join = staticmethod(join) def _str(self): try: return self.query() % tuple([sqlify(x) for x in self.values()]) except (ValueError, TypeError): return self.query() def __str__(self): return safestr(self._str()) def __unicode__(self): return safeunicode(self._str()) def __repr__(self): return '<sql: %s>' % repr(str(self)) class SQLLiteral: """ Protects a string from `sqlquote`. 
>>> sqlquote('NOW()') <sql: "'NOW()'"> >>> sqlquote(SQLLiteral('NOW()')) <sql: 'NOW()'> """ def __init__(self, v): self.v = v def __repr__(self): return self.v sqlliteral = SQLLiteral def _sqllist(values): """ >>> _sqllist([1, 2, 3]) <sql: '(1, 2, 3)'> """ items = [] items.append('(') for i, v in enumerate(values): if i != 0: items.append(', ') items.append(sqlparam(v)) items.append(')') return SQLQuery(items) def reparam(string_, dictionary): """ Takes a string and a dictionary and interpolates the string using values from the dictionary. Returns an `SQLQuery` for the result. >>> reparam("s = $s", dict(s=True)) <sql: "s = 't'"> >>> reparam("s IN $s", dict(s=[1, 2])) <sql: 's IN (1, 2)'> """ dictionary = dictionary.copy() # eval mucks with it vals = [] result = [] for live, chunk in _interpolate(string_): if live: v = eval(chunk, dictionary) result.append(sqlquote(v)) else: result.append(chunk) return SQLQuery.join(result, '') def sqlify(obj): """ converts `obj` to its proper SQL version >>> sqlify(None) 'NULL' >>> sqlify(True) "'t'" >>> sqlify(3) '3' """ # because `1 == True and hash(1) == hash(True)` # we have to do this the hard way... if obj is None: return 'NULL' elif obj is True: return "'t'" elif obj is False: return "'f'" elif isinstance(obj, long): return str(obj) elif datetime and isinstance(obj, datetime.datetime): return repr(obj.isoformat()) else: if isinstance(obj, unicode): obj = obj.encode('utf8') return repr(obj) def sqllist(lst): """ Converts the arguments for use in something like a WHERE clause. >>> sqllist(['a', 'b']) 'a, b' >>> sqllist('a') 'a' >>> sqllist(u'abc') u'abc' """ if isinstance(lst, basestring): return lst else: return ', '.join(lst) def sqlors(left, lst): """ `left is a SQL clause like `tablename.arg = ` and `lst` is a list of values. Returns a reparam-style pair featuring the SQL that ORs together the clause for each item in the lst. >>> sqlors('foo = ', []) <sql: '1=2'> >>> sqlors('foo = ', [1]) <sql: 'foo = 1'> >>> sqlors('foo = ', 1) <sql: 'foo = 1'> >>> sqlors('foo = ', [1,2,3]) <sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'> """ if isinstance(lst, iters): lst = list(lst) ln = len(lst) if ln == 0: return SQLQuery("1=2") if ln == 1: lst = lst[0] if isinstance(lst, iters): return SQLQuery(['('] + sum([[left, sqlparam(x), ' OR '] for x in lst], []) + ['1=2)'] ) else: return left + sqlparam(lst) def sqlwhere(dictionary, grouping=' AND '): """ Converts a `dictionary` to an SQL WHERE clause `SQLQuery`. >>> sqlwhere({'cust_id': 2, 'order_id':3}) <sql: 'order_id = 3 AND cust_id = 2'> >>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ') <sql: 'order_id = 3, cust_id = 2'> >>> sqlwhere({'a': 'a', 'b': 'b'}).query() 'a = %s AND b = %s' """ return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping) def sqlquote(a): """ Ensures `a` is quoted properly for use in a SQL query. 
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3) <sql: "WHERE x = 't' AND y = 3"> >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3]) <sql: "WHERE x = 't' AND y IN (2, 3)"> """ if isinstance(a, list): return _sqllist(a) else: return sqlparam(a).sqlquery() class Transaction: """Database transaction.""" def __init__(self, ctx): self.ctx = ctx self.transaction_count = transaction_count = len(ctx.transactions) class transaction_engine: """Transaction Engine used in top level transactions.""" def do_transact(self): ctx.commit(unload=False) def do_commit(self): ctx.commit() def do_rollback(self): ctx.rollback() class subtransaction_engine: """Transaction Engine used in sub transactions.""" def query(self, q): db_cursor = ctx.db.cursor() ctx.db_execute(db_cursor, SQLQuery(q % transaction_count)) def do_transact(self): self.query('SAVEPOINT webpy_sp_%s') def do_commit(self): self.query('RELEASE SAVEPOINT webpy_sp_%s') def do_rollback(self): self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s') class dummy_engine: """Transaction Engine used instead of subtransaction_engine when sub transactions are not supported.""" do_transact = do_commit = do_rollback = lambda self: None if self.transaction_count: # nested transactions are not supported in some databases if self.ctx.get('ignore_nested_transactions'): self.engine = dummy_engine() else: self.engine = subtransaction_engine() else: self.engine = transaction_engine() self.engine.do_transact() self.ctx.transactions.append(self) def __enter__(self): return self def __exit__(self, exctype, excvalue, traceback): if exctype is not None: self.rollback() else: self.commit() def commit(self): if len(self.ctx.transactions) > self.transaction_count: self.engine.do_commit() self.ctx.transactions = self.ctx.transactions[:self.transaction_count] def rollback(self): if len(self.ctx.transactions) > self.transaction_count: self.engine.do_rollback() self.ctx.transactions = self.ctx.transactions[:self.transaction_count] class DB: """Database""" def __init__(self, db_module, keywords): """Creates a database. """ # some DB implementaions take optional paramater `driver` to use a specific driver modue # but it should not be passed to connect keywords.pop('driver', None) self.db_module = db_module self.keywords = keywords self._ctx = threadeddict() # flag to enable/disable printing queries self.printing = config.get('debug_sql', config.get('debug', False)) self.supports_multiple_insert = False try: import DBUtils # enable pooling if DBUtils module is available. self.has_pooling = True except ImportError: self.has_pooling = False # Pooling can be disabled by passing pooling=False in the keywords. self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling def _getctx(self): if not self._ctx.get('db'): self._load_context(self._ctx) return self._ctx ctx = property(_getctx) def _load_context(self, ctx): ctx.dbq_count = 0 ctx.transactions = [] # stack of transactions if self.has_pooling: ctx.db = self._connect_with_pooling(self.keywords) else: ctx.db = self._connect(self.keywords) ctx.db_execute = self._db_execute if not hasattr(ctx.db, 'commit'): ctx.db.commit = lambda: None if not hasattr(ctx.db, 'rollback'): ctx.db.rollback = lambda: None def commit(unload=True): # do db commit and release the connection if pooling is enabled. ctx.db.commit() if unload and self.has_pooling: self._unload_context(self._ctx) def rollback(): # do db rollback and release the connection if pooling is enabled. 
ctx.db.rollback() if self.has_pooling: self._unload_context(self._ctx) ctx.commit = commit ctx.rollback = rollback def _unload_context(self, ctx): del ctx.db def _connect(self, keywords): return self.db_module.connect(**keywords) def _connect_with_pooling(self, keywords): def get_pooled_db(): from DBUtils import PooledDB # In DBUtils 0.9.3, `dbapi` argument is renamed as `creator` # see Bug#122112 if PooledDB.__version__.split('.') < '0.9.3'.split('.'): return PooledDB.PooledDB(dbapi=self.db_module, **keywords) else: return PooledDB.PooledDB(creator=self.db_module, **keywords) if getattr(self, '_pooleddb', None) is None: self._pooleddb = get_pooled_db() return self._pooleddb.connection() def _db_cursor(self): return self.ctx.db.cursor() def _param_marker(self): """Returns parameter marker based on paramstyle attribute if this database.""" style = getattr(self, 'paramstyle', 'pyformat') if style == 'qmark': return '?' elif style == 'numeric': return ':1' elif style in ['format', 'pyformat']: return '%s' raise UnknownParamstyle, style def _db_execute(self, cur, sql_query): """executes an sql query""" self.ctx.dbq_count += 1 try: a = time.time() query, params = self._process_query(sql_query) out = cur.execute(query, params) b = time.time() except: if self.printing: print >> debug, 'ERR:', str(sql_query) if self.ctx.transactions: self.ctx.transactions[-1].rollback() else: self.ctx.rollback() raise if self.printing: print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query)) return out def _process_query(self, sql_query): """Takes the SQLQuery object and returns query string and parameters. """ paramstyle = getattr(self, 'paramstyle', 'pyformat') query = sql_query.query(paramstyle) params = sql_query.values() return query, params def _where(self, where, vars): if isinstance(where, (int, long)): where = "id = " + sqlparam(where) #@@@ for backward-compatibility elif isinstance(where, (list, tuple)) and len(where) == 2: where = SQLQuery(where[0], where[1]) elif isinstance(where, SQLQuery): pass else: where = reparam(where, vars) return where def query(self, sql_query, vars=None, processed=False, _test=False): """ Execute SQL query `sql_query` using dictionary `vars` to interpolate it. If `processed=True`, `vars` is a `reparam`-style list to use instead of interpolating. >>> db = DB(None, {}) >>> db.query("SELECT * FROM foo", _test=True) <sql: 'SELECT * FROM foo'> >>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True) <sql: "SELECT * FROM foo WHERE x = 'f'"> >>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True) <sql: "SELECT * FROM foo WHERE x = 'f'"> """ if vars is None: vars = {} if not processed and not isinstance(sql_query, SQLQuery): sql_query = reparam(sql_query, vars) if _test: return sql_query db_cursor = self._db_cursor() self._db_execute(db_cursor, sql_query) if db_cursor.description: names = [x[0] for x in db_cursor.description] def iterwrapper(): row = db_cursor.fetchone() while row: yield storage(dict(zip(names, row))) row = db_cursor.fetchone() out = iterbetter(iterwrapper()) out.__len__ = lambda: int(db_cursor.rowcount) out.list = lambda: [storage(dict(zip(names, x))) \ for x in db_cursor.fetchall()] else: out = db_cursor.rowcount if not self.ctx.transactions: self.ctx.commit() return out def select(self, tables, vars=None, what='*', where=None, order=None, group=None, limit=None, offset=None, _test=False): """ Selects `what` from `tables` with clauses `where`, `order`, `group`, `limit`, and `offset`. 
Uses vars to interpolate. Otherwise, each clause can be a SQLQuery. >>> db = DB(None, {}) >>> db.select('foo', _test=True) <sql: 'SELECT * FROM foo'> >>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True) <sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'> """ if vars is None: vars = {} sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset) clauses = [self.gen_clause(sql, val, vars) for sql, val in sql_clauses if val is not None] qout = SQLQuery.join(clauses) if _test: return qout return self.query(qout, processed=True) def where(self, table, what='*', order=None, group=None, limit=None, offset=None, _test=False, **kwargs): """ Selects from `table` where keys are equal to values in `kwargs`. >>> db = DB(None, {}) >>> db.where('foo', bar_id=3, _test=True) <sql: 'SELECT * FROM foo WHERE bar_id = 3'> >>> db.where('foo', source=2, crust='dewey', _test=True) <sql: "SELECT * FROM foo WHERE source = 2 AND crust = 'dewey'"> >>> db.where('foo', _test=True) <sql: 'SELECT * FROM foo'> """ where_clauses = [] for k, v in kwargs.iteritems(): where_clauses.append(k + ' = ' + sqlquote(v)) if where_clauses: where = SQLQuery.join(where_clauses, " AND ") else: where = None return self.select(table, what=what, order=order, group=group, limit=limit, offset=offset, _test=_test, where=where) def sql_clauses(self, what, tables, where, group, order, limit, offset): return ( ('SELECT', what), ('FROM', sqllist(tables)), ('WHERE', where), ('GROUP BY', group), ('ORDER BY', order), ('LIMIT', limit), ('OFFSET', offset)) def gen_clause(self, sql, val, vars): if isinstance(val, (int, long)): if sql == 'WHERE': nout = 'id = ' + sqlquote(val) else: nout = SQLQuery(val) #@@@ elif isinstance(val, (list, tuple)) and len(val) == 2: nout = SQLQuery(val[0], val[1]) # backwards-compatibility elif isinstance(val, SQLQuery): nout = val else: nout = reparam(val, vars) def xjoin(a, b): if a and b: return a + ' ' + b else: return a or b return xjoin(sql, nout) def insert(self, tablename, seqname=None, _test=False, **values): """ Inserts `values` into `tablename`. Returns current sequence ID. Set `seqname` to the ID if it's not the default, or to `False` if there isn't one. >>> db = DB(None, {}) >>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True) >>> q <sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())"> >>> q.query() 'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())' >>> q.values() [2, 'bob'] """ def q(x): return "(" + x + ")" if values: _keys = SQLQuery.join(values.keys(), ', ') _values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ') sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values) else: sql_query = SQLQuery(self._get_insert_default_values_query(tablename)) if _test: return sql_query db_cursor = self._db_cursor() if seqname is not False: sql_query = self._process_insert_query(sql_query, tablename, seqname) if isinstance(sql_query, tuple): # for some databases, a separate query has to be made to find # the id of the inserted row. 
q1, q2 = sql_query self._db_execute(db_cursor, q1) self._db_execute(db_cursor, q2) else: self._db_execute(db_cursor, sql_query) try: out = db_cursor.fetchone()[0] except Exception: out = None if not self.ctx.transactions: self.ctx.commit() return out def _get_insert_default_values_query(self, table): return "INSERT INTO %s DEFAULT VALUES" % table def multiple_insert(self, tablename, values, seqname=None, _test=False): """ Inserts multiple rows into `tablename`. The `values` must be a list of dictioanries, one for each row to be inserted, each with the same set of keys. Returns the list of ids of the inserted rows. Set `seqname` to the ID if it's not the default, or to `False` if there isn't one. >>> db = DB(None, {}) >>> db.supports_multiple_insert = True >>> values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}] >>> db.multiple_insert('person', values=values, _test=True) <sql: "INSERT INTO person (name, email) VALUES ('foo', 'foo@example.com'), ('bar', 'bar@example.com')"> """ if not values: return [] if not self.supports_multiple_insert: out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values] if seqname is False: return None else: return out keys = values[0].keys() #@@ make sure all keys are valid for v in values: if v.keys() != keys: raise ValueError, 'Not all rows have the same keys' sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys))) for i, row in enumerate(values): if i != 0: sql_query.append(", ") SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")") if _test: return sql_query db_cursor = self._db_cursor() if seqname is not False: sql_query = self._process_insert_query(sql_query, tablename, seqname) if isinstance(sql_query, tuple): # for some databases, a separate query has to be made to find # the id of the inserted row. q1, q2 = sql_query self._db_execute(db_cursor, q1) self._db_execute(db_cursor, q2) else: self._db_execute(db_cursor, sql_query) try: out = db_cursor.fetchone()[0] out = range(out-len(values)+1, out+1) except Exception: out = None if not self.ctx.transactions: self.ctx.commit() return out def update(self, tables, where, vars=None, _test=False, **values): """ Update `tables` with clause `where` (interpolated using `vars`) and setting `values`. >>> db = DB(None, {}) >>> name = 'Joseph' >>> q = db.update('foo', where='name = $name', name='bob', age=2, ... created=SQLLiteral('NOW()'), vars=locals(), _test=True) >>> q <sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'"> >>> q.query() 'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s' >>> q.values() [2, 'bob', 'Joseph'] """ if vars is None: vars = {} where = self._where(where, vars) query = ( "UPDATE " + sqllist(tables) + " SET " + sqlwhere(values, ', ') + " WHERE " + where) if _test: return query db_cursor = self._db_cursor() self._db_execute(db_cursor, query) if not self.ctx.transactions: self.ctx.commit() return db_cursor.rowcount def delete(self, table, where, using=None, vars=None, _test=False): """ Deletes from `table` with clauses `where` and `using`. 
>>> db = DB(None, {}) >>> name = 'Joe' >>> db.delete('foo', where='name = $name', vars=locals(), _test=True) <sql: "DELETE FROM foo WHERE name = 'Joe'"> """ if vars is None: vars = {} where = self._where(where, vars) q = 'DELETE FROM ' + table if using: q += ' USING ' + sqllist(using) if where: q += ' WHERE ' + where if _test: return q db_cursor = self._db_cursor() self._db_execute(db_cursor, q) if not self.ctx.transactions: self.ctx.commit() return db_cursor.rowcount def _process_insert_query(self, query, tablename, seqname): return query def transaction(self): """Start a transaction.""" return Transaction(self.ctx) class PostgresDB(DB): """Postgres driver.""" def __init__(self, **keywords): if 'pw' in keywords: keywords['password'] = keywords.pop('pw') db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None)) if db_module.__name__ == "psycopg2": import psycopg2.extensions psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) if db_module.__name__ == "pgdb" and 'port' in keywords: keywords["host"] += ":" + str(keywords.pop('port')) # if db is not provided postgres driver will take it from PGDATABASE environment variable if 'db' in keywords: keywords['database'] = keywords.pop('db') self.dbname = "postgres" self.paramstyle = db_module.paramstyle DB.__init__(self, db_module, keywords) self.supports_multiple_insert = True self._sequences = None def _process_insert_query(self, query, tablename, seqname): if seqname is None: # when seqname is not provided guess the seqname and make sure it exists seqname = tablename + "_id_seq" if seqname not in self._get_all_sequences(): seqname = None if seqname: query += "; SELECT currval('%s')" % seqname return query def _get_all_sequences(self): """Query postgres to find names of all sequences used in this database.""" if self._sequences is None: q = "SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'" self._sequences = set([c.relname for c in self.query(q)]) return self._sequences def _connect(self, keywords): conn = DB._connect(self, keywords) try: conn.set_client_encoding('UTF8') except AttributeError: # fallback for pgdb driver conn.cursor().execute("set client_encoding to 'UTF-8'") return conn def _connect_with_pooling(self, keywords): conn = DB._connect_with_pooling(self, keywords) conn._con._con.set_client_encoding('UTF8') return conn class MySQLDB(DB): def __init__(self, **keywords): import MySQLdb as db if 'pw' in keywords: keywords['passwd'] = keywords['pw'] del keywords['pw'] if 'charset' not in keywords: keywords['charset'] = 'utf8' elif keywords['charset'] is None: del keywords['charset'] self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg self.dbname = "mysql" DB.__init__(self, db, keywords) self.supports_multiple_insert = True def _process_insert_query(self, query, tablename, seqname): return query, SQLQuery('SELECT last_insert_id();') def _get_insert_default_values_query(self, table): return "INSERT INTO %s () VALUES()" % table def import_driver(drivers, preferred=None): """Import the first available driver or preferred driver. 
""" if preferred: drivers = [preferred] for d in drivers: try: return __import__(d, None, None, ['x']) except ImportError: pass raise ImportError("Unable to import " + " or ".join(drivers)) class SqliteDB(DB): def __init__(self, **keywords): db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None)) if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]: db.paramstyle = 'qmark' # sqlite driver doesn't create datatime objects for timestamp columns unless `detect_types` option is passed. # It seems to be supported in sqlite3 and pysqlite2 drivers, not surte about sqlite. keywords.setdefault('detect_types', db.PARSE_DECLTYPES) self.paramstyle = db.paramstyle keywords['database'] = keywords.pop('db') keywords['pooling'] = False # sqlite don't allows connections to be shared by threads self.dbname = "sqlite" DB.__init__(self, db, keywords) def _process_insert_query(self, query, tablename, seqname): return query, SQLQuery('SELECT last_insert_rowid();') def query(self, *a, **kw): out = DB.query(self, *a, **kw) if isinstance(out, iterbetter): del out.__len__ return out class FirebirdDB(DB): """Firebird Database. """ def __init__(self, **keywords): try: import kinterbasdb as db except Exception: db = None pass if 'pw' in keywords: keywords['password'] = keywords.pop('pw') keywords['database'] = keywords.pop('db') self.paramstyle = db.paramstyle DB.__init__(self, db, keywords) def delete(self, table, where=None, using=None, vars=None, _test=False): # firebird doesn't support using clause using=None return DB.delete(self, table, where, using, vars, _test) def sql_clauses(self, what, tables, where, group, order, limit, offset): return ( ('SELECT', ''), ('FIRST', limit), ('SKIP', offset), ('', what), ('FROM', sqllist(tables)), ('WHERE', where), ('GROUP BY', group), ('ORDER BY', order) ) class MSSQLDB(DB): def __init__(self, **keywords): import pymssql as db if 'pw' in keywords: keywords['password'] = keywords.pop('pw') keywords['database'] = keywords.pop('db') self.dbname = "mssql" DB.__init__(self, db, keywords) def _process_query(self, sql_query): """Takes the SQLQuery object and returns query string and parameters. """ # MSSQLDB expects params to be a tuple. # Overwriting the default implementation to convert params to tuple. paramstyle = getattr(self, 'paramstyle', 'pyformat') query = sql_query.query(paramstyle) params = sql_query.values() return query, tuple(params) def sql_clauses(self, what, tables, where, group, order, limit, offset): return ( ('SELECT', what), ('TOP', limit), ('FROM', sqllist(tables)), ('WHERE', where), ('GROUP BY', group), ('ORDER BY', order), ('OFFSET', offset)) def _test(self): """Test LIMIT. Fake presence of pymssql module for running tests. >>> import sys >>> sys.modules['pymssql'] = sys.modules['sys'] MSSQL has TOP clause instead of LIMIT clause. 
>>> db = MSSQLDB(db='test', user='joe', pw='secret') >>> db.select('foo', limit=4, _test=True) <sql: 'SELECT * TOP 4 FROM foo'> """ pass class OracleDB(DB): def __init__(self, **keywords): import cx_Oracle as db if 'pw' in keywords: keywords['password'] = keywords.pop('pw') #@@ TODO: use db.makedsn if host, port is specified keywords['dsn'] = keywords.pop('db') self.dbname = 'oracle' db.paramstyle = 'numeric' self.paramstyle = db.paramstyle # oracle doesn't support pooling keywords.pop('pooling', None) DB.__init__(self, db, keywords) def _process_insert_query(self, query, tablename, seqname): if seqname is None: # It is not possible to get seq name from table name in Oracle return query else: return query + "; SELECT %s.currval FROM dual" % seqname def dburl2dict(url): """ Takes a URL to a database and parses it into an equivalent dictionary. >>> dburl2dict('postgres://james:day@serverfarm.example.net:5432/mygreatdb') {'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': '5432'} >>> dburl2dict('postgres://james:day@serverfarm.example.net/mygreatdb') {'user': 'james', 'host': 'serverfarm.example.net', 'db': 'mygreatdb', 'pw': 'day', 'dbn': 'postgres'} >>> dburl2dict('postgres://james:d%40y@serverfarm.example.net/mygreatdb') {'user': 'james', 'host': 'serverfarm.example.net', 'db': 'mygreatdb', 'pw': 'd@y', 'dbn': 'postgres'} """ dbn, rest = url.split('://', 1) user, rest = rest.split(':', 1) pw, rest = rest.split('@', 1) if ':' in rest: host, rest = rest.split(':', 1) port, rest = rest.split('/', 1) else: host, rest = rest.split('/', 1) port = None db = rest uq = urllib.unquote out = dict(dbn=dbn, user=uq(user), pw=uq(pw), host=uq(host), db=uq(db)) if port: out['port'] = port return out _databases = {} def database(dburl=None, **params): """Creates appropriate database using params. Pooling will be enabled if DBUtils module is available. Pooling can be disabled by passing pooling=False in params. """ if not dburl and not params: dburl = os.environ['DATABASE_URL'] if dburl: params = dburl2dict(dburl) dbn = params.pop('dbn') if dbn in _databases: return _databases[dbn](**params) else: raise UnknownDB, dbn def register_database(name, clazz): """ Register a database. >>> class LegacyDB(DB): ... def __init__(self, **params): ... pass ... >>> register_database('legacy', LegacyDB) >>> db = database(dbn='legacy', db='test', user='joe', passwd='secret') """ _databases[name] = clazz register_database('mysql', MySQLDB) register_database('postgres', PostgresDB) register_database('sqlite', SqliteDB) register_database('firebird', FirebirdDB) register_database('mssql', MSSQLDB) register_database('oracle', OracleDB) def _interpolate(format): """ Takes a format string and returns a list of 2-tuples of the form (boolean, string) where boolean says whether string should be evaled or not. 
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee) """ from tokenize import tokenprog def matchorfail(text, pos): match = tokenprog.match(text, pos) if match is None: raise _ItplError(text, pos) return match, match.end() namechars = "abcdefghijklmnopqrstuvwxyz" \ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"; chunks = [] pos = 0 while 1: dollar = format.find("$", pos) if dollar < 0: break nextchar = format[dollar + 1] if nextchar == "{": chunks.append((0, format[pos:dollar])) pos, level = dollar + 2, 1 while level: match, pos = matchorfail(format, pos) tstart, tend = match.regs[3] token = format[tstart:tend] if token == "{": level = level + 1 elif token == "}": level = level - 1 chunks.append((1, format[dollar + 2:pos - 1])) elif nextchar in namechars: chunks.append((0, format[pos:dollar])) match, pos = matchorfail(format, dollar + 1) while pos < len(format): if format[pos] == "." and \ pos + 1 < len(format) and format[pos + 1] in namechars: match, pos = matchorfail(format, pos + 1) elif format[pos] in "([": pos, level = pos + 1, 1 while level: match, pos = matchorfail(format, pos) tstart, tend = match.regs[3] token = format[tstart:tend] if token[0] in "([": level = level + 1 elif token[0] in ")]": level = level - 1 else: break chunks.append((1, format[dollar + 1:pos])) else: chunks.append((0, format[pos:dollar + 1])) pos = dollar + 1 + (nextchar == "$") if pos < len(format): chunks.append((0, format[pos:])) return chunks if __name__ == "__main__": import doctest doctest.testmod()
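
def _example_usage():
    """A minimal usage sketch of the API defined above, assuming the sqlite3
    driver is importable; the table and column names are made up for
    illustration."""
    db = database(dbn='sqlite', db='example.db')
    db.query("CREATE TABLE IF NOT EXISTS todo (id INTEGER PRIMARY KEY, title TEXT)")
    todo_id = db.insert('todo', title='write docs')
    for row in db.select('todo', where='id = $id', vars={'id': todo_id}):
        print row.id, row.title
    db.update('todo', where='id = $id', vars={'id': todo_id}, title='write more docs')
    db.delete('todo', where='id = $id', vars={'id': todo_id})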
gpl-2.0
6,414,266,332,227,157,000
7,628,228,271,474,132,000
32.186321
126
0.525573
false
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8
lib/python2.7/test/test_xdrlib.py
94
1597
from test import test_support import unittest import xdrlib class XDRTest(unittest.TestCase): def test_xdr(self): p = xdrlib.Packer() s = 'hello world' a = ['what', 'is', 'hapnin', 'doctor'] p.pack_int(42) p.pack_int(-17) p.pack_uint(9) p.pack_bool(True) p.pack_bool(False) p.pack_uhyper(45L) p.pack_float(1.9) p.pack_double(1.9) p.pack_string(s) p.pack_list(range(5), p.pack_uint) p.pack_array(a, p.pack_string) # now verify data = p.get_buffer() up = xdrlib.Unpacker(data) self.assertEqual(up.get_position(), 0) self.assertEqual(up.unpack_int(), 42) self.assertEqual(up.unpack_int(), -17) self.assertEqual(up.unpack_uint(), 9) self.assertTrue(up.unpack_bool() is True) # remember position pos = up.get_position() self.assertTrue(up.unpack_bool() is False) # rewind and unpack again up.set_position(pos) self.assertTrue(up.unpack_bool() is False) self.assertEqual(up.unpack_uhyper(), 45L) self.assertAlmostEqual(up.unpack_float(), 1.9) self.assertAlmostEqual(up.unpack_double(), 1.9) self.assertEqual(up.unpack_string(), s) self.assertEqual(up.unpack_list(up.unpack_uint), range(5)) self.assertEqual(up.unpack_array(up.unpack_string), a) up.done() self.assertRaises(EOFError, up.unpack_uint) def test_main(): test_support.run_unittest(XDRTest) if __name__ == "__main__": test_main()
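
def _example_roundtrip():
    """A small sketch of the pack/unpack round trip the test above exercises,
    usable on its own for a quick manual check."""
    p = xdrlib.Packer()
    p.pack_string('hello world')
    p.pack_int(42)
    up = xdrlib.Unpacker(p.get_buffer())
    assert up.unpack_string() == 'hello world'
    assert up.unpack_int() == 42
    up.done()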
gpl-2.0
-5,261,222,487,092,495,000
3,616,795,008,711,590,400
26.534483
66
0.585473
false
xisisu/RT-Xen
tools/python/xen/xend/MemoryPool.py
43
4567
#=========================================================================== # This library is free software; you can redistribute it and/or # modify it under the terms of version 2.1 of the GNU Lesser General Public # License as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #============================================================================ # Copyright (C) 2009 Novell, Inc. # Author: James (Song Wei) <jsong@novell.com> #============================================================================ import xen.lowlevel.xc import XendDomain import XendOptions from XendLogging import log from XendError import VmError class MemoryPool: def init(self): xoptions = XendOptions.instance() self.default_reserved_memory = xoptions.get_reserved_memory() * 1024 * 1024 #KiB if self.default_reserved_memory <= 0: return self.enable_memory_pool = 1 self.dom0_ballooning = xoptions.get_enable_dom0_ballooning() if not self.dom0_ballooning: return self.reserve_memory = 0 self.untouched_memory = 0 #init reserved memory #if not reserve_memory_size: xc = xen.lowlevel.xc.xc() physinfo = xc.physinfo() total_mem = physinfo['total_memory'] if total_mem < self.reserve_memory: self.default_reserved_memory = total_mem self.reserve_memory = self.default_reserved_memory self.untouched_memory = self.default_reserved_memory log.debug("MemoryPool: init reserved_memory %d KiB" %self.reserve_memory) def __init__(self): self.reserve_memory = 0 self.untouched_memory = 0 self.default_reserved_memory = 0 self.enable_memory_pool = 0 self.dom0_ballooning = 0 def available_memory_check(self, need_mem): return self.is_enabled() and self.reserved_memory > need_mem def decrease_memory(self, value): if not self.is_enabled() or value <= 4096: #4M for PV guest kernel and ramdisk unzip return elif self.reserve_memory < value: raise VMError(('I need %d KiB, but only have %d KiB in Memory Pool') %(value,self.reserve_memory)) else: self.reserve_memory -= value log.debug("MemoryPool: decrease_memory: decrease: %d reserved_memory %d KiB" %(value,self.reserve_memory)) return def decrease_untouched_memory(self, value): if not self.is_enabled(): return elif self.untouched_memory < value: raise VmError(('I need %d KiB untouch mem, but only have %d KiB untouched mem in Memory Pool') %(value,self.reserve_memory)) else: self.untouched_memory -= value log.debug("MemoryPool: decrease_untouched_memory: untouched_memory %d KiB" %self.untouched_memory) return def increase_memory(self, value): if not self.is_enabled(): return else: self.reserve_memory += value if self.reserve_memory > self.default_reserved_memory: raise VmError(('the maxsize of memory pool is %d KiB, but current is %d KiB') %(value,self.reserve_memory)) log.debug("MemoryPool: increase_memory:%d, reserved_memory %d KiB" %(value,self.reserve_memory)) return def is_enabled(self): return self.enable_memory_pool and self.dom0_ballooning def get_pool_size(self): if self.is_enabled(): return self.default_reserved_memory else: return 0 def get_left_memory(self): if self.is_enabled(): return self.reserve_memory else: return 0 def 
get_untouched_memory(self): if self.is_enabled(): return self.untouched_memory else: return 0 def instance(): """Singleton constructor. Use this instead of the class constructor. """ global MP_inst try: MP_inst except: MP_inst = MemoryPool() MP_inst.init() return MP_inst
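
def _example_usage():
    """A minimal sketch of how the pool is meant to be consumed (this assumes
    a Xen host with xend options configured): callers go through instance()
    and check is_enabled() before touching the pool."""
    pool = instance()
    if pool.is_enabled():
        log.debug("MemoryPool: %d KiB reserved, %d KiB left, %d KiB untouched"
                  % (pool.get_pool_size(), pool.get_left_memory(),
                     pool.get_untouched_memory()))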
gpl-2.0
-8,874,169,359,030,901,000
-547,266,227,107,999,360
37.70339
137
0.600175
false
ella/django-ratings
django_ratings/aggregation.py
1
1768
""" This file is for aggregation records from Rating,Agg tables to Agg and TotalRate table """ import logging from datetime import datetime, timedelta from django_ratings.models import Rating, Agg, TotalRate logger = logging.getLogger('django_ratings') # aggregate ratings older than 2 years by year DELTA_TIME_YEAR = 2*365*24*60*60 # ratings older than 2 months by month DELTA_TIME_MONTH = 2*30*24*60*60 # rest of the ratings (last 2 months) aggregate daily DELTA_TIME_DAY = -24*60*60 TIMES_ALL = {DELTA_TIME_YEAR : 'year', DELTA_TIME_MONTH : 'month', DELTA_TIME_DAY : 'day'} def transfer_agg_to_totalrate(): """ Transfer aggregation data from table Agg to table TotalRate """ logger.info("transfer_agg_to_totalrate BEGIN") if TotalRate.objects.count() != 0: TotalRate.objects.all().delete() Agg.objects.agg_to_totalrate() logger.info("transfer_agg_to_totalrate END") def transfer_agg_to_agg(): """ aggregation data from table Agg to table Agg """ logger.info("transfer_agg_to_agg BEGIN") timenow = datetime.now() for t in TIMES_ALL: TIME_DELTA = t time_agg = timenow - timedelta(seconds=TIME_DELTA) Agg.objects.move_agg_to_agg(time_agg, TIMES_ALL[t]) Agg.objects.agg_assume() logger.info("transfer_agg_to_agg END") def transfer_data(): """ transfer data from table Rating to table Agg """ logger.info("transfer_data BEGIN") timenow = datetime.now() for t in sorted(TIMES_ALL.keys(), reverse=True): TIME_DELTA = t time_agg = timenow - timedelta(seconds=TIME_DELTA) Rating.objects.move_rate_to_agg(time_agg, TIMES_ALL[t]) transfer_agg_to_agg() transfer_agg_to_totalrate() logger.info("transfer_data END")
bsd-3-clause
-1,106,919,097,889,229,600
6,914,948,605,613,987,000
28.966102
90
0.675339
false
mancoast/CPythonPyc_test
fail/321_test_funcattrs.py
56
10870
from test import support import types import unittest class FuncAttrsTest(unittest.TestCase): def setUp(self): class F: def a(self): pass def b(): return 3 self.fi = F() self.F = F self.b = b def cannot_set_attr(self, obj, name, value, exceptions): try: setattr(obj, name, value) except exceptions: pass else: self.fail("shouldn't be able to set %s to %r" % (name, value)) try: delattr(obj, name) except exceptions: pass else: self.fail("shouldn't be able to del %s" % name) class FunctionPropertiesTest(FuncAttrsTest): # Include the external setUp method that is common to all tests def test_module(self): self.assertEqual(self.b.__module__, __name__) def test_dir_includes_correct_attrs(self): self.b.known_attr = 7 self.assertIn('known_attr', dir(self.b), "set attributes not in dir listing of method") # Test on underlying function object of method self.F.a.known_attr = 7 self.assertIn('known_attr', dir(self.fi.a), "set attribute on function " "implementations, should show up in next dir") def test_duplicate_function_equality(self): # Body of `duplicate' is the exact same as self.b def duplicate(): 'my docstring' return 3 self.assertNotEqual(self.b, duplicate) def test_copying___code__(self): def test(): pass self.assertEqual(test(), None) test.__code__ = self.b.__code__ self.assertEqual(test(), 3) # self.b always returns 3, arbitrarily def test___globals__(self): self.assertIs(self.b.__globals__, globals()) self.cannot_set_attr(self.b, '__globals__', 2, (AttributeError, TypeError)) def test___closure__(self): a = 12 def f(): print(a) c = f.__closure__ self.assertIsInstance(c, tuple) self.assertEqual(len(c), 1) # don't have a type object handy self.assertEqual(c[0].__class__.__name__, "cell") self.cannot_set_attr(f, "__closure__", c, AttributeError) def test_empty_cell(self): def f(): print(a) try: f.__closure__[0].cell_contents except ValueError: pass else: self.fail("shouldn't be able to read an empty cell") a = 12 def test___name__(self): self.assertEqual(self.b.__name__, 'b') self.b.__name__ = 'c' self.assertEqual(self.b.__name__, 'c') self.b.__name__ = 'd' self.assertEqual(self.b.__name__, 'd') # __name__ and __name__ must be a string self.cannot_set_attr(self.b, '__name__', 7, TypeError) # __name__ must be available when in restricted mode. Exec will raise # AttributeError if __name__ is not available on f. 
s = """def f(): pass\nf.__name__""" exec(s, {'__builtins__': {}}) # Test on methods, too self.assertEqual(self.fi.a.__name__, 'a') self.cannot_set_attr(self.fi.a, "__name__", 'a', AttributeError) def test___code__(self): num_one, num_two = 7, 8 def a(): pass def b(): return 12 def c(): return num_one def d(): return num_two def e(): return num_one, num_two for func in [a, b, c, d, e]: self.assertEqual(type(func.__code__), types.CodeType) self.assertEqual(c(), 7) self.assertEqual(d(), 8) d.__code__ = c.__code__ self.assertEqual(c.__code__, d.__code__) self.assertEqual(c(), 7) # self.assertEqual(d(), 7) try: b.__code__ = c.__code__ except ValueError: pass else: self.fail("__code__ with different numbers of free vars should " "not be possible") try: e.__code__ = d.__code__ except ValueError: pass else: self.fail("__code__ with different numbers of free vars should " "not be possible") def test_blank_func_defaults(self): self.assertEqual(self.b.__defaults__, None) del self.b.__defaults__ self.assertEqual(self.b.__defaults__, None) def test_func_default_args(self): def first_func(a, b): return a+b def second_func(a=1, b=2): return a+b self.assertEqual(first_func.__defaults__, None) self.assertEqual(second_func.__defaults__, (1, 2)) first_func.__defaults__ = (1, 2) self.assertEqual(first_func.__defaults__, (1, 2)) self.assertEqual(first_func(), 3) self.assertEqual(first_func(3), 5) self.assertEqual(first_func(3, 5), 8) del second_func.__defaults__ self.assertEqual(second_func.__defaults__, None) try: second_func() except TypeError: pass else: self.fail("__defaults__ does not update; deleting it does not " "remove requirement") class InstancemethodAttrTest(FuncAttrsTest): def test___class__(self): self.assertEqual(self.fi.a.__self__.__class__, self.F) self.cannot_set_attr(self.fi.a, "__class__", self.F, TypeError) def test___func__(self): self.assertEqual(self.fi.a.__func__, self.F.a) self.cannot_set_attr(self.fi.a, "__func__", self.F.a, AttributeError) def test___self__(self): self.assertEqual(self.fi.a.__self__, self.fi) self.cannot_set_attr(self.fi.a, "__self__", self.fi, AttributeError) def test___func___non_method(self): # Behavior should be the same when a method is added via an attr # assignment self.fi.id = types.MethodType(id, self.fi) self.assertEqual(self.fi.id(), id(self.fi)) # Test usage try: self.fi.id.unknown_attr except AttributeError: pass else: self.fail("using unknown attributes should raise AttributeError") # Test assignment and deletion self.cannot_set_attr(self.fi.id, 'unknown_attr', 2, AttributeError) class ArbitraryFunctionAttrTest(FuncAttrsTest): def test_set_attr(self): self.b.known_attr = 7 self.assertEqual(self.b.known_attr, 7) try: self.fi.a.known_attr = 7 except AttributeError: pass else: self.fail("setting attributes on methods should raise error") def test_delete_unknown_attr(self): try: del self.b.unknown_attr except AttributeError: pass else: self.fail("deleting unknown attribute should raise TypeError") def test_unset_attr(self): for func in [self.b, self.fi.a]: try: func.non_existent_attr except AttributeError: pass else: self.fail("using unknown attributes should raise " "AttributeError") class FunctionDictsTest(FuncAttrsTest): def test_setting_dict_to_invalid(self): self.cannot_set_attr(self.b, '__dict__', None, TypeError) from collections import UserDict d = UserDict({'known_attr': 7}) self.cannot_set_attr(self.fi.a.__func__, '__dict__', d, TypeError) def test_setting_dict_to_valid(self): d = {'known_attr': 7} self.b.__dict__ = d # Test assignment 
self.assertIs(d, self.b.__dict__) # ... and on all the different ways of referencing the method's func self.F.a.__dict__ = d self.assertIs(d, self.fi.a.__func__.__dict__) self.assertIs(d, self.fi.a.__dict__) # Test value self.assertEqual(self.b.known_attr, 7) self.assertEqual(self.b.__dict__['known_attr'], 7) # ... and again, on all the different method's names self.assertEqual(self.fi.a.__func__.known_attr, 7) self.assertEqual(self.fi.a.known_attr, 7) def test_delete___dict__(self): try: del self.b.__dict__ except TypeError: pass else: self.fail("deleting function dictionary should raise TypeError") def test_unassigned_dict(self): self.assertEqual(self.b.__dict__, {}) def test_func_as_dict_key(self): value = "Some string" d = {} d[self.b] = value self.assertEqual(d[self.b], value) class FunctionDocstringTest(FuncAttrsTest): def test_set_docstring_attr(self): self.assertEqual(self.b.__doc__, None) docstr = "A test method that does nothing" self.b.__doc__ = docstr self.F.a.__doc__ = docstr self.assertEqual(self.b.__doc__, docstr) self.assertEqual(self.fi.a.__doc__, docstr) self.cannot_set_attr(self.fi.a, "__doc__", docstr, AttributeError) def test_delete_docstring(self): self.b.__doc__ = "The docstring" del self.b.__doc__ self.assertEqual(self.b.__doc__, None) def cell(value): """Create a cell containing the given value.""" def f(): print(a) a = value return f.__closure__[0] def empty_cell(empty=True): """Create an empty cell.""" def f(): print(a) # the intent of the following line is simply "if False:"; it's # spelt this way to avoid the danger that a future optimization # might simply remove an "if False:" code block. if not empty: a = 1729 return f.__closure__[0] class CellTest(unittest.TestCase): def test_comparison(self): # These tests are here simply to exercise the comparison code; # their presence should not be interpreted as providing any # guarantees about the semantics (or even existence) of cell # comparisons in future versions of CPython. self.assertTrue(cell(2) < cell(3)) self.assertTrue(empty_cell() < cell('saturday')) self.assertTrue(empty_cell() == empty_cell()) self.assertTrue(cell(-36) == cell(-36.0)) self.assertTrue(cell(True) > empty_cell()) class StaticMethodAttrsTest(unittest.TestCase): def test_func_attribute(self): def f(): pass c = classmethod(f) self.assertTrue(c.__func__ is f) s = staticmethod(f) self.assertTrue(s.__func__ is f) def test_main(): support.run_unittest(FunctionPropertiesTest, InstancemethodAttrTest, ArbitraryFunctionAttrTest, FunctionDictsTest, FunctionDocstringTest, CellTest, StaticMethodAttrsTest) if __name__ == "__main__": test_main()
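
def _example_funcattrs():
    """A small sketch, outside the test harness, of the behaviour these tests
    exercise: plain functions carry a writable __dict__, __name__ and
    __defaults__."""
    def greet(name, greeting="hello"):
        return "%s, %s" % (greeting, name)
    greet.known_attr = 7
    greet.__name__ = "salute"
    greet.__defaults__ = ("hi",)
    assert greet("world") == "hi, world"
    assert greet.__dict__["known_attr"] == 7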
gpl-3.0
1,310,205,042,526,877,000
5,794,579,233,051,980,000
32.446154
80
0.557314
false
philsch/ansible
lib/ansible/utils/module_docs_fragments/asa.py
123
4186
# # (c) 2016, Peter Sprygada <psprygada@ansible.com> # (c) 2016, Patrick Ogenstad <@ogenstad> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. class ModuleDocFragment(object): # Standard files documentation fragment DOCUMENTATION = """ options: authorize: description: - Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be used instead. default: no choices: ['yes', 'no'] context: description: - Specifies which context to target if you are running in the ASA in multiple context mode. Defaults to the current context you login to. default: null provider: description: - A dict object containing connection details. default: null suboptions: host: description: - Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport. port: description: - Specifies the port to use when building the connection to the remote device. default: 22 username: description: - Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate the SSH session. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead. password: description: - Specifies the password to use to authenticate the connection to the remote device. This value is used to authenticate the SSH session. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. default: null ssh_keyfile: description: - Specifies the SSH key to use to authenticate the connection to the remote device. This value is the path to the key used to authenticate the SSH session. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. authorize: description: - Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be used instead. default: no choices: ['yes', 'no'] auth_pass: description: - Specifies the password to use if required to enter privileged mode on the remote device. If I(authorize) is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead. default: none timeout: description: - Specifies idle timeout in seconds for the connection, in seconds. Useful if the console freezes before continuing. For example when saving configurations. 
default: 10 """
gpl-3.0
-1,121,495,472,757,608,000
-6,543,944,089,936,055,000
42.154639
87
0.671285
false
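The doc fragment recorded above repeatedly describes one precedence rule for connection options: an explicit task value wins, then the matching ANSIBLE_NET_* environment variable, then the documented default. A minimal, self-contained sketch of that fallback logic (not Ansible's actual implementation; the function and argument names are invented for illustration):

import os

def resolve_option(task_args, name, env_var=None, default=None):
    """Resolve a connection option using task value -> environment variable -> default."""
    if task_args.get(name) is not None:
        return task_args[name]
    if env_var and os.environ.get(env_var) is not None:
        return os.environ[env_var]
    return default

# 'authorize' falls back to its documented default of 'no' when neither the
# task nor ANSIBLE_NET_AUTHORIZE supplies a value.
print(resolve_option({}, 'authorize', env_var='ANSIBLE_NET_AUTHORIZE', default='no'))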
natcoin/natcoin
contrib/bitrpc/bitrpc.py
1
7836
from jsonrpc import ServiceProxy import sys import string # ===== BEGIN USER SETTINGS ===== # if you do not set these you will be prompted for a password for every command rpcuser = "" rpcpass = "" # ====== END USER SETTINGS ====== if rpcpass == "": access = ServiceProxy("http://127.0.0.1:9332") else: access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332") cmd = sys.argv[1].lower() if cmd == "backupwallet": try: path = raw_input("Enter destination path/filename: ") print access.backupwallet(path) except: print "\n---An error occurred---\n" elif cmd == "getaccount": try: addr = raw_input("Enter a Natcoin address: ") print access.getaccount(addr) except: print "\n---An error occurred---\n" elif cmd == "getaccountaddress": try: acct = raw_input("Enter an account name: ") print access.getaccountaddress(acct) except: print "\n---An error occurred---\n" elif cmd == "getaddressesbyaccount": try: acct = raw_input("Enter an account name: ") print access.getaddressesbyaccount(acct) except: print "\n---An error occurred---\n" elif cmd == "getbalance": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getbalance(acct, mc) except: print access.getbalance() except: print "\n---An error occurred---\n" elif cmd == "getblockbycount": try: height = raw_input("Height: ") print access.getblockbycount(height) except: print "\n---An error occurred---\n" elif cmd == "getblockcount": try: print access.getblockcount() except: print "\n---An error occurred---\n" elif cmd == "getblocknumber": try: print access.getblocknumber() except: print "\n---An error occurred---\n" elif cmd == "getconnectioncount": try: print access.getconnectioncount() except: print "\n---An error occurred---\n" elif cmd == "getdifficulty": try: print access.getdifficulty() except: print "\n---An error occurred---\n" elif cmd == "getgenerate": try: print access.getgenerate() except: print "\n---An error occurred---\n" elif cmd == "gethashespersec": try: print access.gethashespersec() except: print "\n---An error occurred---\n" elif cmd == "getinfo": try: print access.getinfo() except: print "\n---An error occurred---\n" elif cmd == "getnewaddress": try: acct = raw_input("Enter an account name: ") try: print access.getnewaddress(acct) except: print access.getnewaddress() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaccount": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaccount(acct, mc) except: print access.getreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaddress": try: addr = raw_input("Enter a Natcoin address (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaddress(addr, mc) except: print access.getreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "gettransaction": try: txid = raw_input("Enter a transaction ID: ") print access.gettransaction(txid) except: print "\n---An error occurred---\n" elif cmd == "getwork": try: data = raw_input("Data (optional): ") try: print access.gettransaction(data) except: print access.gettransaction() except: print "\n---An error occurred---\n" elif cmd == "help": try: cmd = raw_input("Command (optional): ") try: print access.help(cmd) except: print access.help() except: print "\n---An error occurred---\n" elif cmd == "listaccounts": try: mc = raw_input("Minimum confirmations (optional): 
") try: print access.listaccounts(mc) except: print access.listaccounts() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaccount": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? (true/false, optional): ") try: print access.listreceivedbyaccount(mc, incemp) except: print access.listreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaddress": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? (true/false, optional): ") try: print access.listreceivedbyaddress(mc, incemp) except: print access.listreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "listtransactions": try: acct = raw_input("Account (optional): ") count = raw_input("Number of transactions (optional): ") frm = raw_input("Skip (optional):") try: print access.listtransactions(acct, count, frm) except: print access.listtransactions() except: print "\n---An error occurred---\n" elif cmd == "move": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.move(frm, to, amt, mc, comment) except: print access.move(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendfrom": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendfrom(frm, to, amt, mc, comment, commentto) except: print access.sendfrom(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendmany": try: frm = raw_input("From: ") to = raw_input("To (in format address1:amount1,address2:amount2,...): ") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.sendmany(frm,to,mc,comment) except: print access.sendmany(frm,to) except: print "\n---An error occurred---\n" elif cmd == "sendtoaddress": try: to = raw_input("To (in format address1:amount1,address2:amount2,...): ") amt = raw_input("Amount:") comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendtoaddress(to,amt,comment,commentto) except: print access.sendtoaddress(to,amt) except: print "\n---An error occurred---\n" elif cmd == "setaccount": try: addr = raw_input("Address: ") acct = raw_input("Account:") print access.setaccount(addr,acct) except: print "\n---An error occurred---\n" elif cmd == "setgenerate": try: gen= raw_input("Generate? 
(true/false): ") cpus = raw_input("Max processors/cores (-1 for unlimited, optional):") try: print access.setgenerate(gen, cpus) except: print access.setgenerate(gen) except: print "\n---An error occurred---\n" elif cmd == "settxfee": try: amt = raw_input("Amount:") print access.settxfee(amt) except: print "\n---An error occurred---\n" elif cmd == "stop": try: print access.stop() except: print "\n---An error occurred---\n" elif cmd == "validateaddress": try: addr = raw_input("Address: ") print access.validateaddress(addr) except: print "\n---An error occurred---\n" elif cmd == "walletpassphrase": try: pwd = raw_input("Enter wallet passphrase: ") access.walletpassphrase(pwd, 60) print "\n---Wallet unlocked---\n" except: print "\n---An error occurred---\n" elif cmd == "walletpassphrasechange": try: pwd = raw_input("Enter old wallet passphrase: ") pwd2 = raw_input("Enter new wallet passphrase: ") access.walletpassphrasechange(pwd, pwd2) print print "\n---Passphrase changed---\n" except: print print "\n---An error occurred---\n" print else: print "Command not found or not supported"
mit
-5,133,121,904,799,606,000
5,180,742,877,962,550,000
23.185185
79
0.66169
false
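The wrapper script above relies on jsonrpc.ServiceProxy, which essentially POSTs JSON-RPC 1.0 requests to the node's RPC port. A rough standard-library sketch of that call pattern, assuming a natcoind listening on 127.0.0.1:9332 with RPC credentials configured (the URL, user and password below are placeholders):

import base64
import json
import urllib.request

def rpc_call(url, method, params=(), user=None, password=None):
    """POST a JSON-RPC 1.0 request and return the 'result' field of the reply."""
    payload = json.dumps({'id': 0, 'method': method, 'params': list(params)})
    request = urllib.request.Request(url, data=payload.encode('utf-8'),
                                     headers={'Content-Type': 'application/json'})
    if user is not None:
        token = base64.b64encode('{0}:{1}'.format(user, password).encode()).decode()
        request.add_header('Authorization', 'Basic ' + token)
    with urllib.request.urlopen(request) as response:
        return json.loads(response.read().decode('utf-8'))['result']

# Needs a running node, so the call is left commented out:
# print(rpc_call('http://127.0.0.1:9332', 'getblockcount', user='rpcuser', password='rpcpass'))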
Boldie/gourmet
setup.py
4
12089
#!/bin/env python # # setup.py for Gourmet import sys import glob import os.path import os import fileinput import string from types import StringType, ListType, TupleType from distutils.command.build_py import build_py as _build_py from distutils.command.build_scripts import build_scripts as _build_scripts from distutils.util import convert_path from DistUtilsExtra.command import build_extra, build_i18n, build_icons # grab the version from our "version" module # first we have to extend our path to include gourmet/ srcpath = os.path.split(__file__)[0] sys.path.append(os.path.join(srcpath, 'gourmet')) import version class build_py(_build_py): """build_py command This specific build_py command will modify module 'build_config' so that it contains information on installation prefixes afterwards. """ def build_module (self, module, module_file, package): _build_py.build_module(self, module, module_file, package) if type(package) is StringType: package = string.split(package, '.') elif type(package) not in (ListType, TupleType): raise TypeError, \ "'package' must be a string (dot-separated), list, or tuple" if ( module == 'settings' and len(package) == 1 and package[0] == 'gourmet' and 'install' in self.distribution.command_obj): outfile = self.get_module_outfile(self.build_lib, package, module) iobj = self.distribution.command_obj['install'] lib_dir = iobj.install_lib base = iobj.install_data if (iobj.root): lib_dir = lib_dir[len(iobj.root):] base = base[len(iobj.root):] base = os.path.join(base, 'share') data_dir = os.path.join(base, 'gourmet') # abuse fileinput to replace two lines in bin/gourmet for line in fileinput.input(outfile, inplace = 1): if "base_dir = " in line: line = "base_dir = '%s'\n" % base elif "lib_dir = " in line: line = "lib_dir = '%s'\n" % lib_dir elif "data_dir = " in line: line = "data_dir = '%s'\n" % data_dir elif "doc_base = " in line: line = "doc_base = '%s'\n" % \ os.path.join(base, 'doc', 'gourmet') elif "icon_base = " in line: line = "icon_base = '%s'\n" % \ os.path.join(base, 'icons', 'hicolor') elif "locale_base = " in line: line = "locale_base = '%s'\n" % \ os.path.join(base, 'locale') elif "plugin_base = " in line: line = "plugin_base = data_dir\n" print line, class build_scripts(_build_scripts): """build_scripts command This specific build_scripts command will modify the bin/gourmet script so that it contains information on installation prefixes afterwards. 
""" def copy_scripts(self): _build_scripts.copy_scripts(self) if "install" in self.distribution.command_obj: iobj = self.distribution.command_obj["install"] lib_dir = iobj.install_lib data_dir = iobj.install_data if iobj.root: lib_dir = lib_dir[len(iobj.root):] data_dir = data_dir[len(iobj.root):] script = convert_path("bin/gourmet") outfile = os.path.join(self.build_dir, os.path.basename(script)) # abuse fileinput to replace two lines in bin/gourmet for line in fileinput.input(outfile, inplace = 1): if "lib_dir = '.'" in line: line = "lib_dir = '%s'\n" % lib_dir elif "data_dir = '.'" in line: line = "data_dir = '%s'\n" % data_dir print line, if sys.platform == "win32": #gtk file inclusion import gtk # The runtime dir is in the same directory as the module: GTK_RUNTIME_DIR = os.path.join( os.path.split(os.path.dirname(gtk.__file__))[0], "runtime") assert os.path.exists(GTK_RUNTIME_DIR), "Cannot find GTK runtime data" GTK_THEME_DEFAULT = os.path.join("share", "themes", "Default") GTK_THEME_WINDOWS = os.path.join("share", "themes", "MS-Windows") GTK_GTKRC_DIR = os.path.join("etc", "gtk-2.0") GTK_GTKRC = "gtkrc" GTK_WIMP_DIR = os.path.join("lib", "gtk-2.0", "2.10.0", "engines") GTK_WIMP_DLL = "libwimp.dll" #If you want the Tango icons: GTK_ICONS = os.path.join("share", "icons") #There is also localisation data (which I omit, but you might not want to): GTK_LOCALE_DATA = os.path.join("share", "locale") def data_files(): '''Build list of data files to be installed''' data_files = [] for root, dirs, files in os.walk('data'): if files: files = [os.path.join(root, f) for f in files] data_files.append((os.path.join('share','gourmet', root[len('data')+1:]), files)) # files in /usr/share/X/ (not gourmet) files = [] base = os.path.join('share','gourmet') files.extend(data_files) files.extend([(os.path.join(base,'ui'), glob.glob(os.path.join('ui','*.ui')))]) files.extend([(os.path.join('share','doc','gourmet'), ['FAQ', 'LICENSE'])]) return files if sys.platform == "win32": from cx_Freeze import setup, Executable, build as build_cxf import msilib class build(build_extra.build_extra, build_cxf): def __init__(self, dist): build_extra.build_extra.__init__(self, dist) build_cxf.__init__(self, dist) def get_sub_comands(self): build_cxf.sub_commands(self) def initialize_options(self): build_extra.build_extra.initialize_options(self) build_cxf.initialize_options(self) def finalize_options(self): build_extra.build_extra.finalize_options(self) build_cxf.finalize_options(self) include_files = [] for i in data_files(): for j in i[1]: include_files.append((j, i[0])) icon_table = [ ('GourmetIco', msilib.Binary('data/icons/gourmet.ico')) ] property_table = [ ('ARPPRODUCTICON', 'GourmetIco'), ] msi_data = { 'Icon': icon_table, 'Property': property_table, } kwargs = dict(name="Gourmet Recipe Manager", executables=[Executable( os.path.join(srcpath, 'bin','gourmet'), base="Win32GUI", icon="data/icons/gourmet.ico", shortcutName="Gourmet Recipe Manager", shortcutDir="ProgramMenuFolder" ) ], options={ 'build_exe': { 'packages': [ 'gourmet', 'sqlalchemy', 'reportlab', 'reportlab.graphics', 'reportlab.lib', 'reportlab.pdfbase', 'reportlab.pdfgen', 'reportlab.platypus', 'xml.dom', 'lxml.etree', 'lxml._elementpath' ], 'includes': [ 'cairo', 'gio', 'pango', 'pangocairo', 'atk', 'BeautifulSoup' ], 'include_files': [ ('data', '.'), ('ui', 'ui'), ('LICENSE', os.path.join('doc', 'LICENSE')), ('FAQ', os.path.join('doc', 'FAQ')), (os.path.join(GTK_RUNTIME_DIR, GTK_THEME_DEFAULT), GTK_THEME_DEFAULT), (os.path.join(GTK_RUNTIME_DIR, 
GTK_THEME_WINDOWS), GTK_THEME_WINDOWS), #(os.path.join(GTK_RUNTIME_DIR, GTK_ICONS), GTK_ICONS), (os.path.join(GTK_RUNTIME_DIR, GTK_GTKRC_DIR, GTK_GTKRC), os.path.join(GTK_GTKRC_DIR, GTK_GTKRC)), (os.path.join(GTK_RUNTIME_DIR, GTK_WIMP_DIR, GTK_WIMP_DLL), os.path.join(GTK_WIMP_DIR, GTK_WIMP_DLL)), (os.path.join('build', 'mo'), 'locale'), (os.path.join("build", "share", "gourmet"), '.'), (os.path.join("gourmet", 'plugins'), 'plugins') ], # We're excluding the plugins module from being added to library.zip # and add it via include_files instead in order to faciliate # handling *.gourmet-plugin and extra files (such as *.ui files # and images). 'excludes': ['plugins','Tkinter','wx'], 'optimize': 2, 'compressed':1, 'include_msvcr': True, # see http://stackoverflow.com/questions/1979486/py2exe-win32api-pyc-importerror-dll-load-failed # libgcc_s_dw2-1.dll, if present, would crash Gourmet 'bin_excludes': ["mswsock.dll", "powrprof.dll","libgcc_s_dw2-1.dll"], }, 'bdist_msi': { 'upgrade_code': '{D19B9EC6-DF39-4C83-BF87-A67776D087FA}', 'data': msi_data } } ) else: from distutils.core import setup build = build_extra.build_extra kwargs = dict( name=version.name, data_files=data_files(), scripts=[os.path.join('bin','gourmet')] ) plugins = [] def crawl (base, basename): bdir = base subdirs = filter(lambda x: os.path.isdir(os.path.join(bdir,x)), os.listdir(bdir)) for subd in subdirs: name = basename + '.' + subd plugins.append(name) crawl(os.path.join(bdir,subd),name) crawl('gourmet/plugins', 'gourmet.plugins') result = setup( version = version.version, description = version.description, author = version.author, author_email = version.author_email, url = version.website, license = version.license, packages = ['gourmet', 'gourmet.backends', 'gourmet.util', 'gourmet.defaults', 'gourmet.gtk_extras', 'gourmet.importers', 'gourmet.exporters', 'gourmet.plugins', ] + plugins, package_data = {'gourmet': ['plugins/*/*.ui', 'plugins/*/images/*.png','plugins/*/*/images/*.png']}, cmdclass={'build' : build, 'build_i18n' : build_i18n.build_i18n, 'build_icons' : build_icons.build_icons, 'build_py' : build_py, 'build_scripts' : build_scripts, }, **kwargs )
gpl-2.0
1,751,445,854,944,515,300
1,343,509,535,628,101,600
39.703704
148
0.474398
false
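The custom build_py and build_scripts commands in the setup.py above both lean on the same trick: fileinput.input(..., inplace=1) redirects stdout into the file being read, so printing a (possibly modified) line rewrites it in place. A small stand-alone sketch of that pattern; the file name and the substituted paths are illustrative only:

import fileinput

def patch_prefixes(path, lib_dir, data_dir):
    """Rewrite placeholder assignments in 'path' with the real install paths."""
    for line in fileinput.input(path, inplace=True):
        if "lib_dir = '.'" in line:
            line = "lib_dir = '%s'\n" % lib_dir
        elif "data_dir = '.'" in line:
            line = "data_dir = '%s'\n" % data_dir
        print(line, end='')  # stdout is redirected into the file while inplace=True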
krafczyk/spack
var/spack/repos/builtin/packages/chlorop/package.py
2
2274
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os


class Chlorop(Package):
    """Chlorop predicts the presence of chloroplast transit peptides in
    protein sequences and the location of potential cTP cleavage sites.
    You will need to obtain the tarball by visiting the URL and completing
    the form. You can then either run spack install with the tarball in
    the directory, or add it to a mirror. You will need to set the
    CHLOROTMP environment variable to the full path of the directory you
    want chlorop to use as a temporary directory."""

    homepage = "http://www.cbs.dtu.dk/services/ChloroP/"
    url = "file://{0}/chlorop-1.1.Linux.tar.gz".format(os.getcwd())

    version('1.1', 'eb0ba6b28dfa735163ad5fc70e30139e46e33f6ae27f87666a7167a4ac5f71d9')

    depends_on('awk', type='run')
    patch('chlorop.patch')

    def install(self, spec, prefix):
        os.rename('chlorop', 'bin/chlorop')
        install_tree('.', prefix)

    def setup_environment(self, spack_env, run_env):
        run_env.set('CHLOROP', self.prefix)
lgpl-2.1
-2,488,739,929,112,157,000
3,482,382,158,170,846,700
42.730769
86
0.684257
false
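For orientation, the install() method of the package above amounts to the following, expressed with the standard library only. Spack's install_tree() is stood in for by shutil.copytree(), and the stage and prefix paths are placeholders; the real package of course runs inside Spack's build environment:

import os
import shutil

def install_chlorop(stage_dir, prefix):
    """Move the 'chlorop' binary into bin/ and copy the unpacked tree to the prefix."""
    os.rename(os.path.join(stage_dir, 'chlorop'),
              os.path.join(stage_dir, 'bin', 'chlorop'))
    # Stands in for Spack's install_tree('.', prefix); the destination must not exist yet.
    shutil.copytree(stage_dir, prefix)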
rogeriofalcone/libre
libre/apps/lock_manager/migrations/0001_initial.py
2
1308
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'Lock'
        db.create_table('lock_manager_lock', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('creation_datetime', self.gf('django.db.models.fields.DateTimeField')()),
            ('timeout', self.gf('django.db.models.fields.IntegerField')(default=30)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=48)),
        ))
        db.send_create_signal('lock_manager', ['Lock'])

    def backwards(self, orm):
        # Deleting model 'Lock'
        db.delete_table('lock_manager_lock')

    models = {
        'lock_manager.lock': {
            'Meta': {'object_name': 'Lock'},
            'creation_datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '48'}),
            'timeout': ('django.db.models.fields.IntegerField', [], {'default': '30'})
        }
    }

    complete_apps = ['lock_manager']
gpl-3.0
1,996,771,161,193,677,300
7,190,901,143,450,699,000
35.361111
102
0.584098
false
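For reference, the table created by the forwards() migration above corresponds to a Django model along these lines. The field names, types and app label come straight from the migration; the class itself is only a sketch and would have to live inside a configured Django project to be importable:

from django.db import models

class Lock(models.Model):
    creation_datetime = models.DateTimeField()
    timeout = models.IntegerField(default=30)
    name = models.CharField(unique=True, max_length=48)

    class Meta:
        app_label = 'lock_manager'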
yebeloved/idapython
pywraps/py_expr.py
16
5381
# -------------------------------------------------------------------------- import os import sys import idaapi import _idaapi from sys import getrefcount import gc try: import pywraps pywraps_there = True _idaapi.pyw_register_idc_func = pywraps.pyw_register_idc_func _idaapi.pyw_unregister_idc_func = pywraps.pyw_unregister_idc_func _idaapi.py_get_call_idc_func = pywraps.py_get_call_idc_func _idaapi.py_set_idc_func_ex = pywraps.py_set_idc_func_ex except Exception as e: pywraps_there = False print("exception: %s" % str(e)) print("Using PyWraps: %s" % pywraps_there) # -------------------------------------------------------------------------- #<pycode(py_expr)> try: import types import ctypes # Callback for IDC func callback (On Windows, we use stdcall) # typedef error_t idaapi idc_func_t(idc_value_t *argv,idc_value_t *r); _IDCFUNC_CB_T = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p) # A trampoline function that is called from idcfunc_t that will # call the Python callback with the argv and r properly serialized to python call_idc_func__ = ctypes.CFUNCTYPE(ctypes.c_long)(_idaapi.py_get_call_idc_func()) except: def call_idc_func__(*args): warning("IDC extensions need ctypes library in order to work") return 0 try: _IDCFUNC_CB_T = CFUNCTYPE(c_int, c_void_p, c_void_p) except: _IDCFUNC_CB_T = None # -------------------------------------------------------------------------- EXTFUN_BASE = 0x0001 """requires open database""" EXTFUN_NORET = 0x0002 """does not return. the interpreter may clean up its state before calling it.""" EXTFUN_SAFE = 0x0004 """thread safe function. may be called""" # -------------------------------------------------------------------------- class _IdcFunction(object): """ Internal class that calls pyw_call_idc_func() with a context """ def __init__(self, ctxptr): self.ctxptr = ctxptr # Take a reference to the ctypes callback # (note: this will create a circular reference) self.cb = _IDCFUNC_CB_T(self) fp_ptr = property(lambda self: ctypes.cast(self.cb, ctypes.c_void_p).value) def __call__(self, args, res): return call_idc_func__(self.ctxptr, args, res) # -------------------------------------------------------------------------- # Dictionary to remember IDC function names along with the context pointer # retrieved by using the internal pyw_register_idc_func() __IDC_FUNC_CTXS = {} # -------------------------------------------------------------------------- def set_idc_func_ex(name, fp=None, args=(), flags=0): """ Extends the IDC language by exposing a new IDC function that is backed up by a Python function This function also unregisters the IDC function if 'fp' was passed as None @param name: IDC function name to expose @param fp: Python callable that will receive the arguments and return a tuple. If this argument is None then the IDC function is unregistered @param args: Arguments. A tuple of idaapi.VT_XXX constants @param flags: IDC function flags. A combination of EXTFUN_XXX constants @return: Boolean. """ global __IDC_FUNC_CTXS # Get the context f = __IDC_FUNC_CTXS.get(name, None) # Unregistering? if fp is None: # Not registered? if f is None: return False # Break circular reference del f.cb # Delete the name from the dictionary del __IDC_FUNC_CTXS[name] # Delete the context and unregister the function return _idaapi.pyw_unregister_idc_func(f.ctxptr) # Registering a function that is already registered? 
if f is not None: # Unregister it first set_idc_func_ex(name, None) # Convert the tupple argument info to a string args = "".join([chr(x) for x in args]) # Create a context ctxptr = _idaapi.pyw_register_idc_func(name, args, fp) if ctxptr == 0: return False # Bind the context with the IdcFunc object f = _IdcFunction(ctxptr) # Remember the Python context __IDC_FUNC_CTXS[name] = f # Register IDC function with a callback return _idaapi.py_set_idc_func_ex( name, f.fp_ptr, args, flags) #</pycode(py_expr)> # -------------------------------------------------------------------------- def test1(): global MY_IDC_FUNC try: # Already registered? MY_IDC_FUNC # Unregister print("Unregistering function") set_idc_func_ex(MY_IDC_FUNC) except: MY_IDC_FUNC = "pysum" ok = set_idc_func_ex(MY_IDC_FUNC, my_idc_sum, (idaapi.VT_LONG, idaapi.VT_LONG), 0) if not ok: del MY_IDC_FUNC #</pycode(ex_expr)> # -------------------------------------------------------------------------- #<pycode(ex_expr)> def py_power(n, e): return n ** e ok = set_idc_func_ex("pow", py_power, (idaapi.VT_LONG, idaapi.VT_LONG), 0) if ok: print("Now the pow() will be present IDC!") else: print("Failed to register pow() IDC function") #</pycode(ex_expr)>
bsd-3-clause
885,773,264,320,631,200
-5,166,438,258,045,423,000
29.840237
98
0.547854
false
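The module above keeps each _IdcFunction (and its ctypes callback) referenced in __IDC_FUNC_CTXS so the C side can safely call back into Python. The same keep-a-reference pattern, stripped of everything IDA-specific, looks like this self-contained sketch:

import ctypes

# A C-callable signature: int callback(int, int)
CALLBACK_T = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int)

def py_add(a, b):
    return a + b

cb = CALLBACK_T(py_add)  # must stay referenced, or the generated thunk is freed
print(cb(2, 3))          # calling py_add through the C-callable wrapper prints 5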
KhronosGroup/COLLADA-CTS
StandardDataSets/collada/library_visual_scenes/visual_scene/extra/multiExtra/multiExtra.py
4
3846
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.

# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.

# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.

# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant

# Please feed your node list here:
tagLst = ['library_visual_scenes', 'visual_scene', 'extra', 'technique']
attrName = 'profile'
attrVal = ''
dataToCheck = ''

class SimpleJudgingObject:
    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def JudgeBaseline(self, context):
        # No step should crash
        self.__assistant.CheckCrashes(context)

        # Import/export/validate must exist and pass, while Render must only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])

        self.status_baseline = self.__assistant.GetResults()
        return self.status_baseline

    # To pass intermediate you need to pass basic; this object could also include additional
    # tests that were specific to the intermediate badge.
    def JudgeSuperior(self, context):
        if (self.status_baseline == False):
            self.status_superior = self.status_baseline
            return self.status_superior

        # Check for preservation of element data
        self.__assistant.FullPreservation(context, self.tagList, self.attrName)

        self.status_superior = self.__assistant.DeferJudgement(context)
        return self.status_superior

    # To pass advanced you need to pass intermediate; this object could also include additional
    # tests that were specific to the advanced badge.
    def JudgeExemplary(self, context):
        self.status_exemplary = self.status_superior
        return self.status_exemplary

# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
mit
-8,757,133,331,658,350,000
2,036,325,783,472,112,600
51.486111
466
0.713469
false
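The comment in the script above notes that the CTS loader looks for an instance literally named judgingObject. A rough illustration (not the CTS framework itself) of how a harness could load such a script by path and pick that instance up; running it against the real file would additionally need the StandardDataSets package importable:

import importlib.util

def load_judging_object(path):
    """Load a judging script as a module and return its 'judgingObject' instance."""
    spec = importlib.util.spec_from_file_location('judging_script', path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return getattr(module, 'judgingObject')

# judge = load_judging_object('multiExtra.py')
# result = judge.JudgeBaseline(context)  # 'context' is supplied by the CTS framework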
Khan/git-bigfile
vendor/boto/rds2/layer1.py
76
158232
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import boto from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo from boto.exception import JSONResponseError from boto.rds2 import exceptions from boto.compat import json class RDSConnection(AWSQueryConnection): """ Amazon Relational Database Service Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique. Amazon RDS gives you access to the capabilities of a familiar MySQL or Oracle database server. This means the code, applications, and tools you already use today with your existing MySQL or Oracle databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use. This is the Amazon RDS API Reference . It contains a comprehensive description of all Amazon RDS Query APIs and data types. Note that this API is asynchronous and some actions may require polling to determine when an action has been applied. See the parameter description to determine if a change is applied immediately or on the next instance reboot or during the maintenance window. For more information on Amazon RDS concepts and usage scenarios, go to the `Amazon RDS User Guide`_. 
""" APIVersion = "2013-09-09" DefaultRegionName = "us-east-1" DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com" ResponseError = JSONResponseError _faults = { "InvalidSubnet": exceptions.InvalidSubnet, "DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded, "DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists, "DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded, "InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded, "InvalidRestore": exceptions.InvalidRestore, "InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState, "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded, "DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists, "InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity, "ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded, "DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound, "DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists, "ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound, "DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs, "InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState, "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState, "ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound, "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound, "SNSNoAuthorization": exceptions.SNSNoAuthorization, "SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded, "OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded, "DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound, "SNSInvalidTopic": exceptions.SNSInvalidTopic, "InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState, "DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound, "InvalidOptionGroupState": exceptions.InvalidOptionGroupState, "SourceNotFound": exceptions.SourceNotFound, "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound, "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded, "DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported, "InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState, "InvalidDBSubnetState": exceptions.InvalidDBSubnetState, "InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState, "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist, "DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded, "ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ, "AuthorizationNotFound": exceptions.AuthorizationNotFound, "OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists, "SubscriptionNotFound": exceptions.SubscriptionNotFound, "DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure, "PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled, "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists, "DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded, "OptionGroupNotFound": exceptions.OptionGroupNotFound, "DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists, "DBInstanceNotFound": exceptions.DBInstanceNotFound, "ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists, "InvalidDBInstanceState": exceptions.InvalidDBInstanceState, "DBSnapshotNotFound": exceptions.DBSnapshotNotFound, "DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists, "StorageQuotaExceeded": exceptions.StorageQuotaExceeded, 
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse, } def __init__(self, **kwargs): region = kwargs.pop('region', None) if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) if 'host' not in kwargs: kwargs['host'] = region.endpoint super(RDSConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): return ['hmac-v4'] def add_source_identifier_to_subscription(self, subscription_name, source_identifier): """ Adds a source identifier to an existing RDS event notification subscription. :type subscription_name: string :param subscription_name: The name of the RDS event notification subscription you want to add a source identifier to. :type source_identifier: string :param source_identifier: The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens. Constraints: + If the source type is a DB instance, then a `DBInstanceIdentifier` must be supplied. + If the source type is a DB security group, a `DBSecurityGroupName` must be supplied. + If the source type is a DB parameter group, a `DBParameterGroupName` must be supplied. + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be supplied. """ params = { 'SubscriptionName': subscription_name, 'SourceIdentifier': source_identifier, } return self._make_request( action='AddSourceIdentifierToSubscription', verb='POST', path='/', params=params) def add_tags_to_resource(self, resource_name, tags): """ Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in Condition statement in IAM policy for Amazon RDS. For an overview on tagging Amazon RDS resources, see `Tagging Amazon RDS Resources`_. :type resource_name: string :param resource_name: The Amazon RDS resource the tags will be added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see ` Constructing an RDS Amazon Resource Name (ARN)`_. :type tags: list :param tags: The tags to be assigned to the Amazon RDS resource. """ params = {'ResourceName': resource_name, } self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='AddTagsToResource', verb='POST', path='/', params=params) def authorize_db_security_group_ingress(self, db_security_group_name, cidrip=None, ec2_security_group_name=None, ec2_security_group_id=None, ec2_security_group_owner_id=None): """ Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC). You cannot authorize ingress from an EC2 security group in one Region to an Amazon RDS DB instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another. For an overview of CIDR ranges, go to the `Wikipedia Tutorial`_. :type db_security_group_name: string :param db_security_group_name: The name of the DB security group to add authorization to. 
:type cidrip: string :param cidrip: The IP range to authorize. :type ec2_security_group_name: string :param ec2_security_group_name: Name of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. :type ec2_security_group_id: string :param ec2_security_group_id: Id of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. :type ec2_security_group_owner_id: string :param ec2_security_group_owner_id: AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. """ params = {'DBSecurityGroupName': db_security_group_name, } if cidrip is not None: params['CIDRIP'] = cidrip if ec2_security_group_name is not None: params['EC2SecurityGroupName'] = ec2_security_group_name if ec2_security_group_id is not None: params['EC2SecurityGroupId'] = ec2_security_group_id if ec2_security_group_owner_id is not None: params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id return self._make_request( action='AuthorizeDBSecurityGroupIngress', verb='POST', path='/', params=params) def copy_db_snapshot(self, source_db_snapshot_identifier, target_db_snapshot_identifier, tags=None): """ Copies the specified DBSnapshot. The source DBSnapshot must be in the "available" state. :type source_db_snapshot_identifier: string :param source_db_snapshot_identifier: The identifier for the source DB snapshot. Constraints: + Must be the identifier for a valid system snapshot in the "available" state. Example: `rds:mydb-2012-04-02-00-01` :type target_db_snapshot_identifier: string :param target_db_snapshot_identifier: The identifier for the copied snapshot. Constraints: + Cannot be null, empty, or blank + Must contain from 1 to 255 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens Example: `my-db-snapshot` :type tags: list :param tags: A list of tags. """ params = { 'SourceDBSnapshotIdentifier': source_db_snapshot_identifier, 'TargetDBSnapshotIdentifier': target_db_snapshot_identifier, } if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='CopyDBSnapshot', verb='POST', path='/', params=params) def create_db_instance(self, db_instance_identifier, allocated_storage, db_instance_class, engine, master_username, master_user_password, db_name=None, db_security_groups=None, vpc_security_group_ids=None, availability_zone=None, db_subnet_group_name=None, preferred_maintenance_window=None, db_parameter_group_name=None, backup_retention_period=None, preferred_backup_window=None, port=None, multi_az=None, engine_version=None, auto_minor_version_upgrade=None, license_model=None, iops=None, option_group_name=None, character_set_name=None, publicly_accessible=None, tags=None): """ Creates a new DB instance. :type db_name: string :param db_name: The meaning of this parameter differs according to the database engine you use. 
**MySQL** The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance. Constraints: + Must contain 1 to 64 alphanumeric characters + Cannot be a word reserved by the specified database engine Type: String **Oracle** The Oracle System ID (SID) of the created DB instance. Default: `ORCL` Constraints: + Cannot be longer than 8 characters **SQL Server** Not applicable. Must be null. :type db_instance_identifier: string :param db_instance_identifier: The DB instance identifier. This parameter is stored as a lowercase string. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server). + First character must be a letter. + Cannot end with a hyphen or contain two consecutive hyphens. Example: `mydbinstance` :type allocated_storage: integer :param allocated_storage: The amount of storage (in gigabytes) to be initially allocated for the database instance. **MySQL** Constraints: Must be an integer from 5 to 1024. Type: Integer **Oracle** Constraints: Must be an integer from 10 to 1024. **SQL Server** Constraints: Must be an integer from 200 to 1024 (Standard Edition and Enterprise Edition) or from 30 to 1024 (Express Edition and Web Edition) :type db_instance_class: string :param db_instance_class: The compute and memory capacity of the DB instance. Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge` :type engine: string :param engine: The name of the database engine to be used for this instance. Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` | `sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web` :type master_username: string :param master_username: The name of master user for the client DB instance. **MySQL** Constraints: + Must be 1 to 16 alphanumeric characters. + First character must be a letter. + Cannot be a reserved word for the chosen database engine. Type: String **Oracle** Constraints: + Must be 1 to 30 alphanumeric characters. + First character must be a letter. + Cannot be a reserved word for the chosen database engine. **SQL Server** Constraints: + Must be 1 to 128 alphanumeric characters. + First character must be a letter. + Cannot be a reserved word for the chosen database engine. :type master_user_password: string :param master_user_password: The password for the master database user. Can be any printable ASCII character except "/", '"', or "@". Type: String **MySQL** Constraints: Must contain from 8 to 41 characters. **Oracle** Constraints: Must contain from 8 to 30 characters. **SQL Server** Constraints: Must contain from 8 to 128 characters. :type db_security_groups: list :param db_security_groups: A list of DB security groups to associate with this DB instance. Default: The default DB security group for the database engine. :type vpc_security_group_ids: list :param vpc_security_group_ids: A list of EC2 VPC security groups to associate with this DB instance. Default: The default EC2 VPC security group for the DB subnet group's VPC. :type availability_zone: string :param availability_zone: The EC2 Availability Zone that the database instance will be created in. Default: A random, system-chosen Availability Zone in the endpoint's region. Example: `us-east-1d` Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to `True`. The specified Availability Zone must be in the same region as the current endpoint. 
:type db_subnet_group_name: string :param db_subnet_group_name: A DB subnet group to associate with this DB instance. If there is no DB subnet group, then it is a non-VPC DB instance. :type preferred_maintenance_window: string :param preferred_maintenance_window: The weekly time range (in UTC) during which system maintenance can occur. Format: `ddd:hh24:mi-ddd:hh24:mi` Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see ` Adjusting the Preferred Maintenance Window`_ in the Amazon RDS User Guide. Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. :type db_parameter_group_name: string :param db_parameter_group_name: The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type backup_retention_period: integer :param backup_retention_period: The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Default: 1 Constraints: + Must be a value from 0 to 8 + Cannot be set to 0 if the DB instance is a master instance with read replicas :type preferred_backup_window: string :param preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled, using the `BackupRetentionPeriod` parameter. Default: A 30-minute window selected at random from an 8-hour block of time per region. See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned. Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes. :type port: integer :param port: The port number on which the database accepts connections. **MySQL** Default: `3306` Valid Values: `1150-65535` Type: Integer **Oracle** Default: `1521` Valid Values: `1150-65535` **SQL Server** Default: `1433` Valid Values: `1150-65535` except for `1434` and `3389`. :type multi_az: boolean :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true. :type engine_version: string :param engine_version: The version number of the database engine to use. **MySQL** Example: `5.1.42` Type: String **Oracle** Example: `11.2.0.2.v2` Type: String **SQL Server** Example: `10.50.2789.0.v1` :type auto_minor_version_upgrade: boolean :param auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Default: `True` :type license_model: string :param license_model: License model information for this DB instance. Valid values: `license-included` | `bring-your-own-license` | `general- public-license` :type iops: integer :param iops: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. Constraints: Must be an integer greater than 1000. :type option_group_name: string :param option_group_name: Indicates that the DB instance should be associated with the specified option group. 
Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance :type character_set_name: string :param character_set_name: For supported engines, indicates that the DB instance should be associated with the specified CharacterSet. :type publicly_accessible: boolean :param publicly_accessible: Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case. + **Default VPC:**true + **VPC:**false If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private. :type tags: list :param tags: A list of tags. """ params = { 'DBInstanceIdentifier': db_instance_identifier, 'AllocatedStorage': allocated_storage, 'DBInstanceClass': db_instance_class, 'Engine': engine, 'MasterUsername': master_username, 'MasterUserPassword': master_user_password, } if db_name is not None: params['DBName'] = db_name if db_security_groups is not None: self.build_list_params(params, db_security_groups, 'DBSecurityGroups.member') if vpc_security_group_ids is not None: self.build_list_params(params, vpc_security_group_ids, 'VpcSecurityGroupIds.member') if availability_zone is not None: params['AvailabilityZone'] = availability_zone if db_subnet_group_name is not None: params['DBSubnetGroupName'] = db_subnet_group_name if preferred_maintenance_window is not None: params['PreferredMaintenanceWindow'] = preferred_maintenance_window if db_parameter_group_name is not None: params['DBParameterGroupName'] = db_parameter_group_name if backup_retention_period is not None: params['BackupRetentionPeriod'] = backup_retention_period if preferred_backup_window is not None: params['PreferredBackupWindow'] = preferred_backup_window if port is not None: params['Port'] = port if multi_az is not None: params['MultiAZ'] = str( multi_az).lower() if engine_version is not None: params['EngineVersion'] = engine_version if auto_minor_version_upgrade is not None: params['AutoMinorVersionUpgrade'] = str( auto_minor_version_upgrade).lower() if license_model is not None: params['LicenseModel'] = license_model if iops is not None: params['Iops'] = iops if option_group_name is not None: params['OptionGroupName'] = option_group_name if character_set_name is not None: params['CharacterSetName'] = character_set_name if publicly_accessible is not None: params['PubliclyAccessible'] = str( publicly_accessible).lower() if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='CreateDBInstance', verb='POST', path='/', params=params) def create_db_instance_read_replica(self, db_instance_identifier, source_db_instance_identifier, db_instance_class=None, availability_zone=None, port=None, auto_minor_version_upgrade=None, iops=None, option_group_name=None, publicly_accessible=None, tags=None): """ Creates a DB instance that 
acts as a read replica of a source DB instance. All read replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below. The source DB instance must have backup retention enabled. :type db_instance_identifier: string :param db_instance_identifier: The DB instance identifier of the read replica. This is the unique key that identifies a DB instance. This parameter is stored as a lowercase string. :type source_db_instance_identifier: string :param source_db_instance_identifier: The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to five read replicas. Constraints: Must be the identifier of an existing DB instance that is not already a read replica DB instance. :type db_instance_class: string :param db_instance_class: The compute and memory capacity of the read replica. Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge` Default: Inherits from the source DB instance. :type availability_zone: string :param availability_zone: The Amazon EC2 Availability Zone that the read replica will be created in. Default: A random, system-chosen Availability Zone in the endpoint's region. Example: `us-east-1d` :type port: integer :param port: The port number that the DB instance uses for connections. Default: Inherits from the source DB instance Valid Values: `1150-65535` :type auto_minor_version_upgrade: boolean :param auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the read replica during the maintenance window. Default: Inherits from the source DB instance :type iops: integer :param iops: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. :type option_group_name: string :param option_group_name: The option group the DB instance will be associated with. If omitted, the default option group for the engine specified will be used. :type publicly_accessible: boolean :param publicly_accessible: Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case. + **Default VPC:**true + **VPC:**false If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private. :type tags: list :param tags: A list of tags. 
""" params = { 'DBInstanceIdentifier': db_instance_identifier, 'SourceDBInstanceIdentifier': source_db_instance_identifier, } if db_instance_class is not None: params['DBInstanceClass'] = db_instance_class if availability_zone is not None: params['AvailabilityZone'] = availability_zone if port is not None: params['Port'] = port if auto_minor_version_upgrade is not None: params['AutoMinorVersionUpgrade'] = str( auto_minor_version_upgrade).lower() if iops is not None: params['Iops'] = iops if option_group_name is not None: params['OptionGroupName'] = option_group_name if publicly_accessible is not None: params['PubliclyAccessible'] = str( publicly_accessible).lower() if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='CreateDBInstanceReadReplica', verb='POST', path='/', params=params) def create_db_parameter_group(self, db_parameter_group_name, db_parameter_group_family, description, tags=None): """ Creates a new DB parameter group. A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup . Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance . When you associate a new DB parameter group with a running DB instance, you need to reboot the DB Instance for the new DB parameter group and associated settings to take effect. :type db_parameter_group_name: string :param db_parameter_group_name: The name of the DB parameter group. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens This value is stored as a lower-case string. :type db_parameter_group_family: string :param db_parameter_group_family: The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family. :type description: string :param description: The description for the DB parameter group. :type tags: list :param tags: A list of tags. """ params = { 'DBParameterGroupName': db_parameter_group_name, 'DBParameterGroupFamily': db_parameter_group_family, 'Description': description, } if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='CreateDBParameterGroup', verb='POST', path='/', params=params) def create_db_security_group(self, db_security_group_name, db_security_group_description, tags=None): """ Creates a new DB security group. DB security groups control access to a DB instance. :type db_security_group_name: string :param db_security_group_name: The name for the DB security group. This value is stored as a lowercase string. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens + Must not be "Default" + May not contain spaces Example: `mysecuritygroup` :type db_security_group_description: string :param db_security_group_description: The description for the DB security group. :type tags: list :param tags: A list of tags. 
""" params = { 'DBSecurityGroupName': db_security_group_name, 'DBSecurityGroupDescription': db_security_group_description, } if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='CreateDBSecurityGroup', verb='POST', path='/', params=params) def create_db_snapshot(self, db_snapshot_identifier, db_instance_identifier, tags=None): """ Creates a DBSnapshot. The source DBInstance must be in "available" state. :type db_snapshot_identifier: string :param db_snapshot_identifier: The identifier for the DB snapshot. Constraints: + Cannot be null, empty, or blank + Must contain from 1 to 255 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens Example: `my-snapshot-id` :type db_instance_identifier: string :param db_instance_identifier: The DB instance identifier. This is the unique key that identifies a DB instance. This parameter isn't case sensitive. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type tags: list :param tags: A list of tags. """ params = { 'DBSnapshotIdentifier': db_snapshot_identifier, 'DBInstanceIdentifier': db_instance_identifier, } if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='CreateDBSnapshot', verb='POST', path='/', params=params) def create_db_subnet_group(self, db_subnet_group_name, db_subnet_group_description, subnet_ids, tags=None): """ Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region. :type db_subnet_group_name: string :param db_subnet_group_name: The name for the DB subnet group. This value is stored as a lowercase string. Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be "Default". Example: `mySubnetgroup` :type db_subnet_group_description: string :param db_subnet_group_description: The description for the DB subnet group. :type subnet_ids: list :param subnet_ids: The EC2 Subnet IDs for the DB subnet group. :type tags: list :param tags: A list of tags into tuples. """ params = { 'DBSubnetGroupName': db_subnet_group_name, 'DBSubnetGroupDescription': db_subnet_group_description, } self.build_list_params(params, subnet_ids, 'SubnetIds.member') if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='CreateDBSubnetGroup', verb='POST', path='/', params=params) def create_event_subscription(self, subscription_name, sns_topic_arn, source_type=None, event_categories=None, source_ids=None, enabled=None, tags=None): """ Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console. You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup. 
If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you specify neither the SourceType nor the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account. :type subscription_name: string :param subscription_name: The name of the subscription. Constraints: The name must be less than 255 characters. :type sns_topic_arn: string :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it. :type source_type: string :param source_type: The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned. Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot :type event_categories: list :param event_categories: A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the `Events`_ topic in the Amazon RDS User Guide or by using the **DescribeEventCategories** action. :type source_ids: list :param source_ids: The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens. Constraints: + If SourceIds are supplied, SourceType must also be provided. + If the source type is a DB instance, then a `DBInstanceIdentifier` must be supplied. + If the source type is a DB security group, a `DBSecurityGroupName` must be supplied. + If the source type is a DB parameter group, a `DBParameterGroupName` must be supplied. + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be supplied. :type enabled: boolean :param enabled: A Boolean value; set to **true** to activate the subscription, set to **false** to create the subscription but not activate it. :type tags: list :param tags: A list of tags. """ params = { 'SubscriptionName': subscription_name, 'SnsTopicArn': sns_topic_arn, } if source_type is not None: params['SourceType'] = source_type if event_categories is not None: self.build_list_params(params, event_categories, 'EventCategories.member') if source_ids is not None: self.build_list_params(params, source_ids, 'SourceIds.member') if enabled is not None: params['Enabled'] = str( enabled).lower() if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='CreateEventSubscription', verb='POST', path='/', params=params) def create_option_group(self, option_group_name, engine_name, major_engine_version, option_group_description, tags=None): """ Creates a new option group. You can create up to 20 option groups. :type option_group_name: string :param option_group_name: Specifies the name of the option group to be created.
Constraints: + Must be 1 to 255 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens Example: `myoptiongroup` :type engine_name: string :param engine_name: Specifies the name of the engine that this option group should be associated with. :type major_engine_version: string :param major_engine_version: Specifies the major version of the engine that this option group should be associated with. :type option_group_description: string :param option_group_description: The description of the option group. :type tags: list :param tags: A list of tags. """ params = { 'OptionGroupName': option_group_name, 'EngineName': engine_name, 'MajorEngineVersion': major_engine_version, 'OptionGroupDescription': option_group_description, } if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='CreateOptionGroup', verb='POST', path='/', params=params) def delete_db_instance(self, db_instance_identifier, skip_final_snapshot=None, final_db_snapshot_identifier=None): """ The DeleteDBInstance action deletes a previously provisioned DB instance. A successful response from the web service indicates the request was received correctly. When you delete a DB instance, all automated backups for that instance are deleted and cannot be recovered. Manual DB snapshots of the DB instance to be deleted are not deleted. If a final DB snapshot is requested, the status of the RDS instance will be "deleting" until the DB snapshot is created. The API action `DescribeDBInstance` is used to monitor the status of this operation. The action cannot be canceled or reverted once submitted. :type db_instance_identifier: string :param db_instance_identifier: The DB instance identifier for the DB instance to be deleted. This parameter isn't case sensitive. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type skip_final_snapshot: boolean :param skip_final_snapshot: Determines whether a final DB snapshot is created before the DB instance is deleted. If `True` is specified, no DBSnapshot is created. If `False` is specified, a DB snapshot is created before the DB instance is deleted. The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is `False`. Default: `False` :type final_db_snapshot_identifier: string :param final_db_snapshot_identifier: The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to `False`. Specifying this parameter while also setting the SkipFinalSnapshot parameter to `True` results in an error. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens """ params = {'DBInstanceIdentifier': db_instance_identifier, } if skip_final_snapshot is not None: params['SkipFinalSnapshot'] = str( skip_final_snapshot).lower() if final_db_snapshot_identifier is not None: params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier return self._make_request( action='DeleteDBInstance', verb='POST', path='/', params=params) def delete_db_parameter_group(self, db_parameter_group_name): """ Deletes a specified DBParameterGroup. The DB parameter group to be deleted cannot be associated with any DB instances.
:type db_parameter_group_name: string :param db_parameter_group_name: The name of the DB parameter group. Constraints: + Must be the name of an existing DB parameter group + You cannot delete a default DB parameter group + Cannot be associated with any DB instances """ params = {'DBParameterGroupName': db_parameter_group_name, } return self._make_request( action='DeleteDBParameterGroup', verb='POST', path='/', params=params) def delete_db_security_group(self, db_security_group_name): """ Deletes a DB security group. The specified DB security group must not be associated with any DB instances. :type db_security_group_name: string :param db_security_group_name: The name of the DB security group to delete. You cannot delete the default DB security group. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens + Must not be "Default" + May not contain spaces """ params = {'DBSecurityGroupName': db_security_group_name, } return self._make_request( action='DeleteDBSecurityGroup', verb='POST', path='/', params=params) def delete_db_snapshot(self, db_snapshot_identifier): """ Deletes a DBSnapshot. The DBSnapshot must be in the `available` state to be deleted. :type db_snapshot_identifier: string :param db_snapshot_identifier: The DBSnapshot identifier. Constraints: Must be the name of an existing DB snapshot in the `available` state. """ params = {'DBSnapshotIdentifier': db_snapshot_identifier, } return self._make_request( action='DeleteDBSnapshot', verb='POST', path='/', params=params) def delete_db_subnet_group(self, db_subnet_group_name): """ Deletes a DB subnet group. The specified database subnet group must not be associated with any DB instances. :type db_subnet_group_name: string :param db_subnet_group_name: The name of the database subnet group to delete. You cannot delete the default subnet group. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens """ params = {'DBSubnetGroupName': db_subnet_group_name, } return self._make_request( action='DeleteDBSubnetGroup', verb='POST', path='/', params=params) def delete_event_subscription(self, subscription_name): """ Deletes an RDS event notification subscription. :type subscription_name: string :param subscription_name: The name of the RDS event notification subscription you want to delete. """ params = {'SubscriptionName': subscription_name, } return self._make_request( action='DeleteEventSubscription', verb='POST', path='/', params=params) def delete_option_group(self, option_group_name): """ Deletes an existing option group. :type option_group_name: string :param option_group_name: The name of the option group to be deleted. You cannot delete default option groups. """ params = {'OptionGroupName': option_group_name, } return self._make_request( action='DeleteOptionGroup', verb='POST', path='/', params=params) def describe_db_engine_versions(self, engine=None, engine_version=None, db_parameter_group_family=None, max_records=None, marker=None, default_only=None, list_supported_character_sets=None): """ Returns a list of the available DB engines. :type engine: string :param engine: The database engine to return. :type engine_version: string :param engine_version: The database engine version to return. 
Example: `5.1.49` :type db_parameter_group_family: string :param db_parameter_group_family: The name of a specific DB parameter group family to return details for. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type max_records: integer :param max_records: The maximum number of records to include in the response. If more than the `MaxRecords` value is available, a pagination token called a marker is included in the response so that the following results can be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. :type default_only: boolean :param default_only: Indicates that only the default version of the specified engine or engine and major version combination is returned. :type list_supported_character_sets: boolean :param list_supported_character_sets: If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version. """ params = {} if engine is not None: params['Engine'] = engine if engine_version is not None: params['EngineVersion'] = engine_version if db_parameter_group_family is not None: params['DBParameterGroupFamily'] = db_parameter_group_family if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker if default_only is not None: params['DefaultOnly'] = str( default_only).lower() if list_supported_character_sets is not None: params['ListSupportedCharacterSets'] = str( list_supported_character_sets).lower() return self._make_request( action='DescribeDBEngineVersions', verb='POST', path='/', params=params) def describe_db_instances(self, db_instance_identifier=None, filters=None, max_records=None, marker=None): """ Returns information about provisioned RDS instances. This API supports pagination. :type db_instance_identifier: string :param db_instance_identifier: The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case sensitive. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type filters: list :param filters: :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results may be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords` . 
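A hedged pagination sketch; ``conn`` is a connected RDS API client, and the response-unwrapping keys shown are an assumption about the parsed response shape rather than a documented contract::

    marker = None
    while True:
        page = conn.describe_db_instances(max_records=20, marker=marker)
        # Assumed nesting of the parsed response; adjust if it differs.
        result = page['DescribeDBInstancesResponse']['DescribeDBInstancesResult']
        for instance in result['DBInstances']:
            print(instance['DBInstanceIdentifier'])
        marker = result.get('Marker')
        if not marker:
            break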
""" params = {} if db_instance_identifier is not None: params['DBInstanceIdentifier'] = db_instance_identifier if filters is not None: self.build_complex_list_params( params, filters, 'Filters.member', ('FilterName', 'FilterValue')) if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeDBInstances', verb='POST', path='/', params=params) def describe_db_log_files(self, db_instance_identifier, filename_contains=None, file_last_written=None, file_size=None, max_records=None, marker=None): """ Returns a list of DB log files for the DB instance. :type db_instance_identifier: string :param db_instance_identifier: The customer-assigned name of the DB instance that contains the log files you want to list. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type filename_contains: string :param filename_contains: Filters the available log files for log file names that contain the specified string. :type file_last_written: long :param file_last_written: Filters the available log files for files written since the specified date, in POSIX timestamp format. :type file_size: long :param file_size: Filters the available log files for files larger than the specified size. :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. :type marker: string :param marker: The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords. """ params = {'DBInstanceIdentifier': db_instance_identifier, } if filename_contains is not None: params['FilenameContains'] = filename_contains if file_last_written is not None: params['FileLastWritten'] = file_last_written if file_size is not None: params['FileSize'] = file_size if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeDBLogFiles', verb='POST', path='/', params=params) def describe_db_parameter_groups(self, db_parameter_group_name=None, filters=None, max_records=None, marker=None): """ Returns a list of `DBParameterGroup` descriptions. If a `DBParameterGroupName` is specified, the list will contain only the description of the specified DB parameter group. :type db_parameter_group_name: string :param db_parameter_group_name: The name of a specific DB parameter group to return details for. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type filters: list :param filters: :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results may be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous `DescribeDBParameterGroups` request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. 
""" params = {} if db_parameter_group_name is not None: params['DBParameterGroupName'] = db_parameter_group_name if filters is not None: self.build_complex_list_params( params, filters, 'Filters.member', ('FilterName', 'FilterValue')) if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeDBParameterGroups', verb='POST', path='/', params=params) def describe_db_parameters(self, db_parameter_group_name, source=None, max_records=None, marker=None): """ Returns the detailed parameter list for a particular DB parameter group. :type db_parameter_group_name: string :param db_parameter_group_name: The name of a specific DB parameter group to return details for. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type source: string :param source: The parameter types to return. Default: All parameter types returned Valid Values: `user | system | engine-default` :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results may be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous `DescribeDBParameters` request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. """ params = {'DBParameterGroupName': db_parameter_group_name, } if source is not None: params['Source'] = source if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeDBParameters', verb='POST', path='/', params=params) def describe_db_security_groups(self, db_security_group_name=None, filters=None, max_records=None, marker=None): """ Returns a list of `DBSecurityGroup` descriptions. If a `DBSecurityGroupName` is specified, the list will contain only the descriptions of the specified DB security group. :type db_security_group_name: string :param db_security_group_name: The name of the DB security group to return details for. :type filters: list :param filters: :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results may be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. 
""" params = {} if db_security_group_name is not None: params['DBSecurityGroupName'] = db_security_group_name if filters is not None: self.build_complex_list_params( params, filters, 'Filters.member', ('FilterName', 'FilterValue')) if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeDBSecurityGroups', verb='POST', path='/', params=params) def describe_db_snapshots(self, db_instance_identifier=None, db_snapshot_identifier=None, snapshot_type=None, filters=None, max_records=None, marker=None): """ Returns information about DB snapshots. This API supports pagination. :type db_instance_identifier: string :param db_instance_identifier: A DB instance identifier to retrieve the list of DB snapshots for. Cannot be used in conjunction with `DBSnapshotIdentifier`. This parameter is not case sensitive. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type db_snapshot_identifier: string :param db_snapshot_identifier: A specific DB snapshot identifier to describe. Cannot be used in conjunction with `DBInstanceIdentifier`. This value is stored as a lowercase string. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens + If this is the identifier of an automated snapshot, the `SnapshotType` parameter must also be specified. :type snapshot_type: string :param snapshot_type: The type of snapshots that will be returned. Values can be "automated" or "manual." If not specified, the returned results will include all snapshots types. :type filters: list :param filters: :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results may be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous `DescribeDBSnapshots` request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. """ params = {} if db_instance_identifier is not None: params['DBInstanceIdentifier'] = db_instance_identifier if db_snapshot_identifier is not None: params['DBSnapshotIdentifier'] = db_snapshot_identifier if snapshot_type is not None: params['SnapshotType'] = snapshot_type if filters is not None: self.build_complex_list_params( params, filters, 'Filters.member', ('FilterName', 'FilterValue')) if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeDBSnapshots', verb='POST', path='/', params=params) def describe_db_subnet_groups(self, db_subnet_group_name=None, filters=None, max_records=None, marker=None): """ Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup. For an overview of CIDR ranges, go to the `Wikipedia Tutorial`_. :type db_subnet_group_name: string :param db_subnet_group_name: The name of the DB subnet group to return details for. 
:type filters: list :param filters: :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results may be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. """ params = {} if db_subnet_group_name is not None: params['DBSubnetGroupName'] = db_subnet_group_name if filters is not None: self.build_complex_list_params( params, filters, 'Filters.member', ('FilterName', 'FilterValue')) if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeDBSubnetGroups', verb='POST', path='/', params=params) def describe_engine_default_parameters(self, db_parameter_group_family, max_records=None, marker=None): """ Returns the default engine and system parameter information for the specified database engine. :type db_parameter_group_family: string :param db_parameter_group_family: The name of the DB parameter group family. :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results may be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous `DescribeEngineDefaultParameters` request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. """ params = { 'DBParameterGroupFamily': db_parameter_group_family, } if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeEngineDefaultParameters', verb='POST', path='/', params=params) def describe_event_categories(self, source_type=None): """ Displays a list of categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in the ` Events`_ topic in the Amazon RDS User Guide. :type source_type: string :param source_type: The type of source that will be generating the events. Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot """ params = {} if source_type is not None: params['SourceType'] = source_type return self._make_request( action='DescribeEventCategories', verb='POST', path='/', params=params) def describe_event_subscriptions(self, subscription_name=None, filters=None, max_records=None, marker=None): """ Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status. If you specify a SubscriptionName, lists the description for that subscription. :type subscription_name: string :param subscription_name: The name of the RDS event notification subscription you want to describe. :type filters: list :param filters: :type max_records: integer :param max_records: The maximum number of records to include in the response. 
If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords` . """ params = {} if subscription_name is not None: params['SubscriptionName'] = subscription_name if filters is not None: self.build_complex_list_params( params, filters, 'Filters.member', ('FilterName', 'FilterValue')) if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeEventSubscriptions', verb='POST', path='/', params=params) def describe_events(self, source_identifier=None, source_type=None, start_time=None, end_time=None, duration=None, event_categories=None, max_records=None, marker=None): """ Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned. :type source_identifier: string :param source_identifier: The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response. Constraints: + If SourceIdentifier is supplied, SourceType must also be provided. + If the source type is `DBInstance`, then a `DBInstanceIdentifier` must be supplied. + If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must be supplied. + If the source type is `DBParameterGroup`, a `DBParameterGroupName` must be supplied. + If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be supplied. + Cannot end with a hyphen or contain two consecutive hyphens. :type source_type: string :param source_type: The event source to retrieve events for. If no value is specified, all events are returned. :type start_time: timestamp :param start_time: The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the `ISO8601 Wikipedia page.`_ Example: 2009-07-08T18:00Z :type end_time: timestamp :param end_time: The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the `ISO8601 Wikipedia page.`_ Example: 2009-07-08T18:00Z :type duration: integer :param duration: The number of minutes to retrieve events for. Default: 60 :type event_categories: list :param event_categories: A list of event categories that trigger notifications for a event notification subscription. :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results may be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. 
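A hedged usage sketch; ``conn`` is a connected RDS API client and the instance name is a placeholder::

    # Retrieve backup-related events for one instance over the last
    # 24 hours (duration is expressed in minutes).
    conn.describe_events(
        source_identifier='mydbinstance',
        source_type='db-instance',
        duration=1440,
        event_categories=['backup'])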
""" params = {} if source_identifier is not None: params['SourceIdentifier'] = source_identifier if source_type is not None: params['SourceType'] = source_type if start_time is not None: params['StartTime'] = start_time if end_time is not None: params['EndTime'] = end_time if duration is not None: params['Duration'] = duration if event_categories is not None: self.build_list_params(params, event_categories, 'EventCategories.member') if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeEvents', verb='POST', path='/', params=params) def describe_option_group_options(self, engine_name, major_engine_version=None, max_records=None, marker=None): """ Describes all available options. :type engine_name: string :param engine_name: A required parameter. Options available for the given Engine name will be described. :type major_engine_version: string :param major_engine_version: If specified, filters the results to include only options for the specified major engine version. :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. """ params = {'EngineName': engine_name, } if major_engine_version is not None: params['MajorEngineVersion'] = major_engine_version if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeOptionGroupOptions', verb='POST', path='/', params=params) def describe_option_groups(self, option_group_name=None, filters=None, marker=None, max_records=None, engine_name=None, major_engine_version=None): """ Describes the available option groups. :type option_group_name: string :param option_group_name: The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion. :type filters: list :param filters: :type marker: string :param marker: An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type engine_name: string :param engine_name: Filters the list of option groups to only include groups associated with a specific database engine. :type major_engine_version: string :param major_engine_version: Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified. 
""" params = {} if option_group_name is not None: params['OptionGroupName'] = option_group_name if filters is not None: self.build_complex_list_params( params, filters, 'Filters.member', ('FilterName', 'FilterValue')) if marker is not None: params['Marker'] = marker if max_records is not None: params['MaxRecords'] = max_records if engine_name is not None: params['EngineName'] = engine_name if major_engine_version is not None: params['MajorEngineVersion'] = major_engine_version return self._make_request( action='DescribeOptionGroups', verb='POST', path='/', params=params) def describe_orderable_db_instance_options(self, engine, engine_version=None, db_instance_class=None, license_model=None, vpc=None, max_records=None, marker=None): """ Returns a list of orderable DB instance options for the specified engine. :type engine: string :param engine: The name of the engine to retrieve DB instance options for. :type engine_version: string :param engine_version: The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version. :type db_instance_class: string :param db_instance_class: The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class. :type license_model: string :param license_model: The license model filter value. Specify this parameter to show only the available offerings matching the specified license model. :type vpc: boolean :param vpc: The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings. :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords` . """ params = {'Engine': engine, } if engine_version is not None: params['EngineVersion'] = engine_version if db_instance_class is not None: params['DBInstanceClass'] = db_instance_class if license_model is not None: params['LicenseModel'] = license_model if vpc is not None: params['Vpc'] = str( vpc).lower() if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeOrderableDBInstanceOptions', verb='POST', path='/', params=params) def describe_reserved_db_instances(self, reserved_db_instance_id=None, reserved_db_instances_offering_id=None, db_instance_class=None, duration=None, product_description=None, offering_type=None, multi_az=None, filters=None, max_records=None, marker=None): """ Returns information about reserved DB instances for this account, or about a specified reserved DB instance. :type reserved_db_instance_id: string :param reserved_db_instance_id: The reserved DB instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID. :type reserved_db_instances_offering_id: string :param reserved_db_instances_offering_id: The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier. 
:type db_instance_class: string :param db_instance_class: The DB instance class filter value. Specify this parameter to show only those reservations matching the specified DB instances class. :type duration: string :param duration: The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration. Valid Values: `1 | 3 | 31536000 | 94608000` :type product_description: string :param product_description: The product description filter value. Specify this parameter to show only those reservations matching the specified product description. :type offering_type: string :param offering_type: The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type. Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy Utilization" ` :type multi_az: boolean :param multi_az: The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter. :type filters: list :param filters: :type max_records: integer :param max_records: The maximum number of records to include in the response. If more than the `MaxRecords` value is available, a pagination token called a marker is included in the response so that the following results can be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. """ params = {} if reserved_db_instance_id is not None: params['ReservedDBInstanceId'] = reserved_db_instance_id if reserved_db_instances_offering_id is not None: params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id if db_instance_class is not None: params['DBInstanceClass'] = db_instance_class if duration is not None: params['Duration'] = duration if product_description is not None: params['ProductDescription'] = product_description if offering_type is not None: params['OfferingType'] = offering_type if multi_az is not None: params['MultiAZ'] = str( multi_az).lower() if filters is not None: self.build_complex_list_params( params, filters, 'Filters.member', ('FilterName', 'FilterValue')) if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeReservedDBInstances', verb='POST', path='/', params=params) def describe_reserved_db_instances_offerings(self, reserved_db_instances_offering_id=None, db_instance_class=None, duration=None, product_description=None, offering_type=None, multi_az=None, max_records=None, marker=None): """ Lists available reserved DB instance offerings. :type reserved_db_instances_offering_id: string :param reserved_db_instances_offering_id: The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier. Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706` :type db_instance_class: string :param db_instance_class: The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class. :type duration: string :param duration: Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration. 
Valid Values: `1 | 3 | 31536000 | 94608000` :type product_description: string :param product_description: Product description filter value. Specify this parameter to show only the available offerings matching the specified product description. :type offering_type: string :param offering_type: The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type. Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy Utilization" ` :type multi_az: boolean :param multi_az: The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter. :type max_records: integer :param max_records: The maximum number of records to include in the response. If more than the `MaxRecords` value is available, a pagination token called a marker is included in the response so that the following results can be retrieved. Default: 100 Constraints: minimum 20, maximum 100 :type marker: string :param marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by `MaxRecords`. """ params = {} if reserved_db_instances_offering_id is not None: params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id if db_instance_class is not None: params['DBInstanceClass'] = db_instance_class if duration is not None: params['Duration'] = duration if product_description is not None: params['ProductDescription'] = product_description if offering_type is not None: params['OfferingType'] = offering_type if multi_az is not None: params['MultiAZ'] = str( multi_az).lower() if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeReservedDBInstancesOfferings', verb='POST', path='/', params=params) def download_db_log_file_portion(self, db_instance_identifier, log_file_name, marker=None, number_of_lines=None): """ Downloads all or a portion of the specified log file. :type db_instance_identifier: string :param db_instance_identifier: The customer-assigned name of the DB instance that contains the log files you want to list. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type log_file_name: string :param log_file_name: The name of the log file to be downloaded. :type marker: string :param marker: The pagination token provided in the previous request. If this parameter is specified, the response includes only records beyond the marker, up to MaxRecords. :type number_of_lines: integer :param number_of_lines: The number of lines remaining to be downloaded. """ params = { 'DBInstanceIdentifier': db_instance_identifier, 'LogFileName': log_file_name, } if marker is not None: params['Marker'] = marker if number_of_lines is not None: params['NumberOfLines'] = number_of_lines return self._make_request( action='DownloadDBLogFilePortion', verb='POST', path='/', params=params) def list_tags_for_resource(self, resource_name): """ Lists all tags on an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see `Tagging Amazon RDS Resources`_. :type resource_name: string :param resource_name: The Amazon RDS resource with tags to be listed. This value is an Amazon Resource Name (ARN).
For information about creating an ARN, see ` Constructing an RDS Amazon Resource Name (ARN)`_. """ params = {'ResourceName': resource_name, } return self._make_request( action='ListTagsForResource', verb='POST', path='/', params=params) def modify_db_instance(self, db_instance_identifier, allocated_storage=None, db_instance_class=None, db_security_groups=None, vpc_security_group_ids=None, apply_immediately=None, master_user_password=None, db_parameter_group_name=None, backup_retention_period=None, preferred_backup_window=None, preferred_maintenance_window=None, multi_az=None, engine_version=None, allow_major_version_upgrade=None, auto_minor_version_upgrade=None, iops=None, option_group_name=None, new_db_instance_identifier=None): """ Modify settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. :type db_instance_identifier: string :param db_instance_identifier: The DB instance identifier. This value is stored as a lowercase string. Constraints: + Must be the identifier for an existing DB instance + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type allocated_storage: integer :param allocated_storage: The new storage capacity of the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the `ApplyImmediately` parameter is set to `True` for this request. **MySQL** Default: Uses existing setting Valid Values: 5-1024 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer **Oracle** Default: Uses existing setting Valid Values: 10-1024 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. **SQL Server** Cannot be modified. If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance. :type db_instance_class: string :param db_instance_class: The new compute and memory capacity of the DB instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action. Passing a value for this parameter causes an outage during the change and is applied during the next maintenance window, unless the `ApplyImmediately` parameter is specified as `True` for this request. 
Default: Uses existing setting Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge` :type db_security_groups: list :param db_security_groups: A list of DB security groups to authorize on this DB instance. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type vpc_security_group_ids: list :param vpc_security_group_ids: A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type apply_immediately: boolean :param apply_immediately: Specifies whether or not the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the `PreferredMaintenanceWindow` setting for the DB instance. If this parameter is passed as `False`, changes to the DB instance are applied on the next call to RebootDBInstance, the next maintenance reboot, or the next failure reboot, whichever occurs first. See each parameter to determine when a change is applied. Default: `False` :type master_user_password: string :param master_user_password: The new password for the DB instance master user. Can be any printable ASCII character except "/", '"', or "@". Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the `MasterUserPassword` element exists in the `PendingModifiedValues` element of the operation response. Default: Uses existing setting Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server). Amazon RDS API actions never return the password, so this action provides a way to regain access to a master instance user if the password is lost. :type db_parameter_group_name: string :param db_parameter_group_name: The name of the DB parameter group to apply to this DB instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the `ApplyImmediately` parameter is set to `True` for this request. Default: Uses existing setting Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance. :type backup_retention_period: integer :param backup_retention_period: The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the `ApplyImmediately` parameter is set to `True` for this request. If you change the parameter from one non-zero value to another non- zero value, the change is asynchronously applied as soon as possible. 
Default: Uses existing setting Constraints: + Must be a value from 0 to 8 + Cannot be set to 0 if the DB instance is a master instance with read replicas or if the DB instance is a read replica :type preferred_backup_window: string :param preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled, as determined by the `BackupRetentionPeriod`. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: + Must be in the format hh24:mi-hh24:mi + Times should be Universal Time Coordinated (UTC) + Must not conflict with the preferred maintenance window + Must be at least 30 minutes :type preferred_maintenance_window: string :param preferred_maintenance_window: The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes :type multi_az: boolean :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the `ApplyImmediately` parameter is set to `True` for this request. Constraints: Cannot be specified if the DB instance is a read replica. :type engine_version: string :param engine_version: The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the `ApplyImmediately` parameter is set to `True` for this request. For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family. Example: `5.1.42` :type allow_major_version_upgrade: boolean :param allow_major_version_upgrade: Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version. :type auto_minor_version_upgrade: boolean :param auto_minor_version_upgrade: Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to `True` during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version. :type iops: integer :param iops: The new Provisioned IOPS (I/O operations per second) value for the RDS instance. 
Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the `ApplyImmediately` parameter is set to `True` for this request. Default: Uses existing setting Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance. :type option_group_name: string :param option_group_name: Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the `ApplyImmediately` parameter is set to `True` for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted. Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance :type new_db_instance_identifier: string :param new_db_instance_identifier: The new DB instance identifier for the DB instance when renaming a DB Instance. This value is stored as a lowercase string. 
Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens """ params = {'DBInstanceIdentifier': db_instance_identifier, } if allocated_storage is not None: params['AllocatedStorage'] = allocated_storage if db_instance_class is not None: params['DBInstanceClass'] = db_instance_class if db_security_groups is not None: self.build_list_params(params, db_security_groups, 'DBSecurityGroups.member') if vpc_security_group_ids is not None: self.build_list_params(params, vpc_security_group_ids, 'VpcSecurityGroupIds.member') if apply_immediately is not None: params['ApplyImmediately'] = str( apply_immediately).lower() if master_user_password is not None: params['MasterUserPassword'] = master_user_password if db_parameter_group_name is not None: params['DBParameterGroupName'] = db_parameter_group_name if backup_retention_period is not None: params['BackupRetentionPeriod'] = backup_retention_period if preferred_backup_window is not None: params['PreferredBackupWindow'] = preferred_backup_window if preferred_maintenance_window is not None: params['PreferredMaintenanceWindow'] = preferred_maintenance_window if multi_az is not None: params['MultiAZ'] = str( multi_az).lower() if engine_version is not None: params['EngineVersion'] = engine_version if allow_major_version_upgrade is not None: params['AllowMajorVersionUpgrade'] = str( allow_major_version_upgrade).lower() if auto_minor_version_upgrade is not None: params['AutoMinorVersionUpgrade'] = str( auto_minor_version_upgrade).lower() if iops is not None: params['Iops'] = iops if option_group_name is not None: params['OptionGroupName'] = option_group_name if new_db_instance_identifier is not None: params['NewDBInstanceIdentifier'] = new_db_instance_identifier return self._make_request( action='ModifyDBInstance', verb='POST', path='/', params=params) def modify_db_parameter_group(self, db_parameter_group_name, parameters): """ Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: `ParameterName`, `ParameterValue`, and `ApplyMethod`. A maximum of 20 parameters can be modified in a single request. The `apply-immediate` method can be used only for dynamic parameters; the `pending-reboot` method can be used with MySQL and Oracle DB instances for either dynamic or static parameters. For Microsoft SQL Server DB instances, the `pending-reboot` method can be used only for static parameters. :type db_parameter_group_name: string :param db_parameter_group_name: The name of the DB parameter group. Constraints: + Must be the name of an existing DB parameter group + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type parameters: list :param parameters: An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request. Valid Values (for the application method): `immediate | pending-reboot` You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots. 
""" params = {'DBParameterGroupName': db_parameter_group_name, } self.build_complex_list_params( params, parameters, 'Parameters.member', ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod')) return self._make_request( action='ModifyDBParameterGroup', verb='POST', path='/', params=params) def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids, db_subnet_group_description=None): """ Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region. :type db_subnet_group_name: string :param db_subnet_group_name: The name for the DB subnet group. This value is stored as a lowercase string. Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be "Default". Example: `mySubnetgroup` :type db_subnet_group_description: string :param db_subnet_group_description: The description for the DB subnet group. :type subnet_ids: list :param subnet_ids: The EC2 subnet IDs for the DB subnet group. """ params = {'DBSubnetGroupName': db_subnet_group_name, } self.build_list_params(params, subnet_ids, 'SubnetIds.member') if db_subnet_group_description is not None: params['DBSubnetGroupDescription'] = db_subnet_group_description return self._make_request( action='ModifyDBSubnetGroup', verb='POST', path='/', params=params) def modify_event_subscription(self, subscription_name, sns_topic_arn=None, source_type=None, event_categories=None, enabled=None): """ Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls. You can see a list of the event categories for a given SourceType in the `Events`_ topic in the Amazon RDS User Guide or by using the **DescribeEventCategories** action. :type subscription_name: string :param subscription_name: The name of the RDS event notification subscription. :type sns_topic_arn: string :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it. :type source_type: string :param source_type: The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. if this value is not specified, all events are returned. Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot :type event_categories: list :param event_categories: A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the `Events`_ topic in the Amazon RDS User Guide or by using the **DescribeEventCategories** action. :type enabled: boolean :param enabled: A Boolean value; set to **true** to activate the subscription. 
""" params = {'SubscriptionName': subscription_name, } if sns_topic_arn is not None: params['SnsTopicArn'] = sns_topic_arn if source_type is not None: params['SourceType'] = source_type if event_categories is not None: self.build_list_params(params, event_categories, 'EventCategories.member') if enabled is not None: params['Enabled'] = str( enabled).lower() return self._make_request( action='ModifyEventSubscription', verb='POST', path='/', params=params) def modify_option_group(self, option_group_name, options_to_include=None, options_to_remove=None, apply_immediately=None): """ Modifies an existing option group. :type option_group_name: string :param option_group_name: The name of the option group to be modified. Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance :type options_to_include: list :param options_to_include: Options in this list are added to the option group or, if already present, the specified configuration is used to update the existing configuration. :type options_to_remove: list :param options_to_remove: Options in this list are removed from the option group. :type apply_immediately: boolean :param apply_immediately: Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the option group. """ params = {'OptionGroupName': option_group_name, } if options_to_include is not None: self.build_complex_list_params( params, options_to_include, 'OptionsToInclude.member', ('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')) if options_to_remove is not None: self.build_list_params(params, options_to_remove, 'OptionsToRemove.member') if apply_immediately is not None: params['ApplyImmediately'] = str( apply_immediately).lower() return self._make_request( action='ModifyOptionGroup', verb='POST', path='/', params=params) def promote_read_replica(self, db_instance_identifier, backup_retention_period=None, preferred_backup_window=None): """ Promotes a read replica DB instance to a standalone DB instance. :type db_instance_identifier: string :param db_instance_identifier: The DB instance identifier. This value is stored as a lowercase string. Constraints: + Must be the identifier for an existing read replica DB instance + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens Example: mydbinstance :type backup_retention_period: integer :param backup_retention_period: The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Default: 1 Constraints: + Must be a value from 0 to 8 :type preferred_backup_window: string :param preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled, using the `BackupRetentionPeriod` parameter. Default: A 30-minute window selected at random from an 8-hour block of time per region. See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned. Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes. 
""" params = {'DBInstanceIdentifier': db_instance_identifier, } if backup_retention_period is not None: params['BackupRetentionPeriod'] = backup_retention_period if preferred_backup_window is not None: params['PreferredBackupWindow'] = preferred_backup_window return self._make_request( action='PromoteReadReplica', verb='POST', path='/', params=params) def purchase_reserved_db_instances_offering(self, reserved_db_instances_offering_id, reserved_db_instance_id=None, db_instance_count=None, tags=None): """ Purchases a reserved DB instance offering. :type reserved_db_instances_offering_id: string :param reserved_db_instances_offering_id: The ID of the Reserved DB instance offering to purchase. Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 :type reserved_db_instance_id: string :param reserved_db_instance_id: Customer-specified identifier to track this reservation. Example: myreservationID :type db_instance_count: integer :param db_instance_count: The number of instances to reserve. Default: `1` :type tags: list :param tags: A list of tags. """ params = { 'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id, } if reserved_db_instance_id is not None: params['ReservedDBInstanceId'] = reserved_db_instance_id if db_instance_count is not None: params['DBInstanceCount'] = db_instance_count if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='PurchaseReservedDBInstancesOffering', verb='POST', path='/', params=params) def reboot_db_instance(self, db_instance_identifier, force_failover=None): """ Rebooting a DB instance restarts the database engine service. A reboot also applies to the DB instance any modifications to the associated DB parameter group that were pending. Rebooting a DB instance results in a momentary outage of the instance, during which the DB instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. An Amazon RDS event is created when the reboot is completed. If your DB instance is deployed in multiple Availability Zones, you can force a failover from one AZ to the other during the reboot. You might force a failover to test the availability of your DB instance deployment or to restore operations to the original AZ after a failover occurs. The time required to reboot is a function of the specific database engine's crash recovery process. To improve the reboot time, we recommend that you reduce database activities as much as possible during the reboot process to reduce rollback activity for in-transit transactions. :type db_instance_identifier: string :param db_instance_identifier: The DB instance identifier. This parameter is stored as a lowercase string. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type force_failover: boolean :param force_failover: When `True`, the reboot will be conducted through a MultiAZ failover. Constraint: You cannot specify `True` if the instance is not configured for MultiAZ. 
""" params = {'DBInstanceIdentifier': db_instance_identifier, } if force_failover is not None: params['ForceFailover'] = str( force_failover).lower() return self._make_request( action='RebootDBInstance', verb='POST', path='/', params=params) def remove_source_identifier_from_subscription(self, subscription_name, source_identifier): """ Removes a source identifier from an existing RDS event notification subscription. :type subscription_name: string :param subscription_name: The name of the RDS event notification subscription you want to remove a source identifier from. :type source_identifier: string :param source_identifier: The source identifier to be removed from the subscription, such as the **DB instance identifier** for a DB instance or the name of a security group. """ params = { 'SubscriptionName': subscription_name, 'SourceIdentifier': source_identifier, } return self._make_request( action='RemoveSourceIdentifierFromSubscription', verb='POST', path='/', params=params) def remove_tags_from_resource(self, resource_name, tag_keys): """ Removes metadata tags from an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see `Tagging Amazon RDS Resources`_. :type resource_name: string :param resource_name: The Amazon RDS resource the tags will be removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see ` Constructing an RDS Amazon Resource Name (ARN)`_. :type tag_keys: list :param tag_keys: The tag key (name) of the tag to be removed. """ params = {'ResourceName': resource_name, } self.build_list_params(params, tag_keys, 'TagKeys.member') return self._make_request( action='RemoveTagsFromResource', verb='POST', path='/', params=params) def reset_db_parameter_group(self, db_parameter_group_name, reset_all_parameters=None, parameters=None): """ Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters submit a list of the following: `ParameterName` and `ApplyMethod`. To reset the entire DB parameter group, specify the `DBParameterGroup` name and `ResetAllParameters` parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to `pending-reboot` to take effect on the next DB instance restart or `RebootDBInstance` request. :type db_parameter_group_name: string :param db_parameter_group_name: The name of the DB parameter group. Constraints: + Must be 1 to 255 alphanumeric characters + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type reset_all_parameters: boolean :param reset_all_parameters: Specifies whether ( `True`) or not ( `False`) to reset all parameters in the DB parameter group to default values. Default: `True` :type parameters: list :param parameters: An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request. **MySQL** Valid Values (for Apply method): `immediate` | `pending-reboot` You can use the immediate value with dynamic parameters only. You can use the `pending-reboot` value for both dynamic and static parameters, and changes are applied when DB instance reboots. 
**Oracle** Valid Values (for Apply method): `pending-reboot` """ params = {'DBParameterGroupName': db_parameter_group_name, } if reset_all_parameters is not None: params['ResetAllParameters'] = str( reset_all_parameters).lower() if parameters is not None: self.build_complex_list_params( params, parameters, 'Parameters.member', ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod')) return self._make_request( action='ResetDBParameterGroup', verb='POST', path='/', params=params) def restore_db_instance_from_db_snapshot(self, db_instance_identifier, db_snapshot_identifier, db_instance_class=None, port=None, availability_zone=None, db_subnet_group_name=None, multi_az=None, publicly_accessible=None, auto_minor_version_upgrade=None, license_model=None, db_name=None, engine=None, iops=None, option_group_name=None, tags=None): """ Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with the same configuration as the original source database, except that the new RDS instance is created with the default security group. :type db_instance_identifier: string :param db_instance_identifier: The identifier for the DB snapshot to restore from. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type db_snapshot_identifier: string :param db_snapshot_identifier: Name of the DB instance to create from the DB snapshot. This parameter isn't case sensitive. Constraints: + Must contain from 1 to 255 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens Example: `my-snapshot-id` :type db_instance_class: string :param db_instance_class: The compute and memory capacity of the Amazon RDS DB instance. Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge` :type port: integer :param port: The port number on which the database accepts connections. Default: The same port as the original DB instance Constraints: Value must be `1150-65535` :type availability_zone: string :param availability_zone: The EC2 Availability Zone that the database instance will be created in. Default: A random, system-chosen Availability Zone. Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to `True`. Example: `us-east-1a` :type db_subnet_group_name: string :param db_subnet_group_name: The DB subnet group name to use for the new instance. :type multi_az: boolean :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to `True`. :type publicly_accessible: boolean :param publicly_accessible: Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case. 
+ **Default VPC:**true + **VPC:**false If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private. :type auto_minor_version_upgrade: boolean :param auto_minor_version_upgrade: Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. :type license_model: string :param license_model: License model information for the restored DB instance. Default: Same as source. Valid values: `license-included` | `bring-your-own-license` | `general- public-license` :type db_name: string :param db_name: The database name for the restored DB instance. This parameter doesn't apply to the MySQL engine. :type engine: string :param engine: The database engine to use for the new instance. Default: The same as source Constraint: Must be compatible with the engine of the source Example: `oracle-ee` :type iops: integer :param iops: Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts. Constraints: Must be an integer greater than 1000. :type option_group_name: string :param option_group_name: The name of the option group to be used for the restored DB instance. Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance :type tags: list :param tags: A list of tags. 
""" params = { 'DBInstanceIdentifier': db_instance_identifier, 'DBSnapshotIdentifier': db_snapshot_identifier, } if db_instance_class is not None: params['DBInstanceClass'] = db_instance_class if port is not None: params['Port'] = port if availability_zone is not None: params['AvailabilityZone'] = availability_zone if db_subnet_group_name is not None: params['DBSubnetGroupName'] = db_subnet_group_name if multi_az is not None: params['MultiAZ'] = str( multi_az).lower() if publicly_accessible is not None: params['PubliclyAccessible'] = str( publicly_accessible).lower() if auto_minor_version_upgrade is not None: params['AutoMinorVersionUpgrade'] = str( auto_minor_version_upgrade).lower() if license_model is not None: params['LicenseModel'] = license_model if db_name is not None: params['DBName'] = db_name if engine is not None: params['Engine'] = engine if iops is not None: params['Iops'] = iops if option_group_name is not None: params['OptionGroupName'] = option_group_name if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='RestoreDBInstanceFromDBSnapshot', verb='POST', path='/', params=params) def restore_db_instance_to_point_in_time(self, source_db_instance_identifier, target_db_instance_identifier, restore_time=None, use_latest_restorable_time=None, db_instance_class=None, port=None, availability_zone=None, db_subnet_group_name=None, multi_az=None, publicly_accessible=None, auto_minor_version_upgrade=None, license_model=None, db_name=None, engine=None, iops=None, option_group_name=None, tags=None): """ Restores a DB instance to an arbitrary point-in-time. Users can restore to any point in time before the latestRestorableTime for up to backupRetentionPeriod days. The target database is created from the source database with the same configuration as the original database except that the DB instance is created with the default DB security group. :type source_db_instance_identifier: string :param source_db_instance_identifier: The identifier of the source DB instance from which to restore. Constraints: + Must be the identifier of an existing database instance + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type target_db_instance_identifier: string :param target_db_instance_identifier: The name of the new database instance to be created. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type restore_time: timestamp :param restore_time: The date and time to restore from. Valid Values: Value must be a UTC time Constraints: + Must be before the latest restorable time for the DB instance + Cannot be specified if UseLatestRestorableTime parameter is true Example: `2009-09-07T23:45:00Z` :type use_latest_restorable_time: boolean :param use_latest_restorable_time: Specifies whether ( `True`) or not ( `False`) the DB instance is restored from the latest backup time. Default: `False` Constraints: Cannot be specified if RestoreTime parameter is provided. :type db_instance_class: string :param db_instance_class: The compute and memory capacity of the Amazon RDS DB instance. Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge` Default: The same DBInstanceClass as the original DB instance. 
:type port: integer :param port: The port number on which the database accepts connections. Constraints: Value must be `1150-65535` Default: The same port as the original DB instance. :type availability_zone: string :param availability_zone: The EC2 Availability Zone that the database instance will be created in. Default: A random, system-chosen Availability Zone. Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true. Example: `us-east-1a` :type db_subnet_group_name: string :param db_subnet_group_name: The DB subnet group name to use for the new instance. :type multi_az: boolean :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to `True`. :type publicly_accessible: boolean :param publicly_accessible: Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case. + **Default VPC:**true + **VPC:**false If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private. :type auto_minor_version_upgrade: boolean :param auto_minor_version_upgrade: Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. :type license_model: string :param license_model: License model information for the restored DB instance. Default: Same as source. Valid values: `license-included` | `bring-your-own-license` | `general- public-license` :type db_name: string :param db_name: The database name for the restored DB instance. This parameter is not used for the MySQL engine. :type engine: string :param engine: The database engine to use for the new instance. Default: The same as source Constraint: Must be compatible with the engine of the source Example: `oracle-ee` :type iops: integer :param iops: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. Constraints: Must be an integer greater than 1000. :type option_group_name: string :param option_group_name: The name of the option group to be used for the restored DB instance. Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance :type tags: list :param tags: A list of tags. 
""" params = { 'SourceDBInstanceIdentifier': source_db_instance_identifier, 'TargetDBInstanceIdentifier': target_db_instance_identifier, } if restore_time is not None: params['RestoreTime'] = restore_time if use_latest_restorable_time is not None: params['UseLatestRestorableTime'] = str( use_latest_restorable_time).lower() if db_instance_class is not None: params['DBInstanceClass'] = db_instance_class if port is not None: params['Port'] = port if availability_zone is not None: params['AvailabilityZone'] = availability_zone if db_subnet_group_name is not None: params['DBSubnetGroupName'] = db_subnet_group_name if multi_az is not None: params['MultiAZ'] = str( multi_az).lower() if publicly_accessible is not None: params['PubliclyAccessible'] = str( publicly_accessible).lower() if auto_minor_version_upgrade is not None: params['AutoMinorVersionUpgrade'] = str( auto_minor_version_upgrade).lower() if license_model is not None: params['LicenseModel'] = license_model if db_name is not None: params['DBName'] = db_name if engine is not None: params['Engine'] = engine if iops is not None: params['Iops'] = iops if option_group_name is not None: params['OptionGroupName'] = option_group_name if tags is not None: self.build_complex_list_params( params, tags, 'Tags.member', ('Key', 'Value')) return self._make_request( action='RestoreDBInstanceToPointInTime', verb='POST', path='/', params=params) def revoke_db_security_group_ingress(self, db_security_group_name, cidrip=None, ec2_security_group_name=None, ec2_security_group_id=None, ec2_security_group_owner_id=None): """ Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId). :type db_security_group_name: string :param db_security_group_name: The name of the DB security group to revoke ingress from. :type cidrip: string :param cidrip: The IP range to revoke access from. Must be a valid CIDR range. If `CIDRIP` is specified, `EC2SecurityGroupName`, `EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be provided. :type ec2_security_group_name: string :param ec2_security_group_name: The name of the EC2 security group to revoke access from. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. :type ec2_security_group_id: string :param ec2_security_group_id: The id of the EC2 security group to revoke access from. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. :type ec2_security_group_owner_id: string :param ec2_security_group_owner_id: The AWS Account Number of the owner of the EC2 security group specified in the `EC2SecurityGroupName` parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. 
""" params = {'DBSecurityGroupName': db_security_group_name, } if cidrip is not None: params['CIDRIP'] = cidrip if ec2_security_group_name is not None: params['EC2SecurityGroupName'] = ec2_security_group_name if ec2_security_group_id is not None: params['EC2SecurityGroupId'] = ec2_security_group_id if ec2_security_group_owner_id is not None: params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id return self._make_request( action='RevokeDBSecurityGroupIngress', verb='POST', path='/', params=params) def _make_request(self, action, verb, path, params): params['ContentType'] = 'JSON' response = self.make_request(action=action, verb='POST', path='/', params=params) body = response.read() boto.log.debug(body) if response.status == 200: return json.loads(body) else: json_body = json.loads(body) fault_name = json_body.get('Error', {}).get('Code', None) exception_class = self._faults.get(fault_name, self.ResponseError) raise exception_class(response.status, response.reason, body=json_body)
mit
-644,054,749,981,063,300
8,242,949,363,257,423,000
40.971353
174
0.619476
false
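The record above documents boto's RDS wrappers (ModifyDBInstance, RebootDBInstance, PromoteReadReplica and related calls). A minimal usage sketch follows; it assumes these methods live on the rds2 connection class reachable through boto.rds2.connect_to_region(), and the instance identifiers are invented for illustration.

import boto.rds2

conn = boto.rds2.connect_to_region('us-east-1')

# Rename the instance and raise provisioned IOPS; both changes wait for the
# next maintenance window because apply_immediately is left False.
conn.modify_db_instance(
    db_instance_identifier='mydbinstance',
    new_db_instance_identifier='mydbinstance-v2',
    iops=2000,
    apply_immediately=False)

# Reboot through a Multi-AZ failover to exercise the standby.
conn.reboot_db_instance('mydbinstance-v2', force_failover=True)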
javipalanca/ojoalplato
ojoalplato/users/models.py
1
1358
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import

from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _

USER_STATUS_CHOICES = (
    (0, "active"),
)


@python_2_unicode_compatible
class User(AbstractUser):

    # First Name and Last Name do not cover name patterns
    # around the globe.
    name = models.CharField(_("Name of User"), blank=True, max_length=255)
    login = models.CharField(max_length=60, default="")
    url = models.URLField(max_length=100, blank=True)
    activation_key = models.CharField(max_length=60, default="0")
    status = models.IntegerField(default=0, choices=USER_STATUS_CHOICES)

    def __str__(self):
        return self.username

    def get_absolute_url(self):
        return reverse('users:detail', kwargs={'username': self.username})


class UserMeta(models.Model):
    """
    Meta information about a user.
    """
    id = models.IntegerField(primary_key=True)
    user = models.ForeignKey(User, related_name="meta", blank=True, null=True)
    key = models.CharField(max_length=255)
    value = models.TextField()

    def __unicode__(self):
        return u"%s: %s" % (self.key, self.value)
mit
-7,304,202,768,045,789,000
-576,311,785,274,492,800
30.581395
78
0.694404
false
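Because the record above is a small pair of Django models, a short hypothetical ORM session makes their relationship concrete. The usernames and meta keys are invented, get_absolute_url() only resolves if a users:detail URL pattern is configured, and unicode() reflects the Python 2 environment the module targets.

from ojoalplato.users.models import User, UserMeta

user = User.objects.create(username="jdoe", name="Jane Doe")
UserMeta.objects.create(id=1, user=user, key="twitter", value="@jdoe")

user.get_absolute_url()      # e.g. '/users/jdoe/', depending on the URL conf
unicode(user.meta.first())   # u'twitter: @jdoe' (the model only defines __unicode__)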
jhd/spunout
venv/lib/python2.7/site-packages/pip/commands/search.py
344
4736
import sys import textwrap import pip.download from pip.basecommand import Command, SUCCESS from pip.util import get_terminal_size from pip.log import logger from pip.backwardcompat import xmlrpclib, reduce, cmp from pip.exceptions import CommandError from pip.status_codes import NO_MATCHES_FOUND from pip._vendor import pkg_resources from distutils.version import StrictVersion, LooseVersion class SearchCommand(Command): """Search for PyPI packages whose name or summary contains <query>.""" name = 'search' usage = """ %prog [options] <query>""" summary = 'Search PyPI for packages.' def __init__(self, *args, **kw): super(SearchCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python Package Index (default %default)') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args: raise CommandError('Missing required argument (search query).') query = args index_url = options.index pypi_hits = self.search(query, index_url) hits = transform_hits(pypi_hits) terminal_width = None if sys.stdout.isatty(): terminal_width = get_terminal_size()[0] print_results(hits, terminal_width=terminal_width) if pypi_hits: return SUCCESS return NO_MATCHES_FOUND def search(self, query, index_url): pypi = xmlrpclib.ServerProxy(index_url) hits = pypi.search({'name': query, 'summary': query}, 'or') return hits def transform_hits(hits): """ The list from pypi is really a list of versions. We want a list of packages with the list of versions stored inline. This converts the list from pypi into one we can use. """ packages = {} for hit in hits: name = hit['name'] summary = hit['summary'] version = hit['version'] score = hit['_pypi_ordering'] if score is None: score = 0 if name not in packages.keys(): packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score} else: packages[name]['versions'].append(version) # if this is the highest version, replace summary and score if version == highest_version(packages[name]['versions']): packages[name]['summary'] = summary packages[name]['score'] = score # each record has a unique name now, so we will convert the dict into a list sorted by score package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True) return package_list def print_results(hits, name_column_width=25, terminal_width=None): installed_packages = [p.project_name for p in pkg_resources.working_set] for hit in hits: name = hit['name'] summary = hit['summary'] or '' if terminal_width is not None: # wrap and indent summary to fit terminal summary = textwrap.wrap(summary, terminal_width - name_column_width - 5) summary = ('\n' + ' ' * (name_column_width + 3)).join(summary) line = '%s - %s' % (name.ljust(name_column_width), summary) try: logger.notify(line) if name in installed_packages: dist = pkg_resources.get_distribution(name) logger.indent += 2 try: latest = highest_version(hit['versions']) if dist.version == latest: logger.notify('INSTALLED: %s (latest)' % dist.version) else: logger.notify('INSTALLED: %s' % dist.version) logger.notify('LATEST: %s' % latest) finally: logger.indent -= 2 except UnicodeEncodeError: pass def compare_versions(version1, version2): try: return cmp(StrictVersion(version1), StrictVersion(version2)) # in case of abnormal version number, fall back to LooseVersion except ValueError: pass try: return cmp(LooseVersion(version1), LooseVersion(version2)) except TypeError: # certain LooseVersion comparions raise 
due to unorderable types, # fallback to string comparison return cmp([str(v) for v in LooseVersion(version1).version], [str(v) for v in LooseVersion(version2).version]) def highest_version(versions): return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
gpl-3.0
-1,999,321,213,407,479,800
6,926,216,247,473,314,000
34.878788
102
0.603463
false
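The helpers at the bottom of search.py (transform_hits, compare_versions, highest_version) are pure functions, so a quick sanity check can be run against hand-written hit dictionaries shaped like PyPI's XML-RPC search results. The package data below is made up, and the snippet assumes it runs inside the module's own (Python 2-era) environment where cmp/reduce are available.

hits = [
    {'name': 'example', 'summary': 'First cut', 'version': '1.0.0', '_pypi_ordering': 1},
    {'name': 'example', 'summary': 'Second cut', 'version': '2.0.0', '_pypi_ordering': 2},
]
packages = transform_hits(hits)

assert packages[0]['versions'] == ['1.0.0', '2.0.0']
assert packages[0]['summary'] == 'Second cut'          # summary tracks the highest version
assert highest_version(packages[0]['versions']) == '2.0.0'
assert compare_versions('1.0.0', '2.0.0') == -1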
phausler/binutils
gdb/python/lib/gdb/command/frame_filters.py
126
16605
# Frame-filter commands. # Copyright (C) 2013-2014 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """GDB commands for working with frame-filters.""" import sys import gdb import copy from gdb.FrameIterator import FrameIterator from gdb.FrameDecorator import FrameDecorator import gdb.frames import itertools # GDB Commands. class SetFilterPrefixCmd(gdb.Command): """Prefix command for 'set' frame-filter related operations.""" def __init__(self): super(SetFilterPrefixCmd, self).__init__("set frame-filter", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE, True) class ShowFilterPrefixCmd(gdb.Command): """Prefix command for 'show' frame-filter related operations.""" def __init__(self): super(ShowFilterPrefixCmd, self).__init__("show frame-filter", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE, True) class InfoFrameFilter(gdb.Command): """List all registered Python frame-filters. Usage: info frame-filters """ def __init__(self): super(InfoFrameFilter, self).__init__("info frame-filter", gdb.COMMAND_DATA) @staticmethod def enabled_string(state): """Return "Yes" if filter is enabled, otherwise "No".""" if state: return "Yes" else: return "No" def list_frame_filters(self, frame_filters): """ Internal worker function to list and print frame filters in a dictionary. Arguments: frame_filters: The name of the dictionary, as specified by GDB user commands. """ sorted_frame_filters = sorted(frame_filters.items(), key=lambda i: gdb.frames.get_priority(i[1]), reverse=True) if len(sorted_frame_filters) == 0: print(" No frame filters registered.") else: print(" Priority Enabled Name") for frame_filter in sorted_frame_filters: name = frame_filter[0] try: priority = '{:<8}'.format( str(gdb.frames.get_priority(frame_filter[1]))) enabled = '{:<7}'.format( self.enabled_string(gdb.frames.get_enabled(frame_filter[1]))) except Exception: e = sys.exc_info()[1] print(" Error printing filter '"+name+"': "+str(e)) else: print(" %s %s %s" % (priority, enabled, name)) def print_list(self, title, filter_list, blank_line): print(title) self.list_frame_filters(filter_list) if blank_line: print("") def invoke(self, arg, from_tty): self.print_list("global frame-filters:", gdb.frame_filters, True) cp = gdb.current_progspace() self.print_list("progspace %s frame-filters:" % cp.filename, cp.frame_filters, True) for objfile in gdb.objfiles(): self.print_list("objfile %s frame-filters:" % objfile.filename, objfile.frame_filters, False) # Internal enable/disable functions. def _enable_parse_arg(cmd_name, arg): """ Internal worker function to take an argument from enable/disable and return a tuple of arguments. Arguments: cmd_name: Name of the command invoking this function. args: The argument as a string. Returns: A tuple containing the dictionary, and the argument, or just the dictionary in the case of "all". 
""" argv = gdb.string_to_argv(arg); argc = len(argv) if argv[0] == "all" and argc > 1: raise gdb.GdbError(cmd_name + ": with 'all' " \ "you may not specify a filter.") else: if argv[0] != "all" and argc != 2: raise gdb.GdbError(cmd_name + " takes exactly two arguments.") return argv def _do_enable_frame_filter(command_tuple, flag): """Worker for enabling/disabling frame_filters. Arguments: command_type: A tuple with the first element being the frame filter dictionary, and the second being the frame filter name. flag: True for Enable, False for Disable. """ list_op = command_tuple[0] op_list = gdb.frames.return_list(list_op) if list_op == "all": for item in op_list: gdb.frames.set_enabled(item, flag) else: frame_filter = command_tuple[1] try: ff = op_list[frame_filter] except KeyError: msg = "frame-filter '" + str(name) + "' not found." raise gdb.GdbError(msg) gdb.frames.set_enabled(ff, flag) def _complete_frame_filter_list(text, word, all_flag): """Worker for frame filter dictionary name completion. Arguments: text: The full text of the command line. word: The most recent word of the command line. all_flag: Whether to include the word "all" in completion. Returns: A list of suggested frame filter dictionary name completions from text/word analysis. This list can be empty when there are no suggestions for completion. """ if all_flag == True: filter_locations = ["all", "global", "progspace"] else: filter_locations = ["global", "progspace"] for objfile in gdb.objfiles(): filter_locations.append(objfile.filename) # If the user just asked for completions with no completion # hints, just return all the frame filter dictionaries we know # about. if (text == ""): return filter_locations # Otherwise filter on what we know. flist = filter(lambda x,y=text:x.startswith(y), filter_locations) # If we only have one completion, complete it and return it. if len(flist) == 1: flist[0] = flist[0][len(text)-len(word):] # Otherwise, return an empty list, or a list of frame filter # dictionaries that the previous filter operation returned. return flist def _complete_frame_filter_name(word, printer_dict): """Worker for frame filter name completion. Arguments: word: The most recent word of the command line. printer_dict: The frame filter dictionary to search for frame filter name completions. Returns: A list of suggested frame filter name completions from word analysis of the frame filter dictionary. This list can be empty when there are no suggestions for completion. """ printer_keys = printer_dict.keys() if (word == ""): return printer_keys flist = filter(lambda x,y=word:x.startswith(y), printer_keys) return flist class EnableFrameFilter(gdb.Command): """GDB command to disable the specified frame-filter. Usage: enable frame-filter enable DICTIONARY [NAME] DICTIONARY is the name of the frame filter dictionary on which to operate. If dictionary is set to "all", perform operations on all dictionaries. Named dictionaries are: "global" for the global frame filter dictionary, "progspace" for the program space's frame filter dictionary. If either all, or the two named dictionaries are not specified, the dictionary name is assumed to be the name of the object-file name. NAME matches the name of the frame-filter to operate on. If DICTIONARY is "all", NAME is ignored. 
""" def __init__(self): super(EnableFrameFilter, self).__init__("enable frame-filter", gdb.COMMAND_DATA) def complete(self, text, word): """Completion function for both frame filter dictionary, and frame filter name.""" if text.count(" ") == 0: return _complete_frame_filter_list(text, word, True) else: printer_list = gdb.frames.return_list(text.split()[0].rstrip()) return _complete_frame_filter_name(word, printer_list) def invoke(self, arg, from_tty): command_tuple = _enable_parse_arg("enable frame-filter", arg) _do_enable_frame_filter(command_tuple, True) class DisableFrameFilter(gdb.Command): """GDB command to disable the specified frame-filter. Usage: disable frame-filter disable DICTIONARY [NAME] DICTIONARY is the name of the frame filter dictionary on which to operate. If dictionary is set to "all", perform operations on all dictionaries. Named dictionaries are: "global" for the global frame filter dictionary, "progspace" for the program space's frame filter dictionary. If either all, or the two named dictionaries are not specified, the dictionary name is assumed to be the name of the object-file name. NAME matches the name of the frame-filter to operate on. If DICTIONARY is "all", NAME is ignored. """ def __init__(self): super(DisableFrameFilter, self).__init__("disable frame-filter", gdb.COMMAND_DATA) def complete(self, text, word): """Completion function for both frame filter dictionary, and frame filter name.""" if text.count(" ") == 0: return _complete_frame_filter_list(text, word, True) else: printer_list = gdb.frames.return_list(text.split()[0].rstrip()) return _complete_frame_filter_name(word, printer_list) def invoke(self, arg, from_tty): command_tuple = _enable_parse_arg("disable frame-filter", arg) _do_enable_frame_filter(command_tuple, False) class SetFrameFilterPriority(gdb.Command): """GDB command to set the priority of the specified frame-filter. Usage: set frame-filter priority DICTIONARY NAME PRIORITY DICTIONARY is the name of the frame filter dictionary on which to operate. Named dictionaries are: "global" for the global frame filter dictionary, "progspace" for the program space's framefilter dictionary. If either of these two are not specified, the dictionary name is assumed to be the name of the object-file name. NAME matches the name of the frame filter to operate on. PRIORITY is the an integer to assign the new priority to the frame filter. """ def __init__(self): super(SetFrameFilterPriority, self).__init__("set frame-filter " \ "priority", gdb.COMMAND_DATA) def _parse_pri_arg(self, arg): """Internal worker to parse a priority from a tuple. Arguments: arg: Tuple which contains the arguments from the command. Returns: A tuple containing the dictionary, name and priority from the arguments. Raises: gdb.GdbError: An error parsing the arguments. """ argv = gdb.string_to_argv(arg); argc = len(argv) if argc != 3: print("set frame-filter priority " \ "takes exactly three arguments.") return None return argv def _set_filter_priority(self, command_tuple): """Internal worker for setting priority of frame-filters, by parsing a tuple and calling _set_priority with the parsed tuple. Arguments: command_tuple: Tuple which contains the arguments from the command. """ list_op = command_tuple[0] frame_filter = command_tuple[1] # GDB returns arguments as a string, so convert priority to # a number. priority = int(command_tuple[2]) op_list = gdb.frames.return_list(list_op) try: ff = op_list[frame_filter] except KeyError: msg = "frame-filter '" + str(name) + "' not found." 
raise gdb.GdbError(msg) gdb.frames.set_priority(ff, priority) def complete(self, text, word): """Completion function for both frame filter dictionary, and frame filter name.""" if text.count(" ") == 0: return _complete_frame_filter_list(text, word, False) else: printer_list = gdb.frames.return_list(text.split()[0].rstrip()) return _complete_frame_filter_name(word, printer_list) def invoke(self, arg, from_tty): command_tuple = self._parse_pri_arg(arg) if command_tuple != None: self._set_filter_priority(command_tuple) class ShowFrameFilterPriority(gdb.Command): """GDB command to show the priority of the specified frame-filter. Usage: show frame-filter priority DICTIONARY NAME DICTIONARY is the name of the frame filter dictionary on which to operate. Named dictionaries are: "global" for the global frame filter dictionary, "progspace" for the program space's framefilter dictionary. If either of these two are not specified, the dictionary name is assumed to be the name of the object-file name. NAME matches the name of the frame-filter to operate on. """ def __init__(self): super(ShowFrameFilterPriority, self).__init__("show frame-filter " \ "priority", gdb.COMMAND_DATA) def _parse_pri_arg(self, arg): """Internal worker to parse a dictionary and name from a tuple. Arguments: arg: Tuple which contains the arguments from the command. Returns: A tuple containing the dictionary, and frame filter name. Raises: gdb.GdbError: An error parsing the arguments. """ argv = gdb.string_to_argv(arg); argc = len(argv) if argc != 2: print("show frame-filter priority " \ "takes exactly two arguments.") return None return argv def get_filter_priority(self, frame_filters, name): """Worker for retrieving the priority of frame_filters. Arguments: frame_filters: Name of frame filter dictionary. name: object to select printers. Returns: The priority of the frame filter. Raises: gdb.GdbError: A frame filter cannot be found. """ op_list = gdb.frames.return_list(frame_filters) try: ff = op_list[name] except KeyError: msg = "frame-filter '" + str(name) + "' not found." raise gdb.GdbError(msg) return gdb.frames.get_priority(ff) def complete(self, text, word): """Completion function for both frame filter dictionary, and frame filter name.""" if text.count(" ") == 0: return _complete_frame_filter_list(text, word, False) else: printer_list = frame._return_list(text.split()[0].rstrip()) return _complete_frame_filter_name(word, printer_list) def invoke(self, arg, from_tty): command_tuple = self._parse_pri_arg(arg) if command_tuple == None: return filter_name = command_tuple[1] list_name = command_tuple[0] try: priority = self.get_filter_priority(list_name, filter_name); except Exception: e = sys.exc_info()[1] print("Error printing filter priority for '"+name+"':"+str(e)) else: print("Priority of filter '" + filter_name + "' in list '" \ + list_name + "' is: " + str(priority)) # Register commands SetFilterPrefixCmd() ShowFilterPrefixCmd() InfoFrameFilter() EnableFrameFilter() DisableFrameFilter() SetFrameFilterPriority() ShowFrameFilterPriority()
gpl-2.0
9,007,701,346,268,689,000
-8,846,254,751,504,857,000
34.556745
85
0.604818
false
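The commands in the record above manage frame-filter objects registered in dictionaries such as gdb.frame_filters. The minimal filter below, written against GDB's documented Python frame-filter interface (name, priority, enabled attributes and a filter method), shows what they act on; the filter is a pass-through and exists only for illustration.

import gdb

class PassThroughFilter(object):
    """Registers itself in the global dictionary and returns frames unchanged."""

    def __init__(self):
        self.name = "pass-through"
        self.priority = 100
        self.enabled = True
        gdb.frame_filters[self.name] = self

    def filter(self, frame_iterator):
        return frame_iterator

PassThroughFilter()

# From the GDB prompt the commands defined above can then manage it:
#   (gdb) info frame-filter
#   (gdb) disable frame-filter global pass-through
#   (gdb) set frame-filter priority global pass-through 50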
sobercoder/gem5
configs/ruby/Ruby.py
2
9500
# Copyright (c) 2012, 2017 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Copyright (c) 2006-2007 The Regents of The University of Michigan # Copyright (c) 2009 Advanced Micro Devices, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Brad Beckmann import math import m5 from m5.objects import * from m5.defines import buildEnv from m5.util import addToPath, fatal from common import MemConfig from topologies import * from network import Network def define_options(parser): # By default, ruby uses the simple timing cpu parser.set_defaults(cpu_type="TimingSimpleCPU") parser.add_option("--ruby-clock", action="store", type="string", default='2GHz', help="Clock for blocks running at Ruby system's speed") parser.add_option("--access-backing-store", action="store_true", default=False, help="Should ruby maintain a second copy of memory") # Options related to cache structure parser.add_option("--ports", action="store", type="int", default=4, help="used of transitions per cycle which is a proxy \ for the number of ports.") # network options are in network/Network.py # ruby mapping options parser.add_option("--numa-high-bit", type="int", default=0, help="high order address bit to use for numa mapping. 
" \ "0 = highest bit, not specified = lowest bit") parser.add_option("--recycle-latency", type="int", default=10, help="Recycle latency for ruby controller input buffers") protocol = buildEnv['PROTOCOL'] exec "import %s" % protocol eval("%s.define_options(parser)" % protocol) Network.define_options(parser) def setup_memory_controllers(system, ruby, dir_cntrls, options): ruby.block_size_bytes = options.cacheline_size ruby.memory_size_bits = 48 index = 0 mem_ctrls = [] crossbars = [] # Sets bits to be used for interleaving. Creates memory controllers # attached to a directory controller. A separate controller is created # for each address range as the abstract memory can handle only one # contiguous address range as of now. for dir_cntrl in dir_cntrls: crossbar = None if len(system.mem_ranges) > 1: crossbar = IOXBar() crossbars.append(crossbar) dir_cntrl.memory = crossbar.slave for r in system.mem_ranges: mem_ctrl = MemConfig.create_mem_ctrl( MemConfig.get(options.mem_type), r, index, options.num_dirs, int(math.log(options.num_dirs, 2)), options.cacheline_size) if options.access_backing_store: mem_ctrl.kvm_map=False mem_ctrls.append(mem_ctrl) if crossbar != None: mem_ctrl.port = crossbar.master else: mem_ctrl.port = dir_cntrl.memory index += 1 system.mem_ctrls = mem_ctrls if len(crossbars) > 0: ruby.crossbars = crossbars def create_topology(controllers, options): """ Called from create_system in configs/ruby/<protocol>.py Must return an object which is a subclass of BaseTopology found in configs/topologies/BaseTopology.py This is a wrapper for the legacy topologies. """ exec "import topologies.%s as Topo" % options.topology topology = eval("Topo.%s(controllers)" % options.topology) return topology def create_system(options, full_system, system, piobus = None, dma_ports = []): system.ruby = RubySystem() ruby = system.ruby # Create the network object (network, IntLinkClass, ExtLinkClass, RouterClass, InterfaceClass) = \ Network.create_network(options, ruby) ruby.network = network protocol = buildEnv['PROTOCOL'] exec "import %s" % protocol try: (cpu_sequencers, dir_cntrls, topology) = \ eval("%s.create_system(options, full_system, system, dma_ports,\ ruby)" % protocol) except: print "Error: could not create sytem for ruby protocol %s" % protocol raise # Create the network topology topology.makeTopology(options, network, IntLinkClass, ExtLinkClass, RouterClass) # Initialize network based on topology Network.init_network(options, network, InterfaceClass) # Create a port proxy for connecting the system port. This is # independent of the protocol and kept in the protocol-agnostic # part (i.e. here). 
sys_port_proxy = RubyPortProxy(ruby_system = ruby) if piobus is not None: sys_port_proxy.pio_master_port = piobus.slave # Give the system port proxy a SimObject parent without creating a # full-fledged controller system.sys_port_proxy = sys_port_proxy # Connect the system port for loading of binaries etc system.system_port = system.sys_port_proxy.slave setup_memory_controllers(system, ruby, dir_cntrls, options) # Connect the cpu sequencers and the piobus if piobus != None: for cpu_seq in cpu_sequencers: cpu_seq.pio_master_port = piobus.slave cpu_seq.mem_master_port = piobus.slave if buildEnv['TARGET_ISA'] == "x86": cpu_seq.pio_slave_port = piobus.master ruby.number_of_virtual_networks = ruby.network.number_of_virtual_networks ruby._cpu_ports = cpu_sequencers ruby.num_of_sequencers = len(cpu_sequencers) # Create a backing copy of physical memory in case required if options.access_backing_store: ruby.access_backing_store = True ruby.phys_mem = SimpleMemory(range=system.mem_ranges[0], in_addr_map=False) def create_directories(options, mem_ranges, ruby_system): dir_cntrl_nodes = [] if options.numa_high_bit: numa_bit = options.numa_high_bit else: # if the numa_bit is not specified, set the directory bits as the # lowest bits above the block offset bits, and the numa_bit as the # highest of those directory bits dir_bits = int(math.log(options.num_dirs, 2)) block_size_bits = int(math.log(options.cacheline_size, 2)) numa_bit = block_size_bits + dir_bits - 1 for i in xrange(options.num_dirs): dir_ranges = [] for r in mem_ranges: addr_range = m5.objects.AddrRange(r.start, size = r.size(), intlvHighBit = numa_bit, intlvBits = dir_bits, intlvMatch = i) dir_ranges.append(addr_range) dir_cntrl = Directory_Controller() dir_cntrl.version = i dir_cntrl.directory = RubyDirectoryMemory() dir_cntrl.ruby_system = ruby_system dir_cntrl.addr_ranges = dir_ranges exec("ruby_system.dir_cntrl%d = dir_cntrl" % i) dir_cntrl_nodes.append(dir_cntrl) return dir_cntrl_nodes def send_evicts(options): # currently, 2 scenarios warrant forwarding evictions to the CPU: # 1. The O3 model must keep the LSQ coherent with the caches # 2. The x86 mwait instruction is built on top of coherence invalidations # 3. The local exclusive monitor in ARM systems if options.cpu_type == "DerivO3CPU" or \ buildEnv['TARGET_ISA'] in ('x86', 'arm'): return True return False
bsd-3-clause
-4,177,376,626,560,441,000
6,503,473,253,256,680,000
38.915966
83
0.665053
false
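Note on the directory interleaving in the Ruby.py record above: when `--numa-high-bit` is not given, the directory-select bits sit directly above the cache-line offset bits (`numa_bit = block_size_bits + dir_bits - 1`). The standalone sketch below, with illustrative parameter values rather than gem5 defaults, shows how an address then maps to a directory.

# Plain-Python sketch of the default interleaving used by create_directories();
# num_dirs and cacheline_size here are illustrative, not gem5 defaults.
import math

def directory_for_address(addr, num_dirs=4, cacheline_size=64):
    dir_bits = int(math.log(num_dirs, 2))          # bits selecting a directory
    block_bits = int(math.log(cacheline_size, 2))  # cache-line offset bits
    # numa_bit (the highest directory-select bit) = block_bits + dir_bits - 1
    return (addr >> block_bits) & (num_dirs - 1)

# Consecutive cache lines rotate across directories 0..3.
print([directory_for_address(line * 64) for line in range(8)])  # [0, 1, 2, 3, 0, 1, 2, 3]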
tlby/mxnet
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
2
34598
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 """ Module for translating ONNX operators into Mxnet operatoes""" # pylint: disable=unused-argument,protected-access import numpy as np from . import _translation_utils as translation_utils from .... import symbol # Method definitions for the callable objects mapped in the import_helper module def identity(attrs, inputs, proto_obj): """Returns the identity function of the the input.""" return 'identity', attrs, inputs def random_uniform(attrs, inputs, proto_obj): """Draw random samples from a uniform distribtuion.""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " "Instructions to install - https://github.com/onnx/onnx") new_attrs = translation_utils._remove_attributes(attrs, ['seed']) new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs.get('dtype', 1))] return 'random_uniform', new_attrs, inputs def random_normal(attrs, inputs, proto_obj): """Draw random samples from a Gaussian distribution.""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " "Instructions to install - https://github.com/onnx/onnx") new_attr = translation_utils._remove_attributes(attrs, ['seed']) new_attr = translation_utils._fix_attribute_names(new_attr, {'mean': 'loc'}) new_attr['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attr.get('dtype', 1))] return 'random_normal', new_attr, inputs def sample_multinomial(attrs, inputs, proto_obj): """Draw random samples from a multinomial distribution.""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. 
" + "Instructions to install - https://github.com/onnx/onnx") new_attrs = translation_utils._remove_attributes(attrs, ['seed']) new_attrs = translation_utils._fix_attribute_names(new_attrs, {'sample_size': 'shape'}) new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(attrs.get('dtype', 6))] return 'sample_multinomial', new_attrs, inputs # Arithmetic Operations def add(attrs, inputs, proto_obj): """Adding two tensors""" new_attr = {} if 'broadcast' in attrs and attrs['broadcast'] == 1: broadcast_axis = attrs['axis'] op_value = translation_utils._fix_broadcast('broadcast_add', inputs, broadcast_axis, proto_obj) return op_value, new_attr, inputs return 'broadcast_add', new_attr, inputs def subtract(attrs, inputs, proto_obj): """Subtracting two tensors""" new_attr = {} if 'broadcast' in attrs and attrs['broadcast'] == 1: broadcast_axis = attrs['axis'] op_value = translation_utils._fix_broadcast('broadcast_sub', inputs, broadcast_axis, proto_obj) return op_value, new_attr, inputs return 'broadcast_sub', new_attr, inputs def multiply(attrs, inputs, proto_obj): """Multiply two tensors""" new_attr = {} if 'broadcast' in attrs and attrs['broadcast'] == 1: broadcast_axis = attrs['axis'] op_value = translation_utils._fix_broadcast('broadcast_mul', inputs, broadcast_axis, proto_obj) return op_value, new_attr, inputs return 'broadcast_mul', new_attr, inputs def divide(attrs, inputs, proto_obj): """Divide two tensors""" new_attr = {} if 'broadcast' in attrs and attrs['broadcast'] == 1: broadcast_axis = attrs['axis'] op_value = translation_utils._fix_broadcast('broadcast_div', inputs, broadcast_axis, proto_obj) return op_value, new_attr, inputs return 'broadcast_div', new_attr, inputs def mean(attrs, inputs, proto_obj): """Mean of all the input tensors.""" concat_input = [symbol.expand_dims(op_input, axis=0) for op_input in inputs] concat_sym = symbol.concat(*concat_input, dim=0) mean_sym = symbol.mean(concat_sym, axis=0) return mean_sym, attrs, inputs def logical_and(attrs, inputs, proto_obj): """Logical and of two input arrays.""" return 'broadcast_logical_and', attrs, inputs def logical_or(attrs, inputs, proto_obj): """Logical or of two input arrays.""" return 'broadcast_logical_or', attrs, inputs def logical_xor(attrs, inputs, proto_obj): """Logical xor of two input arrays.""" return 'broadcast_logical_xor', attrs, inputs def logical_not(attrs, inputs, proto_obj): """Logical not of two input arrays.""" return 'logical_not', attrs, inputs def absolute(attrs, inputs, proto_obj): """Returns element-wise absolute value of the input.""" return 'abs', attrs, inputs def negative(attrs, inputs, proto_obj): """Negation of every element in a tensor""" return 'negative', attrs, inputs def add_n(attrs, inputs, proto_obj): """Elementwise sum of arrays""" return 'add_n', attrs, inputs # Sorting and Searching def argmax(attrs, inputs, proto_obj): """Returns indices of the maximum values along an axis""" axis = attrs.get('axis', 0) keepdims = attrs.get('keepdims', 1) argmax_op = symbol.argmax(inputs[0], axis=axis, keepdims=keepdims) # onnx argmax operator always expects int64 as output type cast_attrs = {'dtype': 'int64'} return 'cast', cast_attrs, argmax_op def argmin(attrs, inputs, proto_obj): """Returns indices of the minimum values along an axis.""" axis = attrs.get('axis', 0) keepdims = attrs.get('keepdims', 1) argmin_op = symbol.argmin(inputs[0], axis=axis, keepdims=keepdims) # onnx argmax operator always expects int64 as output type cast_attrs = {'dtype': 'int64'} return 'cast', cast_attrs, argmin_op def 
maximum(attrs, inputs, proto_obj): """ Elementwise maximum of arrays. MXNet maximum compares only two symbols at a time. ONNX can send more than two to compare. Breaking into multiple mxnet ops to compare two symbols at a time """ if len(inputs) > 1: mxnet_op = symbol.maximum(inputs[0], inputs[1]) for op_input in inputs[2:]: mxnet_op = symbol.maximum(mxnet_op, op_input) else: mxnet_op = symbol.maximum(inputs[0], inputs[0]) return mxnet_op, attrs, inputs def minimum(attrs, inputs, proto_obj): """Elementwise minimum of arrays.""" # MXNet minimum compares only two symbols at a time. # ONNX can send more than two to compare. # Breaking into multiple mxnet ops to compare two symbols at a time if len(inputs) > 1: mxnet_op = symbol.minimum(inputs[0], inputs[1]) for op_input in inputs[2:]: mxnet_op = symbol.minimum(mxnet_op, op_input) else: mxnet_op = symbol.minimum(inputs[0], inputs[0]) return mxnet_op, attrs, inputs def lesser(attrs, inputs, proto_obj): """Logical Lesser operator with broadcasting.""" return 'broadcast_lesser', attrs, inputs def greater(attrs, inputs, proto_obj): """Logical Greater operator with broadcasting.""" return 'broadcast_greater', attrs, inputs def equal(attrs, inputs, proto_obj): """Logical Equal operator with broadcasting.""" return 'broadcast_equal', attrs, inputs #Hyperbolic functions def tanh(attrs, inputs, proto_obj): """Returns the hyperbolic tangent of the input array.""" return 'tanh', attrs, inputs # Rounding def ceil(attrs, inputs, proto_obj): """ Calculate ceil value for input """ return 'ceil', attrs, inputs def floor(attrs, inputs, proto_obj): """ Calculate floor value for input """ return 'floor', attrs, inputs # Joining and spliting def concat(attrs, inputs, proto_obj): """ Joins input arrays along a given axis. """ new_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'}) return 'concat', new_attrs, inputs # Basic neural network functions def softsign(attrs, inputs, proto_obj): """Computes softsign of x element-wise.""" return 'softsign', attrs, inputs def sigmoid(attrs, inputs, proto_obj): """Computes elementwise sigmoid of the input array""" return 'sigmoid', attrs, inputs def hardsigmoid(attrs, inputs, proto_obj): """Computes elementwise hard sigmoid of the input array""" return 'hard_sigmoid', attrs, inputs def relu(attrs, inputs, proto_obj): """Computes rectified linear function.""" return 'relu', attrs, inputs def pad(attrs, inputs, proto_obj): """ Add padding to input tensor""" new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width', 'value' : 'constant_value' }) new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width')) return 'pad', new_attrs, inputs def matrix_multiplication(attrs, inputs, proto_obj): """Performs general matrix multiplication""" return 'linalg_gemm2', attrs, inputs def batch_norm(attrs, inputs, proto_obj): """Batch normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps', 'is_test': 'fix_gamma'}) new_attrs = translation_utils._remove_attributes(new_attrs, ['spatial', 'consumed_inputs']) # Disable cuDNN BN only if epsilon from model is < than minimum cuDNN eps (1e-5) cudnn_min_eps = 1e-5 cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1 new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': cudnn_off}) # in test mode "fix_gamma" should be unset. 
new_attrs['fix_gamma'] = not attrs.get('is_test', 1) return 'BatchNorm', new_attrs, inputs def instance_norm(attrs, inputs, proto_obj): """Instance Normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'}) new_attrs['eps'] = attrs.get('epsilon', 1e-5) return 'InstanceNorm', new_attrs, inputs def leaky_relu(attrs, inputs, proto_obj): """Leaky Relu function""" if 'alpha' in attrs: new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'}) else: new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01}) return 'LeakyReLU', new_attrs, inputs def _elu(attrs, inputs, proto_obj): """Elu function""" if 'alpha' in attrs: new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'}) else: new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0}) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'act_type': 'elu'}) return 'LeakyReLU', new_attrs, inputs def _prelu(attrs, inputs, proto_obj): """PRelu function""" new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'}) return 'LeakyReLU', new_attrs, inputs def _selu(attrs, inputs, proto_obj): """Selu function""" new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'}) return 'LeakyReLU', new_attrs, inputs def softmax(attrs, inputs, proto_obj): """Softmax function.""" if 'axis' not in attrs: attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1}) return 'softmax', attrs, inputs def log_softmax(attrs, inputs, proto_obj): """Computes the log softmax of the input. This is equivalent to computing softmax followed by log.""" return 'log_softmax', attrs, inputs def softplus(attrs, inputs, proto_obj): """Applies the sofplus activation function element-wise to the input.""" new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'}) return 'Activation', new_attrs, inputs def conv(attrs, inputs, proto_obj): """Compute N-D convolution on (N+2)-D input.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel', 'strides' : 'stride', 'pads': 'pad', 'dilations': 'dilate', 'group': 'num_group'}) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1}) new_attrs = translation_utils._fix_bias('Convolution', new_attrs, len(inputs)) new_attrs = translation_utils._fix_channels('Convolution', new_attrs, inputs, proto_obj) kernel = new_attrs['kernel'] stride = new_attrs['stride'] if 'stride' in new_attrs else [] padding = new_attrs['pad'] if 'pad' in new_attrs else [] dilations = new_attrs['dilate'] if 'dilate' in new_attrs else [] num_filter = new_attrs['num_filter'] num_group = new_attrs['num_group'] no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else 0 bias = None if no_bias is True else inputs[2] # Unlike ONNX, MXNet's convolution operator does not support asymmetric padding, so we first # use 'Pad' operator, which supports asymmetric padding. Then use the convolution operator. 
pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel)) pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width) conv_op = symbol.Convolution(pad_op, inputs[1], bias, kernel=kernel, stride=stride, dilate=dilations, num_filter=num_filter, num_group=num_group, no_bias=no_bias) return conv_op, new_attrs, inputs def deconv(attrs, inputs, proto_obj): """Computes transposed convolution of the input tensor.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel', 'strides' : 'stride', 'pads': 'pad', 'dilations': 'dilate', 'group': 'num_group'}) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1}) new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs)) new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, proto_obj) kernel = new_attrs['kernel'] stride = new_attrs['stride'] if 'stride' in new_attrs else [] padding = new_attrs['pad'] if 'pad' in new_attrs else [] dilations = new_attrs['dilate'] if 'dilate' in new_attrs else [] num_filter = new_attrs['num_filter'] num_group = new_attrs['num_group'] no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else False bias = None if no_bias is True else inputs[2] # Unlike ONNX, MXNet's deconvolution operator does not support asymmetric padding, so we first # use 'Pad' operator, which supports asymmetric padding. Then use the deconvolution operator. pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel)) pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width) deconv_op = symbol.Deconvolution(pad_op, inputs[1], bias, kernel=kernel, stride=stride, dilate=dilations, num_filter=num_filter, num_group=num_group, no_bias=no_bias) return deconv_op, new_attrs, inputs def fully_connected(attrs, inputs, proto_obj): """Applies a linear transformation: Y=XWT+b.""" new_attrs = translation_utils._remove_attributes(attrs, ['axis']) new_attrs = translation_utils._fix_bias('FullyConnected', new_attrs, len(inputs)) new_attrs = translation_utils._fix_channels('FullyConnected', new_attrs, inputs, proto_obj) return 'FullyConnected', new_attrs, inputs def global_maxpooling(attrs, inputs, proto_obj): """Performs max pooling on the input.""" new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True, 'kernel': (1, 1), 'pool_type': 'max'}) return 'Pooling', new_attrs, inputs def global_avgpooling(attrs, inputs, proto_obj): """Performs avg pooling on the input.""" new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True, 'kernel': (1, 1), 'pool_type': 'avg'}) return 'Pooling', new_attrs, inputs def global_lppooling(attrs, inputs, proto_obj): """Performs global lp pooling on the input.""" p_value = attrs.get('p', 2) new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True, 'kernel': (1, 1), 'pool_type': 'lp', 'p_value': p_value}) new_attrs = translation_utils._remove_attributes(new_attrs, ['p']) return 'Pooling', new_attrs, inputs def linalg_gemm(attrs, inputs, proto_obj): """Performs general matrix multiplication and accumulation""" trans_a = 0 trans_b = 0 alpha = 1 beta = 1 if 'transA' in attrs: trans_a = attrs['transA'] if 'transB' in attrs: trans_b = attrs['transB'] if 'alpha' in attrs: alpha = attrs['alpha'] if 'beta' in attrs: beta = attrs['beta'] flatten_a = symbol.flatten(inputs[0]) matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1], transpose_a=trans_a, transpose_b=trans_b, alpha=alpha) 
gemm_op = symbol.broadcast_add(matmul_op, beta*inputs[2]) new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a', 'transB': 'transpose_b'}) new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast']) return gemm_op, new_attrs, inputs def local_response_norm(attrs, inputs, proto_obj): """Local Response Normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'bias': 'knorm', 'size' : 'nsize'}) return 'LRN', new_attrs, inputs def dropout(attrs, inputs, proto_obj): """Dropout Regularization.""" mode = 'training' if 'is_test' in attrs and attrs['is_test'] == 0: mode = 'always' new_attrs = translation_utils._fix_attribute_names(attrs, {'ratio': 'p'}) new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test']) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode}) return 'Dropout', new_attrs, inputs # Changing shape and type. def reshape(attrs, inputs, proto_obj): """Reshape the given array by the shape attribute.""" if len(inputs) == 1: return 'reshape', attrs, inputs[0] reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy()) reshape_shape = [int(i) for i in reshape_shape] new_attrs = {'shape': reshape_shape} return 'reshape', new_attrs, inputs[:1] def cast(attrs, inputs, proto_obj): """ Cast input to a given dtype""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " + "Instructions to install - https://github.com/onnx/onnx") new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'}) new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])] return 'cast', new_attrs, inputs def split(attrs, inputs, proto_obj): """Splits an array along a particular axis into multiple sub-arrays.""" split_list = attrs.get('split') if 'split' in attrs else [] new_attrs = translation_utils._fix_attribute_names(attrs, {'split' : 'num_outputs'}) if 'axis' not in attrs: new_attrs = translation_utils._add_extra_attributes(new_attrs, {'axis': 0}) if not split_list: num_outputs = len(proto_obj.model_metadata.get('output_tensor_data')) else: if len(set(split_list)) == 1: num_outputs = len(split_list) else: raise NotImplementedError("Operator {} in MXNet does not support variable splits." "Tracking the issue to support variable split here: " "https://github.com/apache/incubator-mxnet/issues/11594" .format('split')) new_attrs['num_outputs'] = num_outputs return 'split', new_attrs, inputs def _slice(attrs, inputs, proto_obj): """Returns a slice of the input tensor along multiple axes.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes' : 'axis', 'ends' : 'end', 'starts' : 'begin'}) # onnx slice provides slicing on multiple axis. 
Adding multiple slice_axis operator # for multiple axes from mxnet begin = new_attrs.get('begin') end = new_attrs.get('end') axes = new_attrs.get('axis', tuple(range(len(begin)))) slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0]) if len(axes) > 1: for i, axis in enumerate(axes): slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i]) return slice_op, new_attrs, inputs def transpose(attrs, inputs, proto_obj): """Transpose the input array.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'perm' : 'axes'}) return 'transpose', new_attrs, inputs def squeeze(attrs, inputs, proto_obj): """Remove single-dimensional entries from the shape of a tensor.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes' : 'axis'}) return 'squeeze', new_attrs, inputs def unsqueeze(attrs, inputs, cls): """Inserts a new axis of size 1 into the array shape""" # MXNet can only add one axis at a time. mxnet_op = inputs[0] for axis in attrs["axes"]: mxnet_op = symbol.expand_dims(mxnet_op, axis=axis) return mxnet_op, attrs, inputs def flatten(attrs, inputs, proto_obj): """Flattens the input array into a 2-D array by collapsing the higher dimensions.""" #Mxnet does not have axis support. By default uses axis=1 if 'axis' in attrs and attrs['axis'] != 1: raise RuntimeError("Flatten operator only supports axis=1") new_attrs = translation_utils._remove_attributes(attrs, ['axis']) return 'Flatten', new_attrs, inputs def clip(attrs, inputs, proto_obj): """Clips (limits) the values in an array.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min', 'max' : 'a_max'}) if 'a_max' not in new_attrs: new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_max' : np.inf}) if 'a_min' not in new_attrs: new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_min' : -np.inf}) return 'clip', new_attrs, inputs def gather(attrs, inputs, proto_obj): """Gather elements from an input array along the given axis.""" return 'take', attrs, inputs #Powers def reciprocal(attrs, inputs, proto_obj): """Returns the reciprocal of the argument, element-wise.""" return 'reciprocal', attrs, inputs def squareroot(attrs, inputs, proto_obj): """Returns element-wise square-root value of the input.""" return 'sqrt', attrs, inputs def power(attrs, inputs, proto_obj): """Returns element-wise result of base element raised to powers from exp element.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'exponent':'exp'}) if 'broadcast' in attrs: new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast']) if attrs['broadcast'] == 1: return 'broadcast_power', new_attrs, inputs else: mxnet_op = symbol.pow(inputs[0], inputs[1]) return mxnet_op, new_attrs, inputs mxnet_op = symbol.broadcast_power(inputs[0], inputs[1]) return mxnet_op, new_attrs, inputs def exponent(attrs, inputs, proto_obj): """Elementwise exponent of input array.""" return 'exp', attrs, inputs def _cos(attrs, inputs, proto_obj): """Elementwise cosine of input array.""" return 'cos', attrs, inputs def _sin(attrs, inputs, proto_obj): """Elementwise sine of input array.""" return 'sin', attrs, inputs def _tan(attrs, inputs, proto_obj): """Elementwise tan of input array.""" return 'tan', attrs, inputs def arccos(attrs, inputs, proto_obj): """Elementwise inverse cos of input array.""" return 'arccos', attrs, inputs def arcsin(attrs, inputs, proto_obj): """Elementwise inverse sin of input array.""" return 'arcsin', attrs, inputs def arctan(attrs, inputs, 
proto_obj): """Elementwise inverse tan of input array.""" return 'arctan', attrs, inputs def _log(attrs, inputs, proto_obj): """Elementwise log of input array.""" return 'log', attrs, inputs # Reduce Functions def reduce_max(attrs, inputs, proto_obj): """Reduce the array along a given axis by maximum value""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'max', new_attrs, inputs def reduce_mean(attrs, inputs, proto_obj): """Reduce the array along a given axis by mean value""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'mean', new_attrs, inputs def reduce_min(attrs, inputs, proto_obj): """Reduce the array along a given axis by minimum value""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'min', new_attrs, inputs def reduce_sum(attrs, inputs, proto_obj): """Reduce the array along a given axis by sum value""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'sum', new_attrs, inputs def reduce_prod(attrs, inputs, proto_obj): """Reduce the array along a given axis by product value""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'prod', new_attrs, inputs def reduce_log_sum(attrs, inputs, proto_obj): """Reduce the array along a given axis by log sum value""" keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims') sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'), keepdims=keep_dims) log_sym = symbol.log(sum_op) return log_sym, attrs, inputs def reduce_log_sum_exp(attrs, inputs, proto_obj): """Reduce the array along a given axis by log sum exp value""" keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims') exp_op = symbol.exp(inputs[0]) sum_op = symbol.sum(exp_op, axis=attrs.get('axes'), keepdims=keep_dims) log_sym = symbol.log(sum_op) return log_sym, attrs, inputs def reduce_sum_square(attrs, inputs, proto_obj): """Reduce the array along a given axis by sum square value""" square_op = symbol.square(inputs[0]) sum_op = symbol.sum(square_op, axis=attrs.get('axes'), keepdims=attrs.get('keepdims')) return sum_op, attrs, inputs def reduce_l1(attrs, inputs, proto_obj): """Reduce input tensor by l1 normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'ord' : 1}) return 'norm', new_attrs, inputs def shape(attrs, inputs, proto_obj): """Returns shape of input array.""" return 'shape_array', attrs, inputs def size(attrs, inputs, proto_obj): """Returns array containing size of data.""" return "size_array", attrs, inputs def reduce_l2(attrs, inputs, proto_obj): """Reduce input tensor by l2 normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) return 'norm', new_attrs, inputs def avg_pooling(attrs, inputs, proto_obj): """ Average pooling""" new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape': 'kernel', 'strides': 'stride', 'pads': 'pad', }) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'pooling_convention': 'valid' }) new_op = translation_utils._fix_pooling('avg', inputs, new_attrs) return new_op, new_attrs, inputs def lp_pooling(attrs, inputs, proto_obj): """LP Pooling""" p_value = attrs.get('p', 2) new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape': 'kernel', 'strides': 'stride', 'pads': 'pad' }) new_attrs = translation_utils._remove_attributes(new_attrs, ['p']) new_attrs = 
translation_utils._add_extra_attributes(new_attrs, {'pooling_convention': 'valid', 'p_value': p_value }) new_op = translation_utils._fix_pooling('lp', inputs, new_attrs) return new_op, new_attrs, inputs def max_pooling(attrs, inputs, proto_obj): """ Average pooling""" new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape': 'kernel', 'strides': 'stride', 'pads': 'pad', }) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'pooling_convention': 'valid' }) new_op = translation_utils._fix_pooling('max', inputs, new_attrs) return new_op, new_attrs, inputs def max_roi_pooling(attrs, inputs, proto_obj): """Max ROI Pooling.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'pooled_shape': 'pooled_size', 'spatial_scale': 'spatial_scale' }) return 'ROIPooling', new_attrs, inputs def depthtospace(attrs, inputs, proto_obj): """Rearranges data from depth into blocks of spatial data.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'}) return "depth_to_space", new_attrs, inputs def spacetodepth(attrs, inputs, proto_obj): """Rearranges blocks of spatial data into depth.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'}) return "space_to_depth", new_attrs, inputs def hardmax(attrs, inputs, proto_obj): """Returns batched one-hot vectors.""" input_tensor_data = proto_obj.model_metadata.get('input_tensor_data')[0] input_shape = input_tensor_data[1] axis = int(attrs.get('axis', 1)) axis = axis if axis >= 0 else len(input_shape) + axis if axis == len(input_shape) - 1: amax = symbol.argmax(inputs[0], axis=-1) one_hot = symbol.one_hot(amax, depth=input_shape[-1]) return one_hot, attrs, inputs # since reshape doesn't take a tensor for shape, # computing with np.prod. This needs to be changed to # to use mx.sym.prod() when mx.sym.reshape() is fixed. # (https://github.com/apache/incubator-mxnet/issues/10789) new_shape = (int(np.prod(input_shape[:axis])), int(np.prod(input_shape[axis:]))) reshape_op = symbol.reshape(inputs[0], new_shape) amax = symbol.argmax(reshape_op, axis=-1) one_hot = symbol.one_hot(amax, depth=new_shape[-1]) hardmax_op = symbol.reshape(one_hot, input_shape) return hardmax_op, attrs, inputs def lpnormalization(attrs, inputs, proto_obj): """ONNX does not have eps attribute, so cannot map it to L2normalization in MXNet without that, it works as norm operator discussion in PR: https://github.com/onnx/onnx/pull/1330""" new_attrs = translation_utils._fix_attribute_names(attrs, {'p': 'ord'}) axis = int(attrs.get("axis", -1)) new_attrs.update(axis=axis) return 'norm', new_attrs, inputs
apache-2.0
-4,393,205,755,175,361,500
-6,664,043,194,640,194,000
43.299616
99
0.599081
false
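Note on the ONNX-to-MXNet translations above: most handlers simply rename ONNX attributes to their MXNet equivalents (via `_fix_attribute_names`) before delegating to an MXNet operator. The snippet below is a hedged, standalone illustration of that renaming pattern, not the actual `_translation_utils` implementation.

def fix_attribute_names(attrs, name_map):
    # Return a copy of attrs with keys renamed per name_map,
    # e.g. ONNX's 'axes' becomes MXNet's 'axis'.
    return {name_map.get(key, key): value for key, value in attrs.items()}

# Example mirroring concat() above: ONNX 'axis' maps to MXNet 'dim'.
onnx_attrs = {'axis': 1}
print(fix_attribute_names(onnx_attrs, {'axis': 'dim'}))  # {'dim': 1}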
AnotherIvan/calibre
src/calibre/ebooks/lrf/lrfparser.py
15
7322
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' '''''' import sys, array, os, re, codecs, logging from calibre import setup_cli_handlers from calibre.utils.config import OptionParser from calibre.utils.filenames import ascii_filename from calibre.ebooks.lrf.meta import LRFMetaFile from calibre.ebooks.lrf.objects import get_object, PageTree, StyleObject, \ Font, Text, TOCObject, BookAttr, ruby_tags class LRFDocument(LRFMetaFile): class temp(object): pass def __init__(self, stream): LRFMetaFile.__init__(self, stream) self.scramble_key = self.xor_key self.page_trees = [] self.font_map = {} self.image_map = {} self.toc = '' self.keep_parsing = True def parse(self): self._parse_objects() self.metadata = LRFDocument.temp() for a in ('title', 'title_reading', 'author', 'author_reading', 'book_id', 'classification', 'free_text', 'publisher', 'label', 'category'): setattr(self.metadata, a, getattr(self, a)) self.doc_info = LRFDocument.temp() for a in ('thumbnail', 'language', 'creator', 'producer', 'page'): setattr(self.doc_info, a, getattr(self, a)) self.doc_info.thumbnail_extension = self.thumbail_extension() self.device_info = LRFDocument.temp() for a in ('dpi', 'width', 'height'): setattr(self.device_info, a, getattr(self, a)) def _parse_objects(self): self.objects = {} self._file.seek(self.object_index_offset) obj_array = array.array("I", self._file.read(4*4*self.number_of_objects)) if ord(array.array("i",[1]).tostring()[0])==0: #big-endian obj_array.byteswap() for i in range(self.number_of_objects): if not self.keep_parsing: break objid, objoff, objsize = obj_array[i*4:i*4+3] self._parse_object(objid, objoff, objsize) for obj in self.objects.values(): if not self.keep_parsing: break if hasattr(obj, 'initialize'): obj.initialize() def _parse_object(self, objid, objoff, objsize): obj = get_object(self, self._file, objid, objoff, objsize, self.scramble_key) self.objects[objid] = obj if isinstance(obj, PageTree): self.page_trees.append(obj) elif isinstance(obj, TOCObject): self.toc = obj elif isinstance(obj, BookAttr): self.ruby_tags = {} for h in ruby_tags.values(): attr = h[0] if hasattr(obj, attr): self.ruby_tags[attr] = getattr(obj, attr) def __iter__(self): for pt in self.page_trees: yield pt def write_files(self): for obj in self.image_map.values() + self.font_map.values(): open(obj.file, 'wb').write(obj.stream) def to_xml(self, write_files=True): bookinfo = u'<BookInformation>\n<Info version="1.1">\n<BookInfo>\n' bookinfo += u'<Title reading="%s">%s</Title>\n'%(self.metadata.title_reading, self.metadata.title) bookinfo += u'<Author reading="%s">%s</Author>\n'%(self.metadata.author_reading, self.metadata.author) bookinfo += u'<BookID>%s</BookID>\n'%(self.metadata.book_id,) bookinfo += u'<Publisher reading="">%s</Publisher>\n'%(self.metadata.publisher,) bookinfo += u'<Label reading="">%s</Label>\n'%(self.metadata.label,) bookinfo += u'<Category reading="">%s</Category>\n'%(self.metadata.category,) bookinfo += u'<Classification reading="">%s</Classification>\n'%(self.metadata.classification,) bookinfo += u'<FreeText reading="">%s</FreeText>\n</BookInfo>\n<DocInfo>\n'%(self.metadata.free_text,) th = self.doc_info.thumbnail if th: prefix = ascii_filename(self.metadata.title) bookinfo += u'<CThumbnail file="%s" />\n'%(prefix+'_thumbnail.'+self.doc_info.thumbnail_extension,) if write_files: open(prefix+'_thumbnail.'+self.doc_info.thumbnail_extension, 'wb').write(th) bookinfo += u'<Language reading="">%s</Language>\n'%(self.doc_info.language,) bookinfo += 
u'<Creator reading="">%s</Creator>\n'%(self.doc_info.creator,) bookinfo += u'<Producer reading="">%s</Producer>\n'%(self.doc_info.producer,) bookinfo += u'<SumPage>%s</SumPage>\n</DocInfo>\n</Info>\n%s</BookInformation>\n'%(self.doc_info.page,self.toc) pages = u'' done_main = False pt_id = -1 for page_tree in self: if not done_main: done_main = True pages += u'<Main>\n' close = u'</Main>\n' pt_id = page_tree.id else: pages += u'<PageTree objid="%d">\n'%(page_tree.id,) close = u'</PageTree>\n' for page in page_tree: pages += unicode(page) pages += close traversed_objects = [int(i) for i in re.findall(r'objid="(\w+)"', pages)] + [pt_id] objects = u'\n<Objects>\n' styles = u'\n<Style>\n' for obj in self.objects: obj = self.objects[obj] if obj.id in traversed_objects: continue if isinstance(obj, (Font, Text, TOCObject)): continue if isinstance(obj, StyleObject): styles += unicode(obj) else: objects += unicode(obj) styles += '</Style>\n' objects += '</Objects>\n' if write_files: self.write_files() return '<BBeBXylog version="1.0">\n' + bookinfo + pages + styles + objects + '</BBeBXylog>' def option_parser(): parser = OptionParser(usage=_('%prog book.lrf\nConvert an LRF file into an LRS (XML UTF-8 encoded) file')) parser.add_option('--output', '-o', default=None, help=_('Output LRS file'), dest='out') parser.add_option('--dont-output-resources', default=True, action='store_false', help=_('Do not save embedded image and font files to disk'), dest='output_resources') parser.add_option('--verbose', default=False, action='store_true', dest='verbose', help=_('Be more verbose')) return parser def main(args=sys.argv, logger=None): parser = option_parser() opts, args = parser.parse_args(args) if logger is None: level = logging.DEBUG if opts.verbose else logging.INFO logger = logging.getLogger('lrf2lrs') setup_cli_handlers(logger, level) if len(args) != 2: parser.print_help() return 1 if opts.out is None: opts.out = os.path.join(os.path.dirname(args[1]), os.path.splitext(os.path.basename(args[1]))[0]+".lrs") o = codecs.open(os.path.abspath(os.path.expanduser(opts.out)), 'wb', 'utf-8') o.write(u'<?xml version="1.0" encoding="UTF-8"?>\n') logger.info(_('Parsing LRF...')) d = LRFDocument(open(args[1], 'rb')) d.parse() logger.info(_('Creating XML...')) o.write(d.to_xml(write_files=opts.output_resources)) logger.info(_('LRS written to ')+opts.out) return 0 if __name__ == '__main__': sys.exit(main())
gpl-3.0
8,822,289,262,634,771,000
-5,627,603,175,741,944,000
42.583333
119
0.573341
false
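Note on `_parse_objects` in the lrfparser record above: the LRF object index is stored little-endian, so it is byte-swapped when the host is big-endian (detected with a one-element `array` probe). Below is a hedged standalone sketch of the same check, using `sys.byteorder` as the more readable equivalent test.

import array
import sys

def read_le_uint32_array(raw_bytes):
    # Interpret raw_bytes as little-endian unsigned 32-bit ints, swapping
    # only when the host itself is big-endian (same intent as lrfparser.py).
    values = array.array("I", raw_bytes)
    if sys.byteorder == "big":
        values.byteswap()
    return values

# [1, 2] on the usual platforms where "I" is a 4-byte unsigned int.
print(list(read_le_uint32_array(b"\x01\x00\x00\x00\x02\x00\x00\x00")))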
dwitvliet/CATMAID
django/applications/catmaid/control/link.py
1
6452
import json from django.http import HttpResponse from django.core.exceptions import ObjectDoesNotExist from catmaid.models import UserRole, Project, Relation, Treenode, Connector, \ TreenodeConnector, ClassInstance from catmaid.control.authentication import requires_user_role, can_edit_or_fail @requires_user_role(UserRole.Annotate) def create_link(request, project_id=None): """ Create a link, currently only a presynaptic_to or postsynaptic_to relationship between a treenode and a connector. """ from_id = int(request.POST.get('from_id', 0)) to_id = int(request.POST.get('to_id', 0)) link_type = request.POST.get('link_type', 'none') try: project = Project.objects.get(id=project_id) relation = Relation.objects.get(project=project, relation_name=link_type) from_treenode = Treenode.objects.get(id=from_id) to_connector = Connector.objects.get(id=to_id, project=project) links = TreenodeConnector.objects.filter( connector=to_id, treenode=from_id, relation=relation.id) except ObjectDoesNotExist as e: return HttpResponse(json.dumps({'error': e.message})) if links.count() > 0: return HttpResponse(json.dumps({'error': "A relation '%s' between these two elements already exists!" % link_type})) related_skeleton_count = ClassInstance.objects.filter(project=project, id=from_treenode.skeleton.id).count() if related_skeleton_count > 1: # Can never happen. What motivated this check for an error of this kind? Would imply that a treenode belongs to more than one skeleton, which was possible when skeletons owned treendoes via element_of relations rather than by the skeleton_id column. return HttpResponse(json.dumps({'error': 'Multiple rows for treenode with ID #%s found' % from_id})) elif related_skeleton_count == 0: return HttpResponse(json.dumps({'error': 'Failed to retrieve skeleton id of treenode #%s' % from_id})) if link_type == 'presynaptic_to': # Enforce only one presynaptic link presyn_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation) if (presyn_links.count() != 0): return HttpResponse(json.dumps({'error': 'Connector %s does not have zero presynaptic connections.' % to_id})) # The object returned in case of success result = {} if link_type == 'postsynaptic_to': # Warn if there is already a link from the source skeleton to the # target skeleton. This can happen and is not necessarely wrong, but # worth to double check, because it is likely a mistake. post_links_to_skeleton = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation, skeleton_id=from_treenode.skeleton_id).count() if post_links_to_skeleton == 1: result['warning'] = 'There is already one post-synaptic ' \ 'connection to the target skeleton' elif post_links_to_skeleton > 1: result['warning'] = 'There are already %s post-synaptic ' \ 'connections to the target skeleton' % post_links_to_skeleton # Enforce only synaptic links gapjunction_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation__relation_name='gapjunction_with') if (gapjunction_links.count() != 0): return HttpResponse(json.dumps({'error': 'Connector %s cannot have both a gap junction and a postsynaptic node.' 
% to_id})) if link_type == 'gapjunction_with': # Enforce only two gap junction links gapjunction_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation) synapse_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation__relation_name__endswith='synaptic_to') if (gapjunction_links.count() > 1): return HttpResponse(json.dumps({'error': 'Connector %s can only have two gap junction connections.' % to_id})) if (synapse_links.count() != 0): return HttpResponse(json.dumps({'error': 'Connector %s is part of a synapse, and gap junction can not be added.' % to_id})) # Enforce same relations across all linked connectors; only new postsynaptic links are valid if any([to_connector.children.exists(), to_connector.parent]) and link_type != 'postsynaptic_to': return HttpResponse(json.dumps({'error': 'Cannot add %s connection to a linked connector.' % link_type})) TreenodeConnector( user=request.user, project=project, relation=relation, treenode=from_treenode, # treenode_id = from_id skeleton=from_treenode.skeleton, # treenode.skeleton_id where treenode.id = from_id connector=to_connector # connector_id = to_id ).save() result['message'] = 'success' return HttpResponse(json.dumps(result), content_type='application/json') @requires_user_role(UserRole.Annotate) def delete_link(request, project_id=None): connector_id = int(request.POST.get('connector_id', 0)) treenode_id = int(request.POST.get('treenode_id', 0)) links = TreenodeConnector.objects.filter( connector=connector_id, treenode=treenode_id) if links.count() == 0: return HttpResponse(json.dumps({'error': 'Failed to delete connector #%s from geometry domain.' % connector_id})) # Enforce same relations across all linked connectors; only removal of postsynaptic links are valid try: to_connector = Connector.objects.get(id=connector_id, project=project_id) link_type = links[0].relation.relation_name except ObjectDoesNotExist as e: return HttpResponse(json.dumps({'error': e.message})) if any([to_connector.children.exists(), to_connector.parent]) and link_type != 'postsynaptic_to': return HttpResponse(json.dumps({'error': 'Cannot remove %s connection to a linked connector.' % link_type})) # Could be done by filtering above when obtaining the links, # but then one cannot distinguish between the link not existing # and the user_id not matching or not being superuser. can_edit_or_fail(request.user, links[0].id, 'treenode_connector') links[0].delete() return HttpResponse(json.dumps({'result': 'Removed treenode to connector link'}))
gpl-3.0
-4,658,883,439,489,669,000
5,736,590,440,537,466,000
51.032258
257
0.688314
false
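Note on `create_link` in the CATMAID record above: the view enforces cardinality rules before inserting a link (one presynaptic link per connector, at most two gap-junction partners, no mixing of gap junctions with synaptic links). Below is a hedged sketch of those rules as a plain function, independent of Django and of CATMAID's models.

def can_add_link(link_type, existing_link_types):
    # existing_link_types: relation names already attached to the connector.
    # Simplified mirror of the checks in create_link above.
    if link_type == 'presynaptic_to' and 'presynaptic_to' in existing_link_types:
        return False, 'connector already has a presynaptic link'
    if link_type == 'postsynaptic_to' and 'gapjunction_with' in existing_link_types:
        return False, 'connector already participates in a gap junction'
    if link_type == 'gapjunction_with':
        if existing_link_types.count('gapjunction_with') >= 2:
            return False, 'a gap junction links at most two treenodes'
        if any(t.endswith('synaptic_to') for t in existing_link_types):
            return False, 'connector is already part of a synapse'
    return True, None

print(can_add_link('gapjunction_with', ['postsynaptic_to']))
# (False, 'connector is already part of a synapse')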
nhenezi/kuma
vendor/packages/sqlalchemy/examples/large_collection/large_collection.py
7
3294
from sqlalchemy import (MetaData, Table, Column, Integer, String, ForeignKey, create_engine) from sqlalchemy.orm import (mapper, relationship, sessionmaker) meta = MetaData() org_table = Table('organizations', meta, Column('org_id', Integer, primary_key=True), Column('org_name', String(50), nullable=False, key='name'), mysql_engine='InnoDB') member_table = Table('members', meta, Column('member_id', Integer, primary_key=True), Column('member_name', String(50), nullable=False, key='name'), Column('org_id', Integer, ForeignKey('organizations.org_id', ondelete="CASCADE")), mysql_engine='InnoDB') class Organization(object): def __init__(self, name): self.name = name class Member(object): def __init__(self, name): self.name = name mapper(Organization, org_table, properties = { 'members' : relationship(Member, # Organization.members will be a Query object - no loading # of the entire collection occurs unless requested lazy="dynamic", # Member objects "belong" to their parent, are deleted when # removed from the collection cascade="all, delete-orphan", # "delete, delete-orphan" cascade does not load in objects on delete, # allows ON DELETE CASCADE to handle it. # this only works with a database that supports ON DELETE CASCADE - # *not* sqlite or MySQL with MyISAM passive_deletes=True, ) }) mapper(Member, member_table) if __name__ == '__main__': engine = create_engine("mysql://scott:tiger@localhost/test", echo=True) meta.create_all(engine) # expire_on_commit=False means the session contents # will not get invalidated after commit. sess = sessionmaker(engine, expire_on_commit=False)() # create org with some members org = Organization('org one') org.members.append(Member('member one')) org.members.append(Member('member two')) org.members.append(Member('member three')) sess.add(org) print "-------------------------\nflush one - save org + 3 members\n" sess.commit() # the 'members' collection is a Query. it issues # SQL as needed to load subsets of the collection. print "-------------------------\nload subset of members\n" members = org.members.filter(member_table.c.name.like('%member t%')).all() print members # new Members can be appended without any # SQL being emitted to load the full collection org.members.append(Member('member four')) org.members.append(Member('member five')) org.members.append(Member('member six')) print "-------------------------\nflush two - save 3 more members\n" sess.commit() # delete the object. Using ON DELETE CASCADE # SQL is only emitted for the head row - the Member rows # disappear automatically without the need for additional SQL. sess.delete(org) print "-------------------------\nflush three - delete org, delete members in one statement\n" sess.commit() print "-------------------------\nno Member rows should remain:\n" print sess.query(Member).count() print "------------------------\ndone. dropping tables." meta.drop_all(engine)
mpl-2.0
-5,136,436,865,322,907,000
-6,642,292,300,396,388,000
34.053191
98
0.624165
false
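Note on the lazy="dynamic" collection in the example above: `Organization.members` behaves as a Query, so subsets and counts are computed in SQL without loading the whole collection. A hedged usage fragment follows, assuming the session and mappings defined in the example.

# Usage fragment; assumes `sess`, Organization and Member from the example above.
org = sess.query(Organization).first()

# Only matching rows are loaded; the full collection never is.
some_members = org.members.filter(Member.name.like('%member t%')).all()

# Counting also happens in SQL, not by iterating in Python.
member_count = org.members.count()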
jlachowski/django-transmeta
transmeta/management/commands/sync_transmeta_db.py
3
12022
""" Detect new translatable fields in all models and sync database structure. You will need to execute this command in two cases: 1. When you add new languages to settings.LANGUAGES. 2. When you new translatable fields to your models. """ import re from optparse import make_option from django.conf import settings from django.core.management.base import BaseCommand from django.core.management.color import no_style from django.db import connection, transaction from django.db import backend from django.db.models import get_models from django.db.models.fields import FieldDoesNotExist from transmeta import (mandatory_language, get_real_fieldname, get_languages, get_all_translatable_fields) VALUE_DEFAULT = 'WITHOUT VALUE' def ask_for_confirmation(sql_sentences, model_full_name, assume_yes): print ('\nSQL to synchronize "%s" schema:' % model_full_name) for sentence in sql_sentences: print (' %s' % sentence) if assume_yes: print ('\nAre you sure that you want to execute the previous SQL: (y/n) [n]: YES') return True while True: prompt = '\nAre you sure that you want to execute the previous SQL: (y/n) [n]: ' answer = raw_input(prompt).strip() if answer == '': return False elif answer not in ('y', 'n', 'yes', 'no'): print ('Please answer yes or no') elif answer == 'y' or answer == 'yes': return True else: return False def print_db_change_langs(db_change_langs, field_name, model_name): print ('\nThis languages can change in "%s" field from "%s" model: %s' % \ (field_name, model_name, ", ".join(db_change_langs))) class Command(BaseCommand): help = "Detect new translatable fields or new available languages and sync database structure" option_list = BaseCommand.option_list + ( make_option('-y', '--yes', action='store_true', dest='assume_yes', help="Assume YES on all queries"), make_option('-d', '--default', dest='default_language', help="Language code of your default language"), ) def handle(self, *args, **options): """ command execution """ assume_yes = options.get('assume_yes', False) default_language = options.get('default_language', None) # set manual transaction management transaction.commit_unless_managed() transaction.enter_transaction_management() transaction.managed(True) self.cursor = connection.cursor() self.introspection = connection.introspection self.default_lang = default_language or mandatory_language() all_models = get_models() found_db_change_fields = False for model in all_models: if hasattr(model._meta, 'translatable_fields'): model_full_name = '%s.%s' % (model._meta.app_label, model._meta.module_name) translatable_fields = get_all_translatable_fields(model, column_in_current_table=True) db_table = model._meta.db_table for field_name in translatable_fields: db_table_fields = self.get_table_fields(db_table) db_change_langs = list(set(list(self.get_db_change_languages(field_name, db_table_fields)) + [self.default_lang])) if db_change_langs: sql_sentences = self.get_sync_sql(field_name, db_change_langs, model, db_table_fields) if sql_sentences: found_db_change_fields = True print_db_change_langs(db_change_langs, field_name, model_full_name) execute_sql = ask_for_confirmation(sql_sentences, model_full_name, assume_yes) if execute_sql: print ('Executing SQL...') for sentence in sql_sentences: self.cursor.execute(sentence) # commit transaction.commit() print ('Done') else: print ('SQL not executed') if transaction.is_dirty(): transaction.commit() transaction.leave_transaction_management() if not found_db_change_fields: print ('\nNo new translatable fields detected') if default_language: 
variable = 'TRANSMETA_DEFAULT_LANGUAGE' has_transmeta_default_language = getattr(settings, variable, False) if not has_transmeta_default_language: variable = 'LANGUAGE_CODE' if getattr(settings, variable) != default_language: print (('\n\nYou should change in your settings ' 'the %s variable to "%s"' % (variable, default_language))) def get_table_fields(self, db_table): """ get table fields from schema """ db_table_desc = self.introspection.get_table_description(self.cursor, db_table) return [t[0] for t in db_table_desc] def get_field_required_in_db(self, db_table, field_name, value_not_implemented=False): table_fields = self.introspection.get_table_description(self.cursor, db_table) for f in table_fields: if f[0] == field_name: is_null = f[-1] if is_null is None: # Not Implemented return value_not_implemented return not is_null return False def get_db_change_languages(self, field_name, db_table_fields): """ get only db changes fields """ for lang_code, lang_name in get_languages(): if get_real_fieldname(field_name, lang_code) not in db_table_fields: yield lang_code for db_table_field in db_table_fields: pattern = re.compile('^%s_(?P<lang>\w{2})$' % field_name) m = pattern.match(db_table_field) if not m: continue lang = m.group('lang') yield lang def was_translatable_before(self, field_name, db_table_fields): """ check if field_name was translatable before syncing schema """ if field_name in db_table_fields: # this implies field was never translatable before, data is in this field return False else: return True def get_default_field(self, field_name, model): for lang_code, lang_name in get_languages(): field_name_i18n = get_real_fieldname(field_name, lang_code) f = model._meta.get_field(field_name_i18n) if not f.null: return f try: return model._meta.get_field(field_name) except FieldDoesNotExist: return None def get_value_default(self): return getattr(settings, 'TRANSMETA_VALUE_DEFAULT', VALUE_DEFAULT) def get_type_of_db_field(self, field_name, model): field = self.get_default_field(field_name, model) if not field: field = model._meta.get_field(get_real_fieldname(field_name)) try: col_type = field.db_type(connection) except TypeError: # old django col_type = field.db_type() return col_type def get_sync_sql(self, field_name, db_change_langs, model, db_table_fields): """ returns SQL needed for sync schema for a new translatable field """ qn = connection.ops.quote_name style = no_style() sql_output = [] db_table = model._meta.db_table was_translatable_before = self.was_translatable_before(field_name, db_table_fields) default_f = self.get_default_field(field_name, model) default_f_required = default_f and self.get_field_required_in_db(db_table, default_f.name, value_not_implemented=False) for lang in db_change_langs: new_field = get_real_fieldname(field_name, lang) try: f = model._meta.get_field(new_field) col_type = self.get_type_of_db_field(field_name, model) field_column = f.column except FieldDoesNotExist: # columns in db, removed the settings.LANGUGES field_column = new_field col_type = self.get_type_of_db_field(field_name, model) field_sql = [style.SQL_FIELD(qn(field_column)), style.SQL_COLTYPE(col_type)] alter_colum_set = 'ALTER COLUMN %s SET' % qn(field_column) if default_f: alter_colum_drop = 'ALTER COLUMN %s DROP' % qn(field_column) not_null = style.SQL_KEYWORD('NOT NULL') if 'mysql' in backend.__name__: alter_colum_set = 'MODIFY %s %s' % (qn(field_column), col_type) not_null = style.SQL_KEYWORD('NULL') if default_f: alter_colum_drop = 'MODIFY %s %s' % (qn(field_column), col_type) # 
column creation if not new_field in db_table_fields: sql_output.append("ALTER TABLE %s ADD COLUMN %s" % (qn(db_table), ' '.join(field_sql))) if lang == self.default_lang and not was_translatable_before: # data copy from old field (only for default language) sql_output.append("UPDATE %s SET %s = %s" % (qn(db_table), \ qn(field_column), qn(field_name))) if not f.null: # changing to NOT NULL after having data copied sql_output.append("ALTER TABLE %s %s %s" % \ (qn(db_table), alter_colum_set, \ style.SQL_KEYWORD('NOT NULL'))) elif default_f and not default_f.null: if lang == self.default_lang: f_required = self.get_field_required_in_db(db_table, field_column, value_not_implemented=False) if default_f.name == new_field and default_f_required: continue if not f_required: # data copy from old field (only for default language) sql_output.append(("UPDATE %(db_table)s SET %(f_colum)s = '%(value_default)s' " "WHERE %(f_colum)s is %(null)s or %(f_colum)s = '' " % {'db_table': qn(db_table), 'f_colum': qn(field_column), 'value_default': self.get_value_default(), 'null': style.SQL_KEYWORD('NULL'), })) # changing to NOT NULL after having data copied sql_output.append("ALTER TABLE %s %s %s" % \ (qn(db_table), alter_colum_set, \ style.SQL_KEYWORD('NOT NULL'))) else: f_required = self.get_field_required_in_db(db_table, field_column, value_not_implemented=True) if f_required: sql_output.append(("ALTER TABLE %s %s %s" % (qn(db_table), alter_colum_drop, not_null))) if not was_translatable_before: # we drop field only if field was no translatable before sql_output.append("ALTER TABLE %s DROP COLUMN %s" % (qn(db_table), qn(field_name))) return sql_output
lgpl-3.0
-9,098,419,429,064,430,000
3,639,866,725,096,217,000
45.960938
134
0.539594
false
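A minimal sketch of the per-language column pattern that get_db_change_languages applies in the management command above; the field and column names here are illustrative and not taken from any real schema:

import re

field_name = 'title'  # illustrative translatable field
pattern = re.compile(r'^%s_(?P<lang>\w{2})$' % field_name)

for column in ('title_en', 'title_es', 'title', 'body_en'):
    match = pattern.match(column)
    # Only columns named <field>_<two-letter language code> yield a language.
    print(column, '->', match.group('lang') if match else None)
# title_en -> en, title_es -> es, title -> None, body_en -> None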
manankalra/Twitter-Sentiment-Analysis
main/sentiment/tweepy_demo/tweep.py
1
1099
#!/usr/bin/env python """ tweepy(Twitter API) demo """ __author__ = "Manan Kalra" __email__ = "manankalr29@gmail.com" from tweepy import Stream, OAuthHandler from tweepy.streaming import StreamListener import time # Add your own consumer_key = "" consumer_secret = "" access_token = "" access_token_secret = "" class listener(StreamListener): def on_data(self, raw_data): try: # print(raw_data) tweet = raw_data.split(",\"text\":")[1].split(",\"source\"")[0] print(tweet) save_time = str(time.time()) + "::" + tweet save_file = open('tweetDB.csv', 'a') save_file.write(save_time) save_file.write("\n") save_file.close() return True except BaseException: print("Failed") def on_error(self, status_code): print(status_code) auth = OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) twitterStream = Stream(auth, listener()) twitterStream.filter(track=["<anything: noun/verb/adverb/...>"])
mit
3,438,555,079,804,068,400
-2,711,578,792,622,366,700
23.422222
75
0.605096
false
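The on_data handler in tweep.py above pulls the tweet text out of the raw streaming payload with str.split; a sketch of the equivalent lookup with the json module, run on a made-up, truncated payload (the field values are illustrative, not real Twitter data):

import json

raw_data = '{"created_at": "...", "id": 1, "text": "hello world", "source": "web"}'
# Parsing the payload as JSON avoids depending on the exact ordering of the keys.
tweet = json.loads(raw_data)['text']
print(tweet)  # hello world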
bop/foundation
lib/python2.7/site-packages/django/contrib/auth/tokens.py
96
2583
from datetime import date from django.conf import settings from django.utils.http import int_to_base36, base36_to_int from django.utils.crypto import constant_time_compare, salted_hmac class PasswordResetTokenGenerator(object): """ Strategy object used to generate and check tokens for the password reset mechanism. """ def make_token(self, user): """ Returns a token that can be used once to do a password reset for the given user. """ return self._make_token_with_timestamp(user, self._num_days(self._today())) def check_token(self, user, token): """ Check that a password reset token is correct for a given user. """ # Parse the token try: ts_b36, hash = token.split("-") except ValueError: return False try: ts = base36_to_int(ts_b36) except ValueError: return False # Check that the timestamp/uid has not been tampered with if not constant_time_compare(self._make_token_with_timestamp(user, ts), token): return False # Check the timestamp is within limit if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS: return False return True def _make_token_with_timestamp(self, user, timestamp): # timestamp is number of days since 2001-1-1. Converted to # base 36, this gives us a 3 digit string until about 2121 ts_b36 = int_to_base36(timestamp) # By hashing on the internal state of the user and using state # that is sure to change (the password salt will change as soon as # the password is set, at least for current Django auth, and # last_login will also change), we produce a hash that will be # invalid as soon as it is used. # We limit the hash to 20 chars to keep URL short key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator" # Ensure results are consistent across DB backends login_timestamp = user.last_login.replace(microsecond=0, tzinfo=None) value = (unicode(user.id) + user.password + unicode(login_timestamp) + unicode(timestamp)) hash = salted_hmac(key_salt, value).hexdigest()[::2] return "%s-%s" % (ts_b36, hash) def _num_days(self, dt): return (dt - date(2001, 1, 1)).days def _today(self): # Used for mocking in tests return date.today() default_token_generator = PasswordResetTokenGenerator()
gpl-2.0
5,408,643,527,369,926,000
-4,367,544,138,506,503,000
35.380282
87
0.630662
false
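A minimal usage sketch for the PasswordResetTokenGenerator defined in tokens.py above, assuming a configured Django project and an existing user who has logged in at least once (the username and password below are illustrative):

from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator

user = User.objects.get(username='alice')           # illustrative user
token = default_token_generator.make_token(user)    # '<days_base36>-<20-char hash>'
assert default_token_generator.check_token(user, token)

# The hash covers user.password, so the token stops validating as soon as the
# password (and therefore its salt) changes.
user.set_password('new-password')                   # illustrative value
user.save()
assert not default_token_generator.check_token(user, token)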
aaronplasek/CorEx
test_corex.py
2
4334
# Run tests with nosetests import corex import numpy as np from functools import partial, update_wrapper verbose = False seed = 3 def generate_data(n_samples=100, group_sizes=[2], dim_hidden=2, missing=0): Y_true = [np.random.randint(0, dim_hidden, n_samples) for _ in group_sizes] X = np.hstack([np.repeat(Y_true[i][:,np.newaxis], size, axis=1) for i, size in enumerate(group_sizes)]) clusters = [i for i in range(len(group_sizes)) for _ in range(group_sizes[i])] tcs = map(lambda z: (z-1)*np.log(dim_hidden), group_sizes) X = np.where(np.random.random(X.shape) >= missing, X, -1) return X, Y_true, clusters, tcs def generate_noisy_data(n_samples=100, group_sizes=[2], erasure_p=0): # Implement an erasure channel with erasure probability erasure_p # The capacity of a single such channel is 1-erasure_p, # So if we have group_size < 1/(1-p) , Shannon's bound forbids perfect recovery # Or, 1 - 1/g < p dim_hidden = 3 Y_true = [np.random.randint(0, 2, n_samples) for _ in group_sizes] X = np.hstack([np.repeat(Y_true[i][:,np.newaxis], size, axis=1) for i, size in enumerate(group_sizes)]) X = np.where(np.random.random(X.shape) < erasure_p, 2, X) # Erasure channel clusters = [i for i in range(len(group_sizes)) for _ in range(group_sizes[i])] tcs = map(lambda z: (z-1)*np.log(2), group_sizes) return X, Y_true, clusters, tcs def check_correct(clusters, tcs, Y_true, X, corex): assert np.array_equal(corex.transform(X), corex.labels) # Correctness of transform assert np.array_equal(corex.clusters, clusters), str(zip(corex.clusters, clusters)) # Check connections for j, tc in enumerate(tcs): assert np.abs(corex.tcs[j]-tc)/tc < 0.1, "Values %f, %f" %(corex.tcs[j], tc) # TC relative error is small assert len(set(map(tuple, zip(corex.labels.T[j], Y_true[j])))) == len(set(Y_true[j])), \ zip(corex.labels.T[j], Y_true[j]) # One-to-one correspondence of labels def test_corex_all(): n_samples = 100 for group_sizes in [[2], [3, 2]]: for dim_hidden in [2, 3]: np.random.seed(seed) X, Y_true, clusters, tcs = generate_data(n_samples=n_samples, group_sizes=group_sizes, dim_hidden=dim_hidden) methods = [ corex.Corex(n_hidden=len(group_sizes), dim_hidden=dim_hidden, missing_values=-1, seed=seed, verbose=verbose).fit(X) ] for i, method in enumerate(methods): f = partial(check_correct, clusters, method.tcs, Y_true, X, method) update_wrapper(f, check_correct) f.description = 'method: ' + ['base', 'gaussian', 'discrete', 'discrete NT', 'gaussian NT', 'beta NT'][i] + \ ', groups:' + str(group_sizes) + ', dim_hidden:' + str(dim_hidden) + ', seed: '+str(seed) yield (f, ) def test_missing_values(): n_samples = 100 dim_hidden = 2 missing = 0.1 group_sizes = [10, 7] # Chance of entire row missing smaller than missing^n np.random.seed(seed) X, Y_true, clusters, tcs = generate_data(n_samples=n_samples, group_sizes=group_sizes, dim_hidden=dim_hidden, missing=missing) methods = [ corex.Corex(n_hidden=len(group_sizes), dim_hidden=dim_hidden, missing_values=-1, seed=seed, verbose=verbose).fit(X) ] for i, method in enumerate(methods): f = partial(check_correct, clusters, method.tcs, Y_true, X, method) update_wrapper(f, check_correct) f.description = 'missing values, '+ ['base', 'gaussian', 'discrete', 'discrete NT', 'gaussian NT'][i] + ', seed: '+str(seed) yield (f, ) def test_near_shannon_limit(): X, Y_true, clusters, tcs = generate_noisy_data(n_samples=1000, group_sizes=[200], erasure_p=1.-3./200) out = corex.Corex(n_hidden=1, seed=seed, verbose=verbose).fit(X) assert max(np.mean(Y_true==out.labels.T), 1-np.mean(Y_true==out.labels.T)) > 0.95 # 
rate = 3*capacity, near perfect X, Y_true, clusters, tcs = generate_noisy_data(n_samples=1000, group_sizes=[200], erasure_p=1.-1./200) out = corex.Corex(n_hidden=1, seed=seed, verbose=verbose).fit(X) assert max(np.mean(Y_true==out.labels.T), 1-np.mean(Y_true==out.labels.T)) < 0.9 # rate=capacity, not perfect
gpl-2.0
-4,844,486,332,188,252,000
2,607,355,148,563,431,000
53.1875
132
0.623673
false
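A small sketch of the erasure-channel arithmetic that test_near_shannon_limit above relies on; the helper name is ours, and the numbers are taken directly from the test:

def total_capacity(group_size, erasure_p):
    # Each observed copy is an erasure channel of capacity 1 - erasure_p, so
    # group_size independent copies carry group_size * (1 - erasure_p) bits
    # about the single hidden bit.
    return group_size * (1.0 - erasure_p)

print(total_capacity(200, 1.0 - 3.0 / 200))  # 3.0 bits: 3x the 1-bit rate, recovery expected
print(total_capacity(200, 1.0 - 1.0 / 200))  # 1.0 bit: rate equals capacity, recovery not expected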
morningman/palo
gensrc/script/palo_builtins_functions.py
2
31879
# Modifications copyright (C) 2017, Baidu.com, Inc. # Copyright 2017 The Apache Software Foundation # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # This is a list of all the functions that are not auto-generated. # It contains all the meta data that describes the function. # The format is: # [sql aliases], <return_type>, [<args>], <backend symbol>, # With an optional # <prepare symbol>, <close symbol> # # 'sql aliases' are the function names that can be used from sql. There must be at least # one per function. # # The symbol can be empty for functions that are not yet implemented or are special-cased # in Expr::CreateExpr() (i.e., functions that are implemented via a custom Expr class # rather than a single function). visible_functions = [ # Bit and Byte functions # For functions corresponding to builtin operators, we can reuse the implementations [['bitand'], 'TINYINT', ['TINYINT', 'TINYINT'], '_ZN4palo9Operators32bitand_tiny_int_val_tiny_int_valEPN8palo_udf' '15FunctionContextERKNS1_10TinyIntValES6_'], [['bitand'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], '_ZN4palo9Operators34bitand_small_int_val_small_int_valEPN8palo_udf' '15FunctionContextERKNS1_11SmallIntValES6_'], [['bitand'], 'INT', ['INT', 'INT'], '_ZN4palo9Operators22bitand_int_val_int_valEPN8palo_udf' '15FunctionContextERKNS1_6IntValES6_'], [['bitand'], 'BIGINT', ['BIGINT', 'BIGINT'], '_ZN4palo9Operators30bitand_big_int_val_big_int_valEPN8palo_udf' '15FunctionContextERKNS1_9BigIntValES6_'], [['bitand'], 'LARGEINT', ['LARGEINT', 'LARGEINT'], '_ZN4palo9Operators34bitand_large_int_val_large_int_valEPN8palo_udf' '15FunctionContextERKNS1_11LargeIntValES6_'], [['bitor'], 'TINYINT', ['TINYINT', 'TINYINT'], '_ZN4palo9Operators31bitor_tiny_int_val_tiny_int_valEPN8palo_udf' '15FunctionContextERKNS1_10TinyIntValES6_'], [['bitor'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], '_ZN4palo9Operators33bitor_small_int_val_small_int_valEPN8palo_udf' '15FunctionContextERKNS1_11SmallIntValES6_'], [['bitor'], 'INT', ['INT', 'INT'], '_ZN4palo9Operators21bitor_int_val_int_valEPN8palo_udf' '15FunctionContextERKNS1_6IntValES6_'], [['bitor'], 'BIGINT', ['BIGINT', 'BIGINT'], '_ZN4palo9Operators29bitor_big_int_val_big_int_valEPN8palo_udf' '15FunctionContextERKNS1_9BigIntValES6_'], [['bitor'], 'LARGEINT', ['LARGEINT', 'LARGEINT'], '_ZN4palo9Operators33bitor_large_int_val_large_int_valEPN8palo_udf' '15FunctionContextERKNS1_11LargeIntValES6_'], [['bitxor'], 'TINYINT', ['TINYINT', 'TINYINT'], '_ZN4palo9Operators32bitxor_tiny_int_val_tiny_int_valEPN8palo_udf' '15FunctionContextERKNS1_10TinyIntValES6_'], [['bitxor'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], '_ZN4palo9Operators34bitxor_small_int_val_small_int_valEPN8palo_udf' '15FunctionContextERKNS1_11SmallIntValES6_'], [['bitxor'], 'INT', ['INT', 'INT'], 
'_ZN4palo9Operators22bitxor_int_val_int_valEPN8palo_udf' '15FunctionContextERKNS1_6IntValES6_'], [['bitxor'], 'BIGINT', ['BIGINT', 'BIGINT'], '_ZN4palo9Operators30bitxor_big_int_val_big_int_valEPN8palo_udf' '15FunctionContextERKNS1_9BigIntValES6_'], [['bitxor'], 'LARGEINT', ['LARGEINT', 'LARGEINT'], '_ZN4palo9Operators34bitxor_large_int_val_large_int_valEPN8palo_udf' '15FunctionContextERKNS1_11LargeIntValES6_'], [['bitnot'], 'TINYINT', ['TINYINT'], '_ZN4palo9Operators19bitnot_tiny_int_valEPN8palo_udf' '15FunctionContextERKNS1_10TinyIntValE'], [['bitnot'], 'SMALLINT', ['SMALLINT'], '_ZN4palo9Operators20bitnot_small_int_valEPN8palo_udf' '15FunctionContextERKNS1_11SmallIntValE'], [['bitnot'], 'INT', ['INT'], '_ZN4palo9Operators14bitnot_int_valEPN8palo_udf' '15FunctionContextERKNS1_6IntValE'], [['bitnot'], 'BIGINT', ['BIGINT'], '_ZN4palo9Operators18bitnot_big_int_valEPN8palo_udf' '15FunctionContextERKNS1_9BigIntValE'], [['bitnot'], 'LARGEINT', ['LARGEINT'], '_ZN4palo9Operators20bitnot_large_int_valEPN8palo_udf' '15FunctionContextERKNS1_11LargeIntValE'], # Timestamp functions [['unix_timestamp'], 'INT', [], '_ZN4palo18TimestampFunctions7to_unixEPN8palo_udf15FunctionContextE'], [['unix_timestamp'], 'INT', ['DATETIME'], '_ZN4palo18TimestampFunctions7to_unixEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'], [['unix_timestamp'], 'INT', ['VARCHAR', 'VARCHAR'], '_ZN4palo18TimestampFunctions7to_unixEPN8palo_udf15FunctionContextERKNS1_9StringValES6_'], [['from_unixtime'], 'VARCHAR', ['INT'], '_ZN4palo18TimestampFunctions9from_unixEPN8palo_udf15FunctionContextERKNS1_6IntValE'], [['from_unixtime'], 'VARCHAR', ['INT', 'VARCHAR'], '_ZN4palo18TimestampFunctions9from_unixEPN8palo_udf' '15FunctionContextERKNS1_6IntValERKNS1_9StringValE'], [['now', 'current_timestamp'], 'DATETIME', [], '_ZN4palo18TimestampFunctions3nowEPN8palo_udf15FunctionContextE'], [['curtime', 'current_time'], 'DATETIME', [], '_ZN4palo18TimestampFunctions7curtimeEPN8palo_udf15FunctionContextE'], [['timestamp'], 'DATETIME', ['DATETIME'], '_ZN4palo18TimestampFunctions9timestampEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'], [['from_days'], 'DATE', ['INT'], '_ZN4palo18TimestampFunctions9from_daysEPN8palo_udf15FunctionContextERKNS1_6IntValE'], [['to_days'], 'INT', ['DATE'], '_ZN4palo18TimestampFunctions7to_daysEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'], [['year'], 'INT', ['DATETIME'], '_ZN4palo18TimestampFunctions4yearEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'], [['month'], 'INT', ['DATETIME'], '_ZN4palo18TimestampFunctions5monthEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'], [['quarter'], 'INT', ['DATETIME'], '_ZN4palo18TimestampFunctions7quarterEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'], [['day', 'dayofmonth'], 'INT', ['DATETIME'], '_ZN4palo18TimestampFunctions12day_of_monthEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValE'], [['dayofyear'], 'INT', ['DATETIME'], '_ZN4palo18TimestampFunctions11day_of_yearEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValE'], [['weekofyear'], 'INT', ['DATETIME'], '_ZN4palo18TimestampFunctions12week_of_yearEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValE'], [['hour'], 'INT', ['DATETIME'], '_ZN4palo18TimestampFunctions4hourEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'], [['minute'], 'INT', ['DATETIME'], '_ZN4palo18TimestampFunctions6minuteEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'], [['second'], 'INT', ['DATETIME'], '_ZN4palo18TimestampFunctions6secondEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'], [['years_add'], 'DATETIME', 
['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions9years_addEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['years_sub'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions9years_subEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['months_add', 'add_months'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions10months_addEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['months_sub'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions10months_subEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['weeks_add'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions9weeks_addEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['weeks_sub'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions9weeks_subEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['days_add', 'date_add', 'adddate'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions8days_addEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['days_sub', 'date_sub', 'subdate'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions8days_subEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['hours_add'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions9hours_addEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['hours_sub'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions9hours_subEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['minutes_add'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions11minutes_addEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['minutes_sub'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions11minutes_subEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['seconds_add'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions11seconds_addEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['seconds_sub'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions11seconds_subEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['microseconds_add'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions10micros_addEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['microseconds_sub'], 'DATETIME', ['DATETIME', 'INT'], '_ZN4palo18TimestampFunctions10micros_subEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'], [['datediff'], 'INT', ['DATETIME', 'DATETIME'], '_ZN4palo18TimestampFunctions9date_diffEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValES6_'], [['timediff'], 'DATETIME', ['DATETIME', 'DATETIME'], '_ZN4palo18TimestampFunctions9time_diffEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValES6_'], [['str_to_date'], 'DATETIME', ['VARCHAR', 'VARCHAR'], '_ZN4palo18TimestampFunctions11str_to_dateEPN8palo_udf' '15FunctionContextERKNS1_9StringValES6_'], [['date_format'], 'VARCHAR', ['DATETIME', 'VARCHAR'], '_ZN4palo18TimestampFunctions11date_formatEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValERKNS1_9StringValE'], [['date', 'to_date'], 'DATE', ['DATETIME'], '_ZN4palo18TimestampFunctions7to_dateEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'], [['dayname'], 'VARCHAR', ['DATETIME'], '_ZN4palo18TimestampFunctions8day_nameEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValE'], [['monthname'], 'VARCHAR', ['DATETIME'], 
'_ZN4palo18TimestampFunctions10month_nameEPN8palo_udf' '15FunctionContextERKNS1_11DateTimeValE'], # Math builtin functions [['pi'], 'DOUBLE', [], '_ZN4palo13MathFunctions2piEPN8palo_udf15FunctionContextE'], [['e'], 'DOUBLE', [], '_ZN4palo13MathFunctions1eEPN8palo_udf15FunctionContextE'], [['abs'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions3absEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['sign'], 'FLOAT', ['DOUBLE'], '_ZN4palo13MathFunctions4signEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['sin'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions3sinEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['asin'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions4asinEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['cos'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions3cosEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['acos'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions4acosEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['tan'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions3tanEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['atan'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions4atanEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['ceil', 'ceiling', 'dceil'], 'BIGINT', ['DOUBLE'], '_ZN4palo13MathFunctions4ceilEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['floor', 'dfloor'], 'BIGINT', ['DOUBLE'], '_ZN4palo13MathFunctions5floorEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['round', 'dround'], 'BIGINT', ['DOUBLE'], '_ZN4palo13MathFunctions5roundEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['round', 'dround'], 'DOUBLE', ['DOUBLE', 'INT'], '_ZN4palo13MathFunctions11round_up_toEPN8palo_udf' '15FunctionContextERKNS1_9DoubleValERKNS1_6IntValE'], [['truncate'], 'DOUBLE', ['DOUBLE', 'INT'], '_ZN4palo13MathFunctions8truncateEPN8palo_udf' '15FunctionContextERKNS1_9DoubleValERKNS1_6IntValE'], [['ln', 'dlog1'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions2lnEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['log'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], '_ZN4palo13MathFunctions3logEPN8palo_udf15FunctionContextERKNS1_9DoubleValES6_'], [['log2'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions4log2EPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['log10', 'dlog10'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions5log10EPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['exp', 'dexp'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions3expEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['radians'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions7radiansEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['degrees'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions7degreesEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['sqrt', 'dsqrt'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions4sqrtEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'], [['pow', 'power', 'dpow', 'fpow'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], '_ZN4palo13MathFunctions3powEPN8palo_udf15FunctionContextERKNS1_9DoubleValES6_'], [['rand', 'random'], 'DOUBLE', [], '_ZN4palo13MathFunctions4randEPN8palo_udf15FunctionContextE', '_ZN4palo13MathFunctions12rand_prepareEPN8palo_udf' '15FunctionContextENS2_18FunctionStateScopeE'], [['rand', 'random'], 'DOUBLE', ['BIGINT'], '_ZN4palo13MathFunctions9rand_seedEPN8palo_udf15FunctionContextERKNS1_9BigIntValE', '_ZN4palo13MathFunctions12rand_prepareEPN8palo_udf' '15FunctionContextENS2_18FunctionStateScopeE'], [['bin'], 'VARCHAR', ['BIGINT'], '_ZN4palo13MathFunctions3binEPN8palo_udf15FunctionContextERKNS1_9BigIntValE'], [['hex'], 'VARCHAR', ['BIGINT'], 
'_ZN4palo13MathFunctions7hex_intEPN8palo_udf15FunctionContextERKNS1_9BigIntValE'], [['hex'], 'VARCHAR', ['VARCHAR'], '_ZN4palo13MathFunctions10hex_stringEPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['unhex'], 'VARCHAR', ['VARCHAR'], '_ZN4palo13MathFunctions5unhexEPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['conv'], 'VARCHAR', ['BIGINT', 'TINYINT', 'TINYINT'], '_ZN4palo13MathFunctions8conv_intEPN8palo_udf' '15FunctionContextERKNS1_9BigIntValERKNS1_10TinyIntValES9_'], [['conv'], 'VARCHAR', ['VARCHAR', 'TINYINT', 'TINYINT'], '_ZN4palo13MathFunctions11conv_stringEPN8palo_udf' '15FunctionContextERKNS1_9StringValERKNS1_10TinyIntValES9_'], [['pmod'], 'BIGINT', ['BIGINT', 'BIGINT'], '_ZN4palo13MathFunctions11pmod_bigintEPN8palo_udf' '15FunctionContextERKNS1_9BigIntValES6_'], [['pmod'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], '_ZN4palo13MathFunctions11pmod_doubleEPN8palo_udf' '15FunctionContextERKNS1_9DoubleValES6_'], [['mod'], 'TINYINT', ['TINYINT', 'TINYINT'], '_ZN4palo9Operators29mod_tiny_int_val_tiny_int_valEPN8palo_udf' '15FunctionContextERKNS1_10TinyIntValES6_'], [['mod'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], '_ZN4palo9Operators31mod_small_int_val_small_int_valEPN8palo_udf' '15FunctionContextERKNS1_11SmallIntValES6_'], [['mod'], 'INT', ['INT', 'INT'], '_ZN4palo9Operators19mod_int_val_int_valEPN8palo_udf' '15FunctionContextERKNS1_6IntValES6_'], [['mod'], 'BIGINT', ['BIGINT', 'BIGINT'], '_ZN4palo9Operators27mod_big_int_val_big_int_valEPN8palo_udf' '15FunctionContextERKNS1_9BigIntValES6_'], [['mod'], 'LARGEINT', ['LARGEINT', 'LARGEINT'], '_ZN4palo9Operators31mod_large_int_val_large_int_valEPN8palo_udf' '15FunctionContextERKNS1_11LargeIntValES6_'], [['mod'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], '_ZN4palo16DecimalOperators27mod_decimal_val_decimal_valEPN8palo_udf' '15FunctionContextERKNS1_10DecimalValES6_'], [['mod', 'fmod'], 'FLOAT', ['FLOAT', 'FLOAT'], '_ZN4palo13MathFunctions10fmod_floatEPN8palo_udf15FunctionContextERKNS1_8FloatValES6_'], [['mod', 'fmod'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], '_ZN4palo13MathFunctions11fmod_doubleEPN8palo_udf15FunctionContextERKNS1_9DoubleValES6_'], [['positive'], 'BIGINT', ['BIGINT'], '_ZN4palo13MathFunctions15positive_bigintEPN8palo_udf' '15FunctionContextERKNS1_9BigIntValE'], [['positive'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions15positive_doubleEPN8palo_udf' '15FunctionContextERKNS1_9DoubleValE'], [['positive'], 'DECIMAL', ['DECIMAL'], '_ZN4palo13MathFunctions16positive_decimalEPN8palo_udf' '15FunctionContextERKNS1_10DecimalValE'], [['negative'], 'BIGINT', ['BIGINT'], '_ZN4palo13MathFunctions15negative_bigintEPN8palo_udf' '15FunctionContextERKNS1_9BigIntValE'], [['negative'], 'DOUBLE', ['DOUBLE'], '_ZN4palo13MathFunctions15negative_doubleEPN8palo_udf' '15FunctionContextERKNS1_9DoubleValE'], [['negative'], 'DECIMAL', ['DECIMAL'], '_ZN4palo13MathFunctions16negative_decimalEPN8palo_udf' '15FunctionContextERKNS1_10DecimalValE'], [['least'], 'TINYINT', ['TINYINT', '...'], '_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_10TinyIntValE'], [['least'], 'SMALLINT', ['SMALLINT', '...'], '_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_11SmallIntValE'], [['least'], 'INT', ['INT', '...'], '_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_6IntValE'], [['least'], 'BIGINT', ['BIGINT', '...'], '_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_9BigIntValE'], [['least'], 'LARGEINT', ['LARGEINT', '...'], '_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_11LargeIntValE'], [['least'], 
'FLOAT', ['FLOAT', '...'], '_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_8FloatValE'], [['least'], 'DOUBLE', ['DOUBLE', '...'], '_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_9DoubleValE'], [['least'], 'VARCHAR', ['VARCHAR', '...'], '_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_9StringValE'], [['least'], 'DATETIME', ['DATETIME', '...'], '_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_11DateTimeValE'], [['least'], 'DECIMAL', ['DECIMAL', '...'], '_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_10DecimalValE'], [['greatest'], 'TINYINT', ['TINYINT', '...'], '_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_10TinyIntValE'], [['greatest'], 'SMALLINT', ['SMALLINT', '...'], '_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_11SmallIntValE'], [['greatest'], 'INT', ['INT', '...'], '_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_6IntValE'], [['greatest'], 'BIGINT', ['BIGINT', '...'], '_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_9BigIntValE'], [['greatest'], 'LARGEINT', ['LARGEINT', '...'], '_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_11LargeIntValE'], [['greatest'], 'FLOAT', ['FLOAT', '...'], '_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_8FloatValE'], [['greatest'], 'DOUBLE', ['DOUBLE', '...'], '_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_9DoubleValE'], [['greatest'], 'VARCHAR', ['VARCHAR', '...'], '_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_9StringValE'], [['greatest'], 'DATETIME', ['DATETIME', '...'], '_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_11DateTimeValE'], [['greatest'], 'DECIMAL', ['DECIMAL', '...'], '_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_10DecimalValE'], # Conditional Functions # Some of these have empty symbols because the BE special-cases them based on the # function name [['if'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN', 'BOOLEAN'], ''], [['if'], 'TINYINT', ['BOOLEAN', 'TINYINT', 'TINYINT'], ''], [['if'], 'SMALLINT', ['BOOLEAN', 'SMALLINT', 'SMALLINT'], ''], [['if'], 'INT', ['BOOLEAN', 'INT', 'INT'], ''], [['if'], 'BIGINT', ['BOOLEAN', 'BIGINT', 'BIGINT'], ''], [['if'], 'LARGEINT', ['BOOLEAN', 'LARGEINT', 'LARGEINT'], ''], [['if'], 'FLOAT', ['BOOLEAN', 'FLOAT', 'FLOAT'], ''], [['if'], 'DOUBLE', ['BOOLEAN', 'DOUBLE', 'DOUBLE'], ''], [['if'], 'VARCHAR', ['BOOLEAN', 'VARCHAR', 'VARCHAR'], ''], [['if'], 'DATETIME', ['BOOLEAN', 'DATETIME', 'DATETIME'], ''], [['if'], 'DECIMAL', ['BOOLEAN', 'DECIMAL', 'DECIMAL'], ''], [['nullif'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], ''], [['nullif'], 'TINYINT', ['TINYINT', 'TINYINT'], ''], [['nullif'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], ''], [['nullif'], 'INT', ['INT', 'INT'], ''], [['nullif'], 'BIGINT', ['BIGINT', 'BIGINT'], ''], [['nullif'], 'LARGEINT', ['LARGEINT', 'LARGEINT'], ''], [['nullif'], 'FLOAT', ['FLOAT', 'FLOAT'], ''], [['nullif'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], ''], [['nullif'], 'VARCHAR', ['VARCHAR', 'VARCHAR'], ''], [['nullif'], 'DATETIME', ['DATETIME', 'DATETIME'], ''], [['nullif'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], ''], [['ifnull'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], ''], [['ifnull'], 'TINYINT', ['TINYINT', 'TINYINT'], ''], [['ifnull'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], ''], [['ifnull'], 'INT', ['INT', 'INT'], ''], [['ifnull'], 'BIGINT', ['BIGINT', 'BIGINT'], ''], [['ifnull'], 'LARGEINT', ['LARGEINT', 
'LARGEINT'], ''], [['ifnull'], 'FLOAT', ['FLOAT', 'FLOAT'], ''], [['ifnull'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], ''], [['ifnull'], 'VARCHAR', ['VARCHAR', 'VARCHAR'], ''], [['ifnull'], 'DATETIME', ['DATETIME', 'DATETIME'], ''], [['ifnull'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], ''], [['coalesce'], 'BOOLEAN', ['BOOLEAN', '...'], ''], [['coalesce'], 'TINYINT', ['TINYINT', '...'], ''], [['coalesce'], 'SMALLINT', ['SMALLINT', '...'], ''], [['coalesce'], 'INT', ['INT', '...'], ''], [['coalesce'], 'BIGINT', ['BIGINT', '...'], ''], [['coalesce'], 'LARGEINT', ['LARGEINT', '...'], ''], [['coalesce'], 'FLOAT', ['FLOAT', '...'], ''], [['coalesce'], 'DOUBLE', ['DOUBLE', '...'], ''], [['coalesce'], 'VARCHAR', ['VARCHAR', '...'], ''], [['coalesce'], 'DATETIME', ['DATETIME', '...'], ''], [['coalesce'], 'DECIMAL', ['DECIMAL', '...'], ''], # String builtin functions [['substr', 'substring'], 'VARCHAR', ['VARCHAR', 'INT'], '_ZN4palo15StringFunctions9substringEPN' '8palo_udf15FunctionContextERKNS1_9StringValERKNS1_6IntValE'], [['substr', 'substring'], 'VARCHAR', ['VARCHAR', 'INT', 'INT'], '_ZN4palo15StringFunctions9substringEPN' '8palo_udf15FunctionContextERKNS1_9StringValERKNS1_6IntValES9_'], [['strleft'], 'VARCHAR', ['VARCHAR', 'INT'], '_ZN4palo15StringFunctions4leftEPN8palo_udf' '15FunctionContextERKNS1_9StringValERKNS1_6IntValE'], [['strright'], 'VARCHAR', ['VARCHAR', 'INT'], '_ZN4palo15StringFunctions5rightEPN8palo_udf' '15FunctionContextERKNS1_9StringValERKNS1_6IntValE'], [['space'], 'VARCHAR', ['INT'], '_ZN4palo15StringFunctions5spaceEPN8palo_udf15FunctionContextERKNS1_6IntValE'], [['repeat'], 'VARCHAR', ['VARCHAR', 'INT'], '_ZN4palo15StringFunctions6repeatEPN8palo_udf' '15FunctionContextERKNS1_9StringValERKNS1_6IntValE'], [['lpad'], 'VARCHAR', ['VARCHAR', 'INT', 'VARCHAR'], '_ZN4palo15StringFunctions4lpadEPN8palo_udf' '15FunctionContextERKNS1_9StringValERKNS1_6IntValES6_'], [['rpad'], 'VARCHAR', ['VARCHAR', 'INT', 'VARCHAR'], '_ZN4palo15StringFunctions4rpadEPN8palo_udf' '15FunctionContextERKNS1_9StringValERKNS1_6IntValES6_'], [['length'], 'INT', ['VARCHAR'], '_ZN4palo15StringFunctions6lengthEPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['lower', 'lcase'], 'VARCHAR', ['VARCHAR'], '_ZN4palo15StringFunctions5lowerEPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['upper', 'ucase'], 'VARCHAR', ['VARCHAR'], '_ZN4palo15StringFunctions5upperEPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['reverse'], 'VARCHAR', ['VARCHAR'], '_ZN4palo15StringFunctions7reverseEPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['trim'], 'VARCHAR', ['VARCHAR'], '_ZN4palo15StringFunctions4trimEPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['ltrim'], 'VARCHAR', ['VARCHAR'], '_ZN4palo15StringFunctions5ltrimEPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['rtrim'], 'VARCHAR', ['VARCHAR'], '_ZN4palo15StringFunctions5rtrimEPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['ascii'], 'INT', ['VARCHAR'], '_ZN4palo15StringFunctions5asciiEPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['instr'], 'INT', ['VARCHAR', 'VARCHAR'], '_ZN4palo15StringFunctions5instrEPN8palo_udf15FunctionContextERKNS1_9StringValES6_'], [['locate'], 'INT', ['VARCHAR', 'VARCHAR'], '_ZN4palo15StringFunctions6locateEPN8palo_udf15FunctionContextERKNS1_9StringValES6_'], [['locate'], 'INT', ['VARCHAR', 'VARCHAR', 'INT'], '_ZN4palo15StringFunctions10locate_posEPN8palo_udf' '15FunctionContextERKNS1_9StringValES6_RKNS1_6IntValE'], [['regexp_extract'], 'VARCHAR', ['VARCHAR', 'VARCHAR', 'BIGINT'], '_ZN4palo15StringFunctions14regexp_extractEPN8palo_udf' 
'15FunctionContextERKNS1_9StringValES6_RKNS1_9BigIntValE', '_ZN4palo15StringFunctions14regexp_prepareEPN8palo_udf' '15FunctionContextENS2_18FunctionStateScopeE', '_ZN4palo15StringFunctions12regexp_closeEPN8palo_udf' '15FunctionContextENS2_18FunctionStateScopeE'], [['regexp_replace'], 'VARCHAR', ['VARCHAR', 'VARCHAR', 'VARCHAR'], '_ZN4palo15StringFunctions14regexp_replaceEPN8palo_udf' '15FunctionContextERKNS1_9StringValES6_S6_', '_ZN4palo15StringFunctions14regexp_prepareEPN8palo_udf' '15FunctionContextENS2_18FunctionStateScopeE', '_ZN4palo15StringFunctions12regexp_closeEPN8palo_udf' '15FunctionContextENS2_18FunctionStateScopeE'], [['concat'], 'VARCHAR', ['VARCHAR', '...'], '_ZN4palo15StringFunctions6concatEPN8palo_udf15FunctionContextEiPKNS1_9StringValE'], [['concat_ws'], 'VARCHAR', ['VARCHAR', 'VARCHAR', '...'], '_ZN4palo15StringFunctions9concat_wsEPN8palo_udf' '15FunctionContextERKNS1_9StringValEiPS5_'], [['find_in_set'], 'INT', ['VARCHAR', 'VARCHAR'], '_ZN4palo15StringFunctions11find_in_setEPN8palo_udf' '15FunctionContextERKNS1_9StringValES6_'], [['parse_url'], 'VARCHAR', ['VARCHAR', 'VARCHAR'], '_ZN4palo15StringFunctions9parse_urlEPN8palo_udf' '15FunctionContextERKNS1_9StringValES6_', '_ZN4palo15StringFunctions17parse_url_prepareEPN8palo_udf' '15FunctionContextENS2_18FunctionStateScopeE', '_ZN4palo15StringFunctions15parse_url_closeEPN8palo_udf' '15FunctionContextENS2_18FunctionStateScopeE'], [['parse_url'], 'VARCHAR', ['VARCHAR', 'VARCHAR', 'VARCHAR'], '_ZN4palo15StringFunctions13parse_url_keyEPN8palo_udf' '15FunctionContextERKNS1_9StringValES6_S6_', '_ZN4palo15StringFunctions17parse_url_prepareEPN8palo_udf' '15FunctionContextENS2_18FunctionStateScopeE', '_ZN4palo15StringFunctions15parse_url_closeEPN8palo_udf' '15FunctionContextENS2_18FunctionStateScopeE'], # Utility functions [['sleep'], 'BOOLEAN', ['INT'], '_ZN4palo16UtilityFunctions5sleepEPN8palo_udf15FunctionContextERKNS1_6IntValE'], [['version'], 'VARCHAR', [], '_ZN4palo16UtilityFunctions7versionEPN8palo_udf15FunctionContextE'], # Json functions [['get_json_int'], 'INT', ['VARCHAR', 'VARCHAR'], '_ZN4palo13JsonFunctions12get_json_intEPN8palo_udf15FunctionContextERKNS1_9StringValES6_'], [['get_json_double'], 'DOUBLE', ['VARCHAR', 'VARCHAR'], '_ZN4palo13JsonFunctions15get_json_doubleEPN8palo_udf' '15FunctionContextERKNS1_9StringValES6_'], [['get_json_string'], 'VARCHAR', ['VARCHAR', 'VARCHAR'], '_ZN4palo13JsonFunctions15get_json_stringEPN8palo_udf' '15FunctionContextERKNS1_9StringValES6_'], #hll function [['hll_cardinality'], 'VARCHAR', ['VARCHAR'], '_ZN4palo16HllHashFunctions15hll_cardinalityEPN8palo_udf' '15FunctionContextERKNS1_9StringValE'], [['hll_hash'], 'VARCHAR', ['VARCHAR'], '_ZN4palo16HllHashFunctions8hll_hashEPN8palo_udf15FunctionContextERKNS1_9StringValE'], # aes and base64 function [['from_base64'], 'VARCHAR', ['VARCHAR'], '_ZN4palo19EncryptionFunctions11from_base64EPN8palo_udf' '15FunctionContextERKNS1_9StringValE'], [['to_base64'], 'VARCHAR', ['VARCHAR'], '_ZN4palo19EncryptionFunctions9to_base64EPN8palo_udf' '15FunctionContextERKNS1_9StringValE'], # for compatable with MySQL [['md5'], 'VARCHAR', ['VARCHAR'], '_ZN4palo19EncryptionFunctions3md5EPN8palo_udf15FunctionContextERKNS1_9StringValE'], [['md5sum'], 'VARCHAR', ['VARCHAR', '...'], '_ZN4palo19EncryptionFunctions6md5sumEPN8palo_udf15FunctionContextEiPKNS1_9StringValE'] ] invisible_functions = [ ]
apache-2.0
-3,359,225,859,892,023,000
7,435,210,170,473,094,000
55.323322
100
0.660372
false
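The header comment in palo_builtins_functions.py above documents each entry as [sql aliases], <return_type>, [<args>], <backend symbol>; a sketch that unpacks the first bitand entry to make that shape explicit (the entry is copied verbatim from the file):

sql_aliases, return_type, arg_types, backend_symbol = [
    ['bitand'], 'TINYINT', ['TINYINT', 'TINYINT'],
    '_ZN4palo9Operators32bitand_tiny_int_val_tiny_int_valEPN8palo_udf'
    '15FunctionContextERKNS1_10TinyIntValES6_',
]
print(sql_aliases, return_type, arg_types)  # ['bitand'] TINYINT ['TINYINT', 'TINYINT']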
bluemini/kuma
kuma/wiki/tests/test_views.py
2
168231
# -*- coding: utf-8 -*- import base64 import datetime import json import time import mock from nose.tools import eq_, ok_ from nose.plugins.attrib import attr from pyquery import PyQuery as pq from urlparse import urlparse from django.conf import settings from django.contrib.sites.models import Site from django.core import mail from django.db.models import Q from django.test.client import (FakePayload, encode_multipart, BOUNDARY, CONTENT_TYPE_RE, MULTIPART_CONTENT) from django.test.utils import override_settings from django.http import Http404 from django.utils.encoding import smart_str from constance import config from constance.test import override_config from jingo.helpers import urlparams from waffle.models import Flag, Switch from kuma.attachments.models import Attachment from kuma.attachments.utils import make_test_file from kuma.authkeys.models import Key from kuma.core.cache import memcache as cache from kuma.core.models import IPBan from kuma.core.urlresolvers import reverse from kuma.users.tests import UserTestCase, user from ..content import get_seo_description from ..events import EditDocumentEvent from ..forms import MIDAIR_COLLISION from ..models import (Document, Revision, RevisionIP, DocumentZone, DocumentTag, DocumentDeletionLog) from ..views.document import _get_seo_parent_title from . import (doc_rev, document, new_document_data, revision, normalize_html, create_template_test_users, make_translation, WikiTestCase, FakeResponse) class RedirectTests(UserTestCase, WikiTestCase): """Tests for the REDIRECT wiki directive""" localizing_client = True def test_redirect_suppression(self): """The document view shouldn't redirect when passed redirect=no.""" redirect, _ = doc_rev('REDIRECT <a class="redirect" ' 'href="/en-US/docs/blah">smoo</a>') url = redirect.get_absolute_url() + '?redirect=no' response = self.client.get(url, follow=True) self.assertContains(response, 'REDIRECT ') def test_redirects_only_internal(self): """Ensures redirects cannot be used to link to other sites""" redirect, _ = doc_rev('REDIRECT <a class="redirect" ' 'href="//davidwalsh.name">DWB</a>') url = redirect.get_absolute_url() response = self.client.get(url, follow=True) self.assertContains(response, 'DWB') def test_redirects_only_internal_2(self): """Ensures redirects cannot be used to link to other sites""" redirect, _ = doc_rev('REDIRECT <a class="redirect" ' 'href="http://davidwalsh.name">DWB</a>') url = redirect.get_absolute_url() response = self.client.get(url, follow=True) self.assertContains(response, 'DWB') def test_self_redirect_suppression(self): """The document view shouldn't redirect to itself.""" slug = 'redirdoc' html = ('REDIRECT <a class="redirect" href="/en-US/docs/%s">smoo</a>' % slug) doc = document(title='blah', slug=slug, html=html, save=True, locale=settings.WIKI_DEFAULT_LANGUAGE) revision(document=doc, content=html, is_approved=True, save=True) response = self.client.get(doc.get_absolute_url(), follow=True) eq_(200, response.status_code) response_html = pq(response.content) article_body = response_html.find('#wikiArticle').html() self.assertHTMLEqual(html, article_body) class LocaleRedirectTests(UserTestCase, WikiTestCase): """Tests for fallbacks to en-US and such for slug lookups.""" # Some of these may fail or be invalid if your WIKI_DEFAULT_LANGUAGE is de. 
localizing_client = True def test_fallback_to_translation(self): """If a slug isn't found in the requested locale but is in the default locale and if there is a translation of that default-locale document to the requested locale, the translation should be served.""" en_doc, de_doc = self._create_en_and_de_docs() response = self.client.get(reverse('wiki.document', args=(en_doc.slug,), locale='de'), follow=True) self.assertRedirects(response, de_doc.get_absolute_url()) def test_fallback_with_query_params(self): """The query parameters should be passed along to the redirect.""" en_doc, de_doc = self._create_en_and_de_docs() url = reverse('wiki.document', args=[en_doc.slug], locale='de') response = self.client.get(url + '?x=y&x=z', follow=True) self.assertRedirects(response, de_doc.get_absolute_url() + '?x=y&x=z') def test_redirect_with_no_slug(self): """Bug 775241: Fix exception in redirect for URL with ui-locale""" loc = settings.WIKI_DEFAULT_LANGUAGE url = '/%s/docs/%s/' % (loc, loc) try: self.client.get(url, follow=True) except Http404, e: pass except Exception as e: self.fail("The only exception should be a 404, not this: %s" % e) def _create_en_and_de_docs(self): en = settings.WIKI_DEFAULT_LANGUAGE en_doc = document(locale=en, slug='english-slug', save=True) de_doc = document(locale='de', parent=en_doc, save=True) revision(document=de_doc, is_approved=True, save=True) return en_doc, de_doc class ViewTests(UserTestCase, WikiTestCase): fixtures = UserTestCase.fixtures + ['wiki/documents.json'] localizing_client = True @attr('bug875349') def test_json_view(self): expected_tags = sorted(['foo', 'bar', 'baz']) expected_review_tags = sorted(['tech', 'editorial']) doc = Document.objects.get(pk=1) doc.tags.set(*expected_tags) doc.current_revision.review_tags.set(*expected_review_tags) url = reverse('wiki.json', locale=settings.WIKI_DEFAULT_LANGUAGE) resp = self.client.get(url, {'title': 'an article title'}) eq_(200, resp.status_code) data = json.loads(resp.content) eq_('article-title', data['slug']) result_tags = sorted([str(x) for x in data['tags']]) eq_(expected_tags, result_tags) result_review_tags = sorted([str(x) for x in data['review_tags']]) eq_(expected_review_tags, result_review_tags) url = reverse('wiki.json_slug', args=('article-title',), locale=settings.WIKI_DEFAULT_LANGUAGE) Switch.objects.create(name='application_ACAO', active=True) resp = self.client.get(url) ok_('Access-Control-Allow-Origin' in resp) eq_('*', resp['Access-Control-Allow-Origin']) eq_(200, resp.status_code) data = json.loads(resp.content) eq_('an article title', data['title']) ok_('translations' in data) result_tags = sorted([str(x) for x in data['tags']]) eq_(expected_tags, result_tags) result_review_tags = sorted([str(x) for x in data['review_tags']]) eq_(expected_review_tags, result_review_tags) def test_history_view(self): slug = 'history-view-test-doc' html = 'history view test doc' doc = document(title='History view test doc', slug=slug, html=html, save=True, locale=settings.WIKI_DEFAULT_LANGUAGE) for i in xrange(1, 51): revision(document=doc, content=html, comment='Revision %s' % i, is_approved=True, save=True) url = reverse('wiki.document_revisions', args=(slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) resp = self.client.get(url) eq_(200, resp.status_code) all_url = urlparams(reverse('wiki.document_revisions', args=(slug,), locale=settings.WIKI_DEFAULT_LANGUAGE), limit='all') resp = self.client.get(all_url) eq_(403, resp.status_code) self.client.login(username='testuser', password='testpass') resp = 
self.client.get(all_url) eq_(200, resp.status_code) def test_toc_view(self): slug = 'toc_test_doc' html = '<h2>Head 2</h2><h3>Head 3</h3>' doc = document(title='blah', slug=slug, html=html, save=True, locale=settings.WIKI_DEFAULT_LANGUAGE) revision(document=doc, content=html, is_approved=True, save=True) url = reverse('wiki.toc', args=[slug], locale=settings.WIKI_DEFAULT_LANGUAGE) Switch.objects.create(name='application_ACAO', active=True) resp = self.client.get(url) ok_('Access-Control-Allow-Origin' in resp) eq_('*', resp['Access-Control-Allow-Origin']) self.assertHTMLEqual( resp.content, '<ol><li><a href="#Head_2" rel="internal">Head 2</a>' '<ol><li><a href="#Head_3" rel="internal">Head 3</a>' '</ol></li></ol>') @attr('bug875349') def test_children_view(self): test_content = '<p>Test <a href="http://example.com">Summary</a></p>' def _make_doc(title, slug, parent=None, is_redir=False): doc = document(title=title, slug=slug, save=True, is_redirect=is_redir) if is_redir: content = 'REDIRECT <a class="redirect" href="/en-US/blah">Blah</a>' else: content = test_content revision(document=doc, content=test_content, summary=get_seo_description( test_content, strip_markup=False), save=True) doc.html = content if parent: doc.parent_topic = parent doc.save() return doc root_doc = _make_doc('Root', 'Root') child_doc_1 = _make_doc('Child 1', 'Root/Child_1', root_doc) _make_doc('Grandchild 1', 'Root/Child_1/Grandchild_1', child_doc_1) grandchild_doc_2 = _make_doc('Grandchild 2', 'Root/Child_1/Grandchild_2', child_doc_1) _make_doc('Great Grandchild 1', 'Root/Child_1/Grandchild_2/Great_Grand_Child_1', grandchild_doc_2) _make_doc('Child 2', 'Root/Child_2', root_doc) _make_doc('Child 3', 'Root/Child_3', root_doc, True) Switch.objects.create(name='application_ACAO', active=True) for expand in (True, False): url = reverse('wiki.children', args=['Root'], locale=settings.WIKI_DEFAULT_LANGUAGE) if expand: url = '%s?expand' % url resp = self.client.get(url) ok_('Access-Control-Allow-Origin' in resp) eq_('*', resp['Access-Control-Allow-Origin']) json_obj = json.loads(resp.content) # Basic structure creation testing eq_(json_obj['slug'], 'Root') if not expand: ok_('summary' not in json_obj) else: eq_(json_obj['summary'], 'Test <a href="http://example.com">Summary</a>') ok_('tags' in json_obj) ok_('review_tags' in json_obj) eq_(len(json_obj['subpages']), 2) eq_(len(json_obj['subpages'][0]['subpages']), 2) eq_(json_obj['subpages'][0]['subpages'][1]['title'], 'Grandchild 2') # Depth parameter testing def _depth_test(depth, aught): url = reverse('wiki.children', args=['Root'], locale=settings.WIKI_DEFAULT_LANGUAGE) + '?depth=' + str(depth) resp = self.client.get(url) json_obj = json.loads(resp.content) eq_(len(json_obj['subpages'][0]['subpages'][1]['subpages']), aught) _depth_test(2, 0) _depth_test(3, 1) _depth_test(6, 1) # Sorting test sort_root_doc = _make_doc('Sort Root', 'Sort_Root') _make_doc('B Child', 'Sort_Root/B_Child', sort_root_doc) _make_doc('A Child', 'Sort_Root/A_Child', sort_root_doc) resp = self.client.get(reverse('wiki.children', args=['Sort_Root'], locale=settings.WIKI_DEFAULT_LANGUAGE)) json_obj = json.loads(resp.content) eq_(json_obj['subpages'][0]['title'], 'A Child') # Test if we are serving an error json if document does not exist no_doc_url = reverse('wiki.children', args=['nonexistentDocument'], locale=settings.WIKI_DEFAULT_LANGUAGE) resp = self.client.get(no_doc_url) result = json.loads(resp.content) eq_(result, {'error': 'Document does not exist.'}) def test_summary_view(self): """The 
?summary option should restrict document view to summary""" d, r = doc_rev(""" <p>Foo bar <a href="http://example.com">baz</a></p> <p>Quux xyzzy</p> """) resp = self.client.get('%s?raw&summary' % d.get_absolute_url()) eq_(resp.content, 'Foo bar <a href="http://example.com">baz</a>') @override_settings(CELERY_ALWAYS_EAGER=True) @mock.patch('waffle.flag_is_active') @mock.patch('kuma.wiki.jobs.DocumentContributorsJob.get') def test_footer_contributors(self, get_contributors, flag_is_active): get_contributors.return_value = [ {'id': 1, 'username': 'ringo', 'email': 'ringo@apple.co.uk'}, {'id': 2, 'username': 'john', 'email': 'lennon@apple.co.uk'}, ] flag_is_active.return_value = True d, r = doc_rev('some content') resp = self.client.get(d.get_absolute_url()) page = pq(resp.content) contributors = (page.find(":contains('Contributors to this page')") .parent()) # just checking if the contributor link is rendered eq_(len(contributors.find('a')), 2) def test_revision_view_bleached_content(self): """Bug 821988: Revision content should be cleaned with bleach""" d, r = doc_rev(""" <a href="#" onload=alert(3)>Hahaha</a> <svg><svg onload=alert(3);> """) resp = self.client.get(r.get_absolute_url()) page = pq(resp.content) ct = page.find('#wikiArticle').html() ok_('<svg>' not in ct) ok_('<a href="#">Hahaha</a>' in ct) def test_raw_css_view(self): """The raw source for a document can be requested""" self.client.login(username='admin', password='testpass') doc = document(title='Template:CustomSampleCSS', slug='Template:CustomSampleCSS', save=True) revision( save=True, is_approved=True, document=doc, content=""" /* CSS here */ body { padding: 0; margin: 0; } svg:not(:root) { display:block; } """) response = self.client.get('%s?raw=true' % reverse('wiki.document', args=[doc.slug])) ok_('text/css' in response['Content-Type']) class PermissionTests(UserTestCase, WikiTestCase): localizing_client = True def setUp(self): """Set up the permissions, groups, and users needed for the tests""" super(PermissionTests, self).setUp() self.perms, self.groups, self.users, self.superuser = ( create_template_test_users()) def test_template_revert_permission(self): locale = 'en-US' slug = 'Template:test-revert-perm' doc = document(save=True, slug=slug, title=slug, locale=locale) rev = revision(save=True, document=doc) # Revision template should not show revert button url = reverse('wiki.revision', args=([doc.slug, rev.id])) resp = self.client.get(url) ok_('Revert' not in resp.content) # Revert POST should give permission denied to user without perm username = self.users['none'].username self.client.login(username=username, password='testpass') url = reverse('wiki.revert_document', args=([doc.slug, rev.id])) resp = self.client.post(url, {'comment': 'test'}) eq_(403, resp.status_code) # Revert POST should give success to user with perm username = self.users['change'].username self.client.login(username=username, password='testpass') url = reverse('wiki.revert_document', args=([doc.slug, rev.id])) resp = self.client.post(url, {'comment': 'test'}, follow=True) eq_(200, resp.status_code) def test_template_permissions(self): msg = ('edit', 'create') for is_add in (True, False): slug_trials = ( ('test_for_%s', ( (True, self.superuser), (True, self.users['none']), (True, self.users['all']), (True, self.users['add']), (True, self.users['change']), )), ('Template:test_for_%s', ( (True, self.superuser), (False, self.users['none']), (True, self.users['all']), (is_add, self.users['add']), (not is_add, self.users['change']), )) ) for 
slug_tmpl, trials in slug_trials: for expected, tmp_user in trials: username = tmp_user.username slug = slug_tmpl % username locale = settings.WIKI_DEFAULT_LANGUAGE Document.objects.all().filter(slug=slug).delete() if not is_add: doc = document(save=True, slug=slug, title=slug, locale=locale) revision(save=True, document=doc) self.client.login(username=username, password='testpass') data = new_document_data() slug = slug_tmpl % username data.update({"title": slug, "slug": slug}) if is_add: url = reverse('wiki.create', locale=locale) resp = self.client.post(url, data, follow=False) else: data['form'] = 'rev' url = reverse('wiki.edit', args=(slug,), locale=locale) resp = self.client.post(url, data, follow=False) if expected: eq_(302, resp.status_code, "%s should be able to %s %s" % (user, msg[is_add], slug)) Document.objects.filter(slug=slug).delete() else: eq_(403, resp.status_code, "%s should not be able to %s %s" % (user, msg[is_add], slug)) class ConditionalGetTests(UserTestCase, WikiTestCase): """Tests for conditional GET on document view""" localizing_client = True def test_last_modified(self): """Ensure the last-modified stamp of a document is cached""" doc, rev = doc_rev() get_url = reverse('wiki.document', args=[doc.slug], locale=settings.WIKI_DEFAULT_LANGUAGE) # There should be a last-modified date cached for this document already cache_key = doc.last_modified_cache_key ok_(cache.get(cache_key)) # Now, try a request, and ensure that the last-modified header is # present. response = self.client.get(get_url, follow=False) ok_(response.has_header('last-modified')) last_mod = response['last-modified'] # Try another request, using If-Modified-Since. This should be a 304 response = self.client.get(get_url, follow=False, HTTP_IF_MODIFIED_SINCE=last_mod) eq_(304, response.status_code) # Finally, ensure that the last-modified was cached. cached_last_mod = cache.get(cache_key) eq_(doc.modified.strftime('%s'), cached_last_mod) # Let the clock tick, so the last-modified will change on edit. time.sleep(1.0) # Edit the document, ensure the last-modified has been invalidated. revision(document=doc, content="New edits", save=True) ok_(cache.get(cache_key) != cached_last_mod) # This should be another 304, but the last-modified in response and # cache should have changed. response = self.client.get(get_url, follow=False, HTTP_IF_MODIFIED_SINCE=last_mod) eq_(200, response.status_code) ok_(last_mod != response['last-modified']) ok_(cached_last_mod != cache.get(cache_key)) def test_deletion_clears_last_modified(self): """Deleting a page clears any last-modified caching""" # Setup mostly the same as previous test, to get a doc and set # last-modified info. doc, rev = doc_rev() self.url = reverse('wiki.document', args=[doc.slug], locale=settings.WIKI_DEFAULT_LANGUAGE) cache_key = doc.last_modified_cache_key last_mod = cache.get(cache_key) ok_(last_mod) # exists already because pre-filled self.client.get(self.url, follow=False) ok_(cache.get(cache_key) == last_mod) # Now delete the doc and make sure there's no longer # last-modified data in the cache for it afterward. 
doc.delete() ok_(not cache.get(cache_key)) def test_deleted_doc_returns_404(self): """Requesting a deleted doc returns 404""" doc, rev = doc_rev() doc.delete() DocumentDeletionLog.objects.create(locale=doc.locale, slug=doc.slug, user=rev.creator, reason="test") response = self.client.get(doc.get_absolute_url(), follow=False) eq_(404, response.status_code) class ReadOnlyTests(UserTestCase, WikiTestCase): """Tests readonly scenarios""" fixtures = UserTestCase.fixtures + ['wiki/documents.json'] localizing_client = True def setUp(self): super(ReadOnlyTests, self).setUp() self.d, r = doc_rev() self.edit_url = reverse('wiki.edit', args=[self.d.slug]) def test_everyone(self): """ kumaediting: everyone, kumabanned: none """ self.kumaediting_flag.everyone = True self.kumaediting_flag.save() self.client.login(username='testuser', password='testpass') resp = self.client.get(self.edit_url) eq_(200, resp.status_code) def test_superusers_only(self): """ kumaediting: superusers, kumabanned: none """ self.kumaediting_flag.everyone = None self.kumaediting_flag.superusers = True self.kumaediting_flag.save() self.client.login(username='testuser', password='testpass') resp = self.client.get(self.edit_url) eq_(403, resp.status_code) ok_('The wiki is in read-only mode.' in resp.content) self.client.logout() self.client.login(username='admin', password='testpass') resp = self.client.get(self.edit_url) eq_(200, resp.status_code) def test_banned_users(self): """ kumaediting: everyone, kumabanned: testuser2 """ self.kumaediting_flag.everyone = True self.kumaediting_flag.save() # ban testuser2 kumabanned = Flag.objects.create(name='kumabanned') kumabanned.users = self.user_model.objects.filter(username='testuser2') kumabanned.save() # testuser can still access self.client.login(username='testuser', password='testpass') resp = self.client.get(self.edit_url) eq_(200, resp.status_code) self.client.logout() # testuser2 cannot self.client.login(username='testuser2', password='testpass') resp = self.client.get(self.edit_url) eq_(403, resp.status_code) ok_('Your profile has been banned from making edits.' in resp.content) # ban testuser01 and testuser2 kumabanned.users = self.user_model.objects.filter( Q(username='testuser2') | Q(username='testuser01')) kumabanned.save() # testuser can still access self.client.login(username='testuser', password='testpass') resp = self.client.get(self.edit_url) eq_(200, resp.status_code) self.client.logout() # testuser2 cannot access self.client.login(username='testuser2', password='testpass') resp = self.client.get(self.edit_url) eq_(403, resp.status_code) ok_('Your profile has been banned from making edits.' in resp.content) # testuser01 cannot access self.client.login(username='testuser01', password='testpass') resp = self.client.get(self.edit_url) eq_(403, resp.status_code) ok_('Your profile has been banned from making edits.' 
in resp.content) class BannedIPTests(UserTestCase, WikiTestCase): """Tests readonly scenarios""" fixtures = UserTestCase.fixtures + ['wiki/documents.json'] localizing_client = True def setUp(self): super(BannedIPTests, self).setUp() self.ip = '127.0.0.1' self.ip_ban = IPBan.objects.create(ip=self.ip) self.doc, rev = doc_rev() self.edit_url = reverse('wiki.edit', args=[self.doc.slug]) def tearDown(self): cache.clear() def test_banned_ip_cant_get_edit(self): self.client.login(username='testuser', password='testpass') response = self.client.get(self.edit_url, REMOTE_ADDR=self.ip) eq_(403, response.status_code) def test_banned_ip_cant_post_edit(self): self.client.login(username='testuser', password='testpass') response = self.client.get(self.edit_url, REMOTE_ADDR=self.ip) eq_(403, response.status_code) def test_banned_ip_can_still_get_articles(self): response = self.client.get(self.doc.get_absolute_url(), REMOTE_ADDR=self.ip) eq_(200, response.status_code) class KumascriptIntegrationTests(UserTestCase, WikiTestCase): """ Tests for usage of the kumascript service. Note that these tests really just check whether or not the service was used, and are not integration tests meant to exercise the real service. """ localizing_client = True def setUp(self): super(KumascriptIntegrationTests, self).setUp() self.d, self.r = doc_rev() self.r.content = "TEST CONTENT" self.r.save() self.d.tags.set('foo', 'bar', 'baz') self.url = reverse('wiki.document', args=(self.d.slug,), locale=self.d.locale) # TODO: upgrade mock to 0.8.0 so we can do this. # self.mock_kumascript_get = ( # mock.patch('kuma.wiki.kumascript.get')) # self.mock_kumascript_get.return_value = self.d.html def tearDown(self): super(KumascriptIntegrationTests, self).tearDown() # TODO: upgrade mock to 0.8.0 so we can do this. 
        # self.mock_kumascript_get.stop()

    @override_config(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_basic_view(self, mock_kumascript_get):
        """When kumascript timeout is non-zero, the service should be used"""
        mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get(self.url, follow=False)
        ok_(mock_kumascript_get.called, "kumascript should have been used")

    @override_config(KUMASCRIPT_TIMEOUT=0.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_disabled(self, mock_kumascript_get):
        """When disabled, the kumascript service should not be used"""
        mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get(self.url, follow=False)
        ok_(not mock_kumascript_get.called,
            "kumascript should not have been used")

    @override_config(KUMASCRIPT_TIMEOUT=0.0)
    @mock.patch('kuma.wiki.kumascript.get')
    @override_settings(CELERY_ALWAYS_EAGER=True)
    def test_disabled_rendering(self, mock_kumascript_get):
        """When disabled, the kumascript service should not be used
        in rendering"""
        mock_kumascript_get.return_value = (self.d.html, None)
        self.d.schedule_rendering('max-age=0')
        ok_(not mock_kumascript_get.called,
            "kumascript should not have been used")

    @override_config(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_nomacros(self, mock_kumascript_get):
        mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get('%s?nomacros' % self.url, follow=False)
        ok_(not mock_kumascript_get.called,
            "kumascript should not have been used")

    @override_config(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_raw(self, mock_kumascript_get):
        mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get('%s?raw' % self.url, follow=False)
        ok_(not mock_kumascript_get.called,
            "kumascript should not have been used")

    @override_config(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_raw_macros(self, mock_kumascript_get):
        mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get('%s?raw&macros' % self.url, follow=False)
        ok_(mock_kumascript_get.called, "kumascript should have been used")

    @override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=1234)
    @mock.patch('requests.get')
    def test_ua_max_age_zero(self, mock_requests_get):
        """Authenticated users can request a zero max-age for kumascript"""
        trap = {}

        def my_requests_get(url, headers=None, timeout=None):
            trap['headers'] = headers
            return FakeResponse(status_code=200, headers={},
                                text='HELLO WORLD')

        mock_requests_get.side_effect = my_requests_get

        self.client.get(self.url, follow=False,
                        HTTP_CACHE_CONTROL='no-cache')
        eq_('max-age=1234', trap['headers']['Cache-Control'])

        self.client.login(username='admin', password='testpass')
        self.client.get(self.url, follow=False,
                        HTTP_CACHE_CONTROL='no-cache')
        eq_('no-cache', trap['headers']['Cache-Control'])

    @override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=1234)
    @mock.patch('requests.get')
    def test_ua_no_cache(self, mock_requests_get):
        """Authenticated users can request no-cache for kumascript"""
        trap = {}

        def my_requests_get(url, headers=None, timeout=None):
            trap['headers'] = headers
            return FakeResponse(status_code=200, headers={},
                                text='HELLO WORLD')

        mock_requests_get.side_effect = my_requests_get

        self.client.get(self.url, follow=False,
                        HTTP_CACHE_CONTROL='no-cache')
        eq_('max-age=1234', trap['headers']['Cache-Control'])

        self.client.login(username='admin', password='testpass')
        self.client.get(self.url, follow=False,
                        HTTP_CACHE_CONTROL='no-cache')
        eq_('no-cache', trap['headers']['Cache-Control'])
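    # The requests-mocking tests in this class all follow the same pattern:
    # patch `requests.get`, capture the outgoing headers in a `trap` dict via
    # `side_effect`, then assert on the captured Cache-Control value. The
    # helper below is an illustrative sketch of that pattern only; the name
    # `_trap_kumascript_headers` is hypothetical and nothing in this module
    # calls it. It reuses only names already present here (FakeResponse,
    # mock.patch('requests.get')).
    def _trap_kumascript_headers(self, mock_requests_get, text='HELLO WORLD'):
        """Sketch: capture the headers kuma sends to the kumascript service.

        Assumes the caller applied @mock.patch('requests.get') and passes
        the resulting mock in, exactly as the tests above and below do.
        """
        trap = {}

        def my_requests_get(url, headers=None, timeout=None):
            # Record the outgoing headers so the calling test can assert on
            # them, e.g. eq_('no-cache', trap['headers']['Cache-Control']).
            trap['headers'] = headers
            return FakeResponse(status_code=200, headers={}, text=text)

        mock_requests_get.side_effect = my_requests_get
        return trap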
@override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=1234) @mock.patch('requests.get') def test_conditional_get(self, mock_requests_get): """Ensure conditional GET in requests to kumascript work as expected""" expected_etag = "8675309JENNY" expected_modified = "Wed, 14 Mar 2012 22:29:17 GMT" expected_content = "HELLO THERE, WORLD" trap = dict(req_cnt=0) def my_requests_get(url, headers=None, timeout=None): trap['req_cnt'] += 1 trap['headers'] = headers if trap['req_cnt'] in [1, 2]: return FakeResponse( status_code=200, text=expected_content, headers={ "etag": expected_etag, "last-modified": expected_modified, "age": 456 }) else: return FakeResponse( status_code=304, text='', headers={ "etag": expected_etag, "last-modified": expected_modified, "age": 123 }) mock_requests_get.side_effect = my_requests_get # First request to let the view cache etag / last-modified response = self.client.get(self.url) # Clear rendered_html to force another request. self.d.rendered_html = '' self.d.save() # Second request to verify the view sends them back response = self.client.get(self.url) eq_(expected_etag, trap['headers']['If-None-Match']) eq_(expected_modified, trap['headers']['If-Modified-Since']) # Third request to verify content was cached and served on a 304 response = self.client.get(self.url) ok_(expected_content in response.content) @override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=600) @mock.patch('requests.get') def test_error_reporting(self, mock_requests_get): """Kumascript reports errors in HTTP headers, Kuma should display""" # Make sure we have enough log messages to ensure there are more than # 10 lines of Base64 in headers. This ensures that there'll be a # failure if the view sorts FireLogger sequence number alphabetically # instead of numerically. expected_errors = { "logs": [ {"level": "debug", "message": "Message #1", "args": ['TestError', {}, {'name': 'SomeMacro', 'token': {'args': 'arguments here'}}], "time": "12:32:03 GMT-0400 (EDT)", "timestamp": "1331829123101000"}, {"level": "warning", "message": "Message #2", "args": ['TestError', {}, {'name': 'SomeMacro2'}], "time": "12:33:58 GMT-0400 (EDT)", "timestamp": "1331829238052000"}, {"level": "info", "message": "Message #3", "args": ['TestError'], "time": "12:34:22 GMT-0400 (EDT)", "timestamp": "1331829262403000"}, {"level": "debug", "message": "Message #4", "time": "12:32:03 GMT-0400 (EDT)", "timestamp": "1331829123101000"}, {"level": "warning", "message": "Message #5", "time": "12:33:58 GMT-0400 (EDT)", "timestamp": "1331829238052000"}, {"level": "info", "message": "Message #6", "time": "12:34:22 GMT-0400 (EDT)", "timestamp": "1331829262403000"}, ] } # Pack it up, get ready to ship it out. d_json = json.dumps(expected_errors) d_b64 = base64.encodestring(d_json) d_lines = [x for x in d_b64.split("\n") if x] # Headers are case-insensitive, so let's just drive that point home p = ['firelogger', 'FIRELOGGER', 'FireLogger'] fl_uid = 8675309 headers_out = {} for i in range(0, len(d_lines)): headers_out['%s-%s-%s' % (p[i % len(p)], fl_uid, i)] = d_lines[i] # Now, trap the request from the view. trap = {} def my_requests_get(url, headers=None, timeout=None): trap['headers'] = headers return FakeResponse( status_code=200, text='HELLO WORLD', headers=headers_out ) mock_requests_get.side_effect = my_requests_get # Finally, fire off the request to the view and ensure that the log # messages were received and displayed on the page. But, only for a # logged in user. 
self.client.login(username='admin', password='testpass') response = self.client.get(self.url) eq_(trap['headers']['X-FireLogger'], '1.2') for error in expected_errors['logs']: ok_(error['message'] in response.content) eq_(response.status_code, 200) @override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=600) @mock.patch('requests.post') def test_preview_nonascii(self, mock_post): """POSTing non-ascii to kumascript should encode to utf8""" content = u'Français' trap = {} def my_post(url, timeout=None, headers=None, data=None): trap['data'] = data return FakeResponse(status_code=200, headers={}, text=content.encode('utf8')) mock_post.side_effect = my_post self.client.login(username='admin', password='testpass') self.client.post(reverse('wiki.preview'), {'content': content}) try: trap['data'].decode('utf8') except UnicodeDecodeError: self.fail("Data wasn't posted as utf8") @attr('bug1197971') @override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=600) @mock.patch('kuma.wiki.kumascript.post') def test_dont_render_previews_for_deferred_docs(self, mock_post): """ When a user previews a document with deferred rendering, we want to force the preview to skip the kumascript POST, so that big previews can't use up too many kumascript connections. """ self.d.defer_rendering = True self.d.save() def should_not_post(*args, **kwargs): self.fail("Preview doc with deferred rendering should not " "post to KumaScript.") mock_post.side_effect = should_not_post self.client.login(username='admin', password='testpass') self.client.post(reverse('wiki.preview'), {'doc_id': self.d.id}) class DocumentSEOTests(UserTestCase, WikiTestCase): """Tests for the document seo logic""" localizing_client = True @attr('bug1190212') def test_get_seo_parent_doesnt_throw_404(self): slug_dict = {'seo_root': 'Root/Does/Not/Exist'} try: _get_seo_parent_title(slug_dict, 'bn-BD') except Http404: self.fail('Missing parent should not cause 404 from ' '_get_seo_parent_title') def test_seo_title(self): self.client.login(username='admin', password='testpass') # Utility to make a quick doc def _make_doc(title, aught_titles, slug): doc = document(save=True, slug=slug, title=title, locale=settings.WIKI_DEFAULT_LANGUAGE) revision(save=True, document=doc) response = self.client.get(reverse('wiki.document', args=[slug], locale=settings.WIKI_DEFAULT_LANGUAGE)) page = pq(response.content) ok_(page.find('title').text() in aught_titles) # Test nested document titles _make_doc('One', ['One | MDN'], 'one') _make_doc('Two', ['Two - One | MDN'], 'one/two') _make_doc('Three', ['Three - One | MDN'], 'one/two/three') _make_doc(u'Special Φ Char', [u'Special \u03a6 Char - One | MDN', u'Special \xce\xa6 Char - One | MDN'], 'one/two/special_char') # Additional tests for /Web/* changes _make_doc('Firefox OS', ['Firefox OS | MDN'], 'firefox_os') _make_doc('Email App', ['Email App - Firefox OS | MDN'], 'firefox_os/email_app') _make_doc('Web', ['Web | MDN'], 'Web') _make_doc('HTML', ['HTML | MDN'], 'Web/html') _make_doc('Fieldset', ['Fieldset - HTML | MDN'], 'Web/html/fieldset') _make_doc('Legend', ['Legend - HTML | MDN'], 'Web/html/fieldset/legend') def test_seo_script(self): self.client.login(username='admin', password='testpass') def make_page_and_compare_seo(slug, content, aught_preview): # Create the doc data = new_document_data() data.update({'title': 'blah', 'slug': slug, 'content': content}) response = self.client.post(reverse('wiki.create', locale=settings.WIKI_DEFAULT_LANGUAGE), data) eq_(302, response.status_code) # Connect to newly created page 
response = self.client.get(reverse('wiki.document', args=[slug], locale=settings.WIKI_DEFAULT_LANGUAGE)) page = pq(response.content) meta_content = page.find('meta[name=description]').attr('content') eq_(str(meta_content).decode('utf-8'), str(aught_preview).decode('utf-8')) # Test pages - very basic good = 'This is the content which should be chosen, man.' make_page_and_compare_seo('one', '<p>' + good + '</p>', good) # No content, no seo make_page_and_compare_seo('two', 'blahblahblahblah<br />', None) # No summary, no seo make_page_and_compare_seo('three', '<div><p>You cant see me</p></div>', None) # Warning paragraph ignored make_page_and_compare_seo('four', '<div class="geckoVersion">' '<p>No no no</p></div><p>yes yes yes</p>', 'yes yes yes') # Warning paragraph ignored, first one chosen if multiple matches make_page_and_compare_seo('five', '<div class="geckoVersion"><p>No no no</p>' '</div><p>yes yes yes</p>' '<p>ignore ignore ignore</p>', 'yes yes yes') # Don't take legacy crumbs make_page_and_compare_seo('six', u'<p>« CSS</p><p>I am me!</p>', 'I am me!') # Take the seoSummary class'd element make_page_and_compare_seo('seven', u'<p>I could be taken</p>' '<p class="seoSummary">I should be though</p>', 'I should be though') # Two summaries append make_page_and_compare_seo('eight', u'<p>I could be taken</p>' '<p class="seoSummary">a</p>' '<p class="seoSummary">b</p>', 'a b') # No brackets make_page_and_compare_seo('nine', u'<p>I <em>am</em> awesome.' ' <a href="blah">A link</a> is also &lt;cool&gt;</p>', u'I am awesome. A link is also cool') class DocumentEditingTests(UserTestCase, WikiTestCase): """Tests for the document-editing view""" localizing_client = True def test_noindex_post(self): self.client.login(username='admin', password='testpass') # Go to new document page to ensure no-index header works response = self.client.get(reverse('wiki.create', args=[], locale=settings.WIKI_DEFAULT_LANGUAGE)) eq_(response['X-Robots-Tag'], 'noindex') @attr('bug821986') def test_editor_safety_filter(self): """Safety filter should be applied before rendering editor""" self.client.login(username='admin', password='testpass') r = revision(save=True, content=""" <svg><circle onload=confirm(3)> """) args = [r.document.slug] urls = ( reverse('wiki.edit', args=args), '%s?tolocale=%s' % (reverse('wiki.translate', args=args), 'fr') ) for url in urls: page = pq(self.client.get(url).content) editor_src = page.find('#id_content').text() ok_('onload' not in editor_src) def test_create_on_404(self): self.client.login(username='admin', password='testpass') # Create the parent page. d, r = doc_rev() # Establish attribs of child page. locale = settings.WIKI_DEFAULT_LANGUAGE local_slug = 'Some_New_Title' slug = '%s/%s' % (d.slug, local_slug) url = reverse('wiki.document', args=[slug], locale=locale) # Ensure redirect to create new page on attempt to visit non-existent # child page. resp = self.client.get(url) eq_(302, resp.status_code) ok_('docs/new' in resp['Location']) ok_('?slug=%s' % local_slug in resp['Location']) # Ensure real 404 for visit to non-existent page with params common to # kumascript and raw content API. 
for p_name in ('raw', 'include', 'nocreate'): sub_url = '%s?%s=1' % (url, p_name) resp = self.client.get(sub_url) eq_(404, resp.status_code) # Ensure root level documents work, not just children response = self.client.get(reverse('wiki.document', args=['noExist'], locale=locale)) eq_(302, response.status_code) response = self.client.get(reverse('wiki.document', args=['Template:NoExist'], locale=locale)) eq_(302, response.status_code) def test_new_document_comment(self): """Creating a new document with a revision comment saves the comment""" self.client.login(username='admin', password='testpass') comment = 'I am the revision comment' slug = 'Test-doc-comment' loc = settings.WIKI_DEFAULT_LANGUAGE # Create a new doc. data = new_document_data() data.update({'slug': slug, 'comment': comment}) self.client.post(reverse('wiki.create'), data) doc = Document.objects.get(slug=slug, locale=loc) eq_(comment, doc.current_revision.comment) @attr('toc') def test_toc_initial(self): self.client.login(username='admin', password='testpass') resp = self.client.get(reverse('wiki.create')) eq_(200, resp.status_code) page = pq(resp.content) toc_select = page.find('#id_toc_depth') toc_options = toc_select.find('option') for option in toc_options: opt_element = pq(option) found_selected = False if opt_element.attr('selected'): found_selected = True eq_(str(Revision.TOC_DEPTH_H4), opt_element.attr('value')) if not found_selected: raise AssertionError("No ToC depth initially selected.") @attr('retitle') def test_retitling_solo_doc(self): """ Editing just title of non-parent doc: * Changes title * Doesn't cause errors * Doesn't create redirect """ # Not testing slug changes separately; the model tests cover those plus # slug+title changes. If title changes work in the view, the rest # should also. self.client.login(username='admin', password='testpass') new_title = 'Some New Title' d, r = doc_rev() old_title = d.title data = new_document_data() data.update({'title': new_title, 'form': 'rev'}) data['slug'] = '' url = reverse('wiki.edit', args=[d.slug]) self.client.post(url, data) eq_(new_title, Document.objects.get(slug=d.slug, locale=d.locale).title) try: Document.objects.get(title=old_title) self.fail("Should not find doc by old title after retitling.") except Document.DoesNotExist: pass @attr('retitle') def test_retitling_parent_doc(self): """ Editing just title of parent doc: * Changes title * Doesn't cause errors * Doesn't create redirect """ # Not testing slug changes separately; the model tests cover those plus # slug+title changes. If title changes work in the view, the rest # should also. 
        self.client.login(username='admin', password='testpass')

        # create parent doc & rev along with child doc & rev
        d = document(title='parent', save=True)
        revision(document=d, content='parent', save=True)
        d2 = document(title='child', parent_topic=d, save=True)
        revision(document=d2, content='child', save=True)

        old_title = d.title
        new_title = 'Some New Title'
        data = new_document_data()
        data.update({'title': new_title, 'form': 'rev'})
        data['slug'] = ''
        url = reverse('wiki.edit', args=[d.slug])
        self.client.post(url, data)
        eq_(new_title,
            Document.objects.get(slug=d.slug, locale=d.locale).title)
        try:
            Document.objects.get(title=old_title)
            self.fail("Should not find doc by old title after retitling.")
        except Document.DoesNotExist:
            pass

    def test_slug_change_ignored_for_iframe(self):
        """When the slug of an article is edited in an iframe, the change
        is ignored."""
        self.client.login(username='admin', password='testpass')
        new_slug = 'some_new_slug'
        d, r = doc_rev()
        old_slug = d.slug
        data = new_document_data()
        data.update({'title': d.title,
                     'slug': new_slug,
                     'form': 'rev'})
        self.client.post('%s?iframe=1' % reverse('wiki.edit', args=[d.slug]),
                         data)
        eq_(old_slug, Document.objects.get(slug=d.slug,
                                           locale=d.locale).slug)
        assert "REDIRECT" not in Document.objects.get(slug=old_slug).html

    @attr('clobber')
    def test_slug_collision_errors(self):
        """When an attempt is made to retitle an article and another with
        that title already exists, there should be form errors"""
        self.client.login(username='admin', password='testpass')

        exist_slug = "existing-doc"

        # Create a new doc.
        data = new_document_data()
        data.update({"slug": exist_slug})
        resp = self.client.post(reverse('wiki.create'), data)
        eq_(302, resp.status_code)

        # Create another new doc.
        data = new_document_data()
        data.update({"slug": 'some-new-title'})
        resp = self.client.post(reverse('wiki.create'), data)
        eq_(302, resp.status_code)

        # Now, post an update with duplicate slug
        data.update({
            'form': 'rev',
            'slug': exist_slug
        })
        resp = self.client.post(reverse('wiki.edit', args=['some-new-title']),
                                data)
        eq_(200, resp.status_code)
        p = pq(resp.content)

        ok_(p.find('.errorlist').length > 0)
        ok_(p.find('.errorlist a[href="#id_slug"]').length > 0)

    @attr('clobber')
    def test_redirect_can_be_clobbered(self):
        """When an attempt is made to retitle an article, and another
        article with that title exists but is a redirect, there should
        be no errors and the redirect should be replaced."""
        self.client.login(username='admin', password='testpass')

        exist_title = "Existing doc"
        exist_slug = "existing-doc"

        changed_title = 'Changed title'
        changed_slug = 'changed-title'

        # Create a new doc.
data = new_document_data() data.update({"title": exist_title, "slug": exist_slug}) resp = self.client.post(reverse('wiki.create'), data) eq_(302, resp.status_code) # Change title and slug data.update({'form': 'rev', 'title': changed_title, 'slug': changed_slug}) resp = self.client.post(reverse('wiki.edit', args=[exist_slug]), data) eq_(302, resp.status_code) # Change title and slug back to originals, clobbering the redirect data.update({'form': 'rev', 'title': exist_title, 'slug': exist_slug}) resp = self.client.post(reverse('wiki.edit', args=[changed_slug]), data) eq_(302, resp.status_code) def test_invalid_slug(self): """Slugs cannot contain "$", but can contain "/".""" self.client.login(username='admin', password='testpass') data = new_document_data() data['title'] = 'valid slug' data['slug'] = 'valid' response = self.client.post(reverse('wiki.create'), data) self.assertRedirects(response, reverse('wiki.document', args=[data['slug']], locale=settings.WIKI_DEFAULT_LANGUAGE)) new_url = reverse('wiki.create') invalid_slugs = [ 'va/lid', # slashes 'inva$lid', # dollar signs 'inva?lid', # question marks 'inva%lid', # percentage sign '"invalid\'', # quotes 'in valid', # whitespace ] for invalid_slug in invalid_slugs: data['title'] = 'invalid with %s' % invalid_slug data['slug'] = invalid_slug response = self.client.post(new_url, data) self.assertContains(response, 'The slug provided is not valid.') def test_invalid_reserved_term_slug(self): """Slugs should not collide with reserved URL patterns""" self.client.login(username='admin', password='testpass') data = new_document_data() # TODO: This is info derived from urls.py, but unsure how to DRY it reserved_slugs = ( 'ckeditor_config.js', 'watch-ready-for-review', 'unwatch-ready-for-review', 'watch-approved', 'unwatch-approved', '.json', 'new', 'all', 'preview-wiki-content', 'category/10', 'needs-review/technical', 'needs-review/', 'feeds/atom/all/', 'feeds/atom/needs-review/technical', 'feeds/atom/needs-review/', 'tag/tasty-pie' ) for term in reserved_slugs: data['title'] = 'invalid with %s' % term data['slug'] = term response = self.client.post(reverse('wiki.create'), data) self.assertContains(response, 'The slug provided is not valid.') def test_slug_revamp(self): self.client.login(username='admin', password='testpass') def _createAndRunTests(slug): # Create some vars locale = settings.WIKI_DEFAULT_LANGUAGE foreign_locale = 'es' new_doc_url = reverse('wiki.create') invalid_slug = "some/thing" invalid_slugs = [ "some/thing", "some?thing", "some thing", "some%thing", "$omething", ] child_slug = 'kiddy' grandchild_slug = 'grandkiddy' # Create the document data doc_data = new_document_data() doc_data['title'] = slug + ' Doc' doc_data['slug'] = slug doc_data['content'] = 'This is the content' doc_data['is_localizable'] = True """ NEW DOCUMENT CREATION, CHILD CREATION """ # Create the document, validate it exists response = self.client.post(new_doc_url, doc_data) eq_(302, response.status_code) # 302 = good, forward to new page ok_(slug in response['Location']) self.assertRedirects(response, reverse('wiki.document', locale=locale, args=[slug])) doc_url = reverse('wiki.document', locale=locale, args=[slug]) eq_(self.client.get(doc_url).status_code, 200) doc = Document.objects.get(locale=locale, slug=slug) eq_(doc.slug, slug) eq_(0, len(Document.objects.filter(title=doc_data['title'] + 'Redirect'))) # Create child document data child_data = new_document_data() child_data['title'] = slug + ' Child Doc' child_data['slug'] = invalid_slug 
child_data['content'] = 'This is the content' child_data['is_localizable'] = True # Attempt to create the child with invalid slug, validate it fails def test_invalid_slug(inv_slug, url, data, doc): data['slug'] = inv_slug response = self.client.post(url, data) page = pq(response.content) eq_(200, response.status_code) # 200 = bad, invalid data # Slug doesn't add parent eq_(inv_slug, page.find('input[name=slug]')[0].value) eq_(doc.get_absolute_url(), page.find('.metadataDisplay').attr('href')) self.assertContains(response, 'The slug provided is not valid.') for invalid_slug in invalid_slugs: test_invalid_slug(invalid_slug, new_doc_url + '?parent=' + str(doc.id), child_data, doc) # Attempt to create the child with *valid* slug, # should succeed and redirect child_data['slug'] = child_slug full_child_slug = slug + '/' + child_data['slug'] response = self.client.post(new_doc_url + '?parent=' + str(doc.id), child_data) eq_(302, response.status_code) self.assertRedirects(response, reverse('wiki.document', locale=locale, args=[full_child_slug])) child_doc = Document.objects.get(locale=locale, slug=full_child_slug) eq_(child_doc.slug, full_child_slug) eq_(0, len(Document.objects.filter( title=child_data['title'] + ' Redirect 1', locale=locale))) # Create grandchild data grandchild_data = new_document_data() grandchild_data['title'] = slug + ' Grandchild Doc' grandchild_data['slug'] = invalid_slug grandchild_data['content'] = 'This is the content' grandchild_data['is_localizable'] = True # Attempt to create the child with invalid slug, validate it fails response = self.client.post( new_doc_url + '?parent=' + str(child_doc.id), grandchild_data) page = pq(response.content) eq_(200, response.status_code) # 200 = bad, invalid data # Slug doesn't add parent eq_(invalid_slug, page.find('input[name=slug]')[0].value) eq_(child_doc.get_absolute_url(), page.find('.metadataDisplay').attr('href')) self.assertContains(response, 'The slug provided is not valid.') # Attempt to create the child with *valid* slug, # should succeed and redirect grandchild_data['slug'] = grandchild_slug full_grandchild_slug = (full_child_slug + '/' + grandchild_data['slug']) response = self.client.post( new_doc_url + '?parent=' + str(child_doc.id), grandchild_data) eq_(302, response.status_code) self.assertRedirects(response, reverse('wiki.document', locale=locale, args=[full_grandchild_slug])) grandchild_doc = Document.objects.get(locale=locale, slug=full_grandchild_slug) eq_(grandchild_doc.slug, full_grandchild_slug) missing_title = grandchild_data['title'] + ' Redirect 1' eq_(0, len(Document.objects.filter(title=missing_title, locale=locale))) def _run_edit_tests(edit_slug, edit_data, edit_doc, edit_parent_path): """EDIT DOCUMENT TESTING""" # Load "Edit" page for the root doc, ensure no "/" in the slug # Also ensure the 'parent' link is not present response = self.client.get(reverse('wiki.edit', args=[edit_doc.slug], locale=locale)) eq_(200, response.status_code) page = pq(response.content) eq_(edit_data['slug'], page.find('input[name=slug]')[0].value) eq_(edit_parent_path, page.find('.metadataDisplay').attr('href')) # Attempt an invalid edit of the root, # ensure the slug stays the same (i.e. 
no parent prepending) def test_invalid_slug_edit(inv_slug, url, data): data['slug'] = inv_slug data['form'] = 'rev' response = self.client.post(url, data) eq_(200, response.status_code) # 200 = bad, invalid data page = pq(response.content) # Slug doesn't add parent eq_(inv_slug, page.find('input[name=slug]')[0].value) eq_(edit_parent_path, page.find('.metadataDisplay').attr('href')) self.assertContains(response, 'The slug provided is not valid.') # Ensure no redirect redirect_title = data['title'] + ' Redirect 1' eq_(0, len(Document.objects.filter(title=redirect_title, locale=locale))) # Push a valid edit, without changing the slug edit_data['slug'] = edit_slug edit_data['form'] = 'rev' response = self.client.post(reverse('wiki.edit', args=[edit_doc.slug], locale=locale), edit_data) eq_(302, response.status_code) # Ensure no redirect redirect_title = edit_data['title'] + ' Redirect 1' eq_(0, len(Document.objects.filter(title=redirect_title, locale=locale))) self.assertRedirects(response, reverse('wiki.document', locale=locale, args=[edit_doc.slug])) def _run_translate_tests(translate_slug, translate_data, translate_doc): """TRANSLATION DOCUMENT TESTING""" foreign_url = (reverse('wiki.translate', args=[translate_doc.slug], locale=locale) + '?tolocale=' + foreign_locale) foreign_doc_url = reverse('wiki.document', args=[translate_doc.slug], locale=foreign_locale) # Verify translate page form is populated correctly response = self.client.get(foreign_url) eq_(200, response.status_code) page = pq(response.content) eq_(translate_data['slug'], page.find('input[name=slug]')[0].value) # Attempt an invalid edit of the root # ensure the slug stays the same (i.e. no parent prepending) def test_invalid_slug_translate(inv_slug, url, data): data['slug'] = inv_slug data['form'] = 'both' response = self.client.post(url, data) eq_(200, response.status_code) # 200 = bad, invalid data page = pq(response.content) # Slug doesn't add parent eq_(inv_slug, page.find('input[name=slug]')[0].value) self.assertContains(response, 'The slug provided is not valid.') # Ensure no redirect eq_(0, len(Document.objects.filter(title=data['title'] + ' Redirect 1', locale=foreign_locale))) # Push a valid translation translate_data['slug'] = translate_slug translate_data['form'] = 'both' response = self.client.post(foreign_url, translate_data) eq_(302, response.status_code) # Ensure no redirect redirect_title = translate_data['title'] + ' Redirect 1' eq_(0, len(Document.objects.filter(title=redirect_title, locale=foreign_locale))) self.assertRedirects(response, foreign_doc_url) return Document.objects.get(locale=foreign_locale, slug=translate_doc.slug) _run_translate_tests(slug, doc_data, doc) _run_translate_tests(child_slug, child_data, child_doc) _run_translate_tests(grandchild_slug, grandchild_data, grandchild_doc) def _run_translate_edit_tests(edit_slug, edit_data, edit_doc): """TEST BASIC EDIT OF TRANSLATION""" # Hit the initial URL response = self.client.get(reverse('wiki.edit', args=[edit_doc.slug], locale=foreign_locale)) eq_(200, response.status_code) page = pq(response.content) eq_(edit_data['slug'], page.find('input[name=slug]')[0].value) # Attempt an invalid edit of the root, ensure the slug stays # the same (i.e. 
no parent prepending) edit_data['slug'] = invalid_slug edit_data['form'] = 'both' response = self.client.post(reverse('wiki.edit', args=[edit_doc.slug], locale=foreign_locale), edit_data) eq_(200, response.status_code) # 200 = bad, invalid data page = pq(response.content) # Slug doesn't add parent eq_(invalid_slug, page.find('input[name=slug]')[0].value) self.assertContains(response, page.find('ul.errorlist li' ' a[href="#id_slug"]'). text()) # Ensure no redirect eq_(0, len(Document.objects.filter(title=edit_data['title'] + ' Redirect 1', locale=foreign_locale))) # Push a valid edit, without changing the slug edit_data['slug'] = edit_slug response = self.client.post(reverse('wiki.edit', args=[edit_doc.slug], locale=foreign_locale), edit_data) eq_(302, response.status_code) # Ensure no redirect eq_(0, len(Document.objects.filter(title=edit_data['title'] + ' Redirect 1', locale=foreign_locale))) self.assertRedirects(response, reverse('wiki.document', locale=foreign_locale, args=[edit_doc.slug])) """ TEST EDITING SLUGS AND TRANSLATIONS """ def _run_slug_edit_tests(edit_slug, edit_data, edit_doc, loc): edit_data['slug'] = edit_data['slug'] + '_Updated' edit_data['form'] = 'rev' response = self.client.post(reverse('wiki.edit', args=[edit_doc.slug], locale=loc), edit_data) eq_(302, response.status_code) # HACK: the es doc gets a 'Redirigen 1' if locale/ is updated # Ensure *1* redirect eq_(1, len(Document.objects.filter( title__contains=edit_data['title'] + ' Redir', locale=loc))) self.assertRedirects(response, reverse('wiki.document', locale=loc, args=[edit_doc.slug.replace( edit_slug, edit_data['slug'])])) # Run all of the tests _createAndRunTests("parent") # Test that slugs with the same "specific" slug but in different levels # in the heiharachy are validate properly upon submission # Create base doc parent_doc = document(title='Length', slug='length', is_localizable=True, locale=settings.WIKI_DEFAULT_LANGUAGE) parent_doc.save() r = revision(document=parent_doc) r.save() # Create child, try to use same slug, should work child_data = new_document_data() child_data['title'] = 'Child Length' child_data['slug'] = 'length' child_data['content'] = 'This is the content' child_data['is_localizable'] = True child_url = (reverse('wiki.create') + '?parent=' + str(parent_doc.id)) response = self.client.post(child_url, child_data) eq_(302, response.status_code) self.assertRedirects(response, reverse('wiki.document', args=['length/length'], locale=settings.WIKI_DEFAULT_LANGUAGE)) # Editing "length/length" document doesn't cause errors child_data['form'] = 'rev' child_data['slug'] = '' edit_url = reverse('wiki.edit', args=['length/length'], locale=settings.WIKI_DEFAULT_LANGUAGE) response = self.client.post(edit_url, child_data) eq_(302, response.status_code) self.assertRedirects(response, reverse('wiki.document', args=['length/length'], locale=settings.WIKI_DEFAULT_LANGUAGE)) # Creating a new translation of "length" and "length/length" # doesn't cause errors child_data['form'] = 'both' child_data['slug'] = 'length' translate_url = reverse('wiki.document', args=[child_data['slug']], locale=settings.WIKI_DEFAULT_LANGUAGE) response = self.client.post(translate_url + '$translate?tolocale=es', child_data) eq_(302, response.status_code) self.assertRedirects(response, reverse('wiki.document', args=[child_data['slug']], locale='es')) translate_url = reverse('wiki.document', args=['length/length'], locale=settings.WIKI_DEFAULT_LANGUAGE) response = self.client.post(translate_url + '$translate?tolocale=es', 
child_data) eq_(302, response.status_code) slug = 'length/' + child_data['slug'] self.assertRedirects(response, reverse('wiki.document', args=[slug], locale='es')) def test_translate_keeps_topical_parent(self): self.client.login(username='admin', password='testpass') en_doc, de_doc = make_translation() en_child_doc = document(parent_topic=en_doc, slug='en-child', save=True) en_child_rev = revision(document=en_child_doc, save=True) de_child_doc = document(parent_topic=de_doc, locale='de', slug='de-child', parent=en_child_doc, save=True) revision(document=de_child_doc, save=True) post_data = {} post_data['slug'] = de_child_doc.slug post_data['title'] = 'New title' post_data['form'] = 'both' post_data['content'] = 'New translation' post_data['tolocale'] = 'de' post_data['toc_depth'] = 0 post_data['based_on'] = en_child_rev.id post_data['parent_id'] = en_child_doc.id translate_url = reverse('wiki.edit', args=[de_child_doc.slug], locale='de') self.client.post(translate_url, post_data) de_child_doc = Document.objects.get(locale='de', slug='de-child') eq_(en_child_doc, de_child_doc.parent) eq_(de_doc, de_child_doc.parent_topic) eq_('New translation', de_child_doc.current_revision.content) def test_translate_keeps_toc_depth(self): self.client.login(username='admin', password='testpass') locale = settings.WIKI_DEFAULT_LANGUAGE original_slug = 'eng-doc' foreign_locale = 'es' foreign_slug = 'es-doc' en_doc = document(title='Eng Doc', slug=original_slug, is_localizable=True, locale=locale) en_doc.save() r = revision(document=en_doc, toc_depth=1) r.save() post_data = new_document_data() post_data['title'] = 'ES Doc' post_data['slug'] = foreign_slug post_data['content'] = 'This is the content' post_data['is_localizable'] = True post_data['form'] = 'both' post_data['toc_depth'] = r.toc_depth translate_url = reverse('wiki.document', args=[original_slug], locale=settings.WIKI_DEFAULT_LANGUAGE) translate_url += '$translate?tolocale=' + foreign_locale response = self.client.post(translate_url, post_data) self.assertRedirects(response, reverse('wiki.document', args=[foreign_slug], locale=foreign_locale)) es_d = Document.objects.get(locale=foreign_locale, slug=foreign_slug) eq_(r.toc_depth, es_d.current_revision.toc_depth) @override_config(KUMASCRIPT_TIMEOUT=1.0) def test_translate_rebuilds_source_json(self): self.client.login(username='admin', password='testpass') # Create an English original and a Spanish translation. en_slug = 'en-doc' es_locale = 'es' es_slug = 'es-doc' en_doc = document(title='EN Doc', slug=en_slug, is_localizable=True, locale=settings.WIKI_DEFAULT_LANGUAGE) en_doc.save() en_doc.render() en_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE, slug=en_slug) json.loads(en_doc.json) r = revision(document=en_doc) r.save() translation_data = new_document_data() translation_data['title'] = 'ES Doc' translation_data['slug'] = es_slug translation_data['content'] = 'This is the content' translation_data['is_localizable'] = False translation_data['form'] = 'both' translate_url = reverse('wiki.document', args=[en_slug], locale=settings.WIKI_DEFAULT_LANGUAGE) translate_url += '$translate?tolocale=' + es_locale response = self.client.post(translate_url, translation_data) # Sanity to make sure the translate succeeded. 
self.assertRedirects(response, reverse('wiki.document', args=[es_slug], locale=es_locale)) es_doc = Document.objects.get(locale=es_locale, slug=es_slug) es_doc.render() new_en_json = json.loads(Document.objects.get(pk=en_doc.pk).json) ok_('translations' in new_en_json) ok_(translation_data['title'] in [t['title'] for t in new_en_json['translations']]) es_translation_json = [t for t in new_en_json['translations'] if t['title'] == translation_data['title']][0] eq_(es_translation_json['last_edit'], es_doc.current_revision.created.isoformat()) def test_slug_translate(self): """Editing a translated doc keeps the correct slug""" self.client.login(username='admin', password='testpass') # Settings original_slug = 'eng-doc' child_slug = 'child-eng-doc' foreign_locale = 'es' foreign_slug = 'es-doc' foreign_child_slug = 'child-es-doc' # Create the one-level English Doc en_doc = document(title='Eng Doc', slug=original_slug, is_localizable=True, locale=settings.WIKI_DEFAULT_LANGUAGE) en_doc.save() r = revision(document=en_doc) r.save() # Translate to ES parent_data = new_document_data() parent_data['title'] = 'ES Doc' parent_data['slug'] = foreign_slug parent_data['content'] = 'This is the content' parent_data['is_localizable'] = True parent_data['form'] = 'both' translate_url = reverse('wiki.document', args=[original_slug], locale=settings.WIKI_DEFAULT_LANGUAGE) translate_url += '$translate?tolocale=' + foreign_locale response = self.client.post(translate_url, parent_data) self.assertRedirects(response, reverse('wiki.document', args=[foreign_slug], locale=foreign_locale)) # Go to edit the translation, ensure the the slug is correct response = self.client.get(reverse('wiki.edit', args=[foreign_slug], locale=foreign_locale)) page = pq(response.content) eq_(page.find('input[name=slug]')[0].value, foreign_slug) # Create an English child now en_doc = document(title='Child Eng Doc', slug=original_slug + '/' + child_slug, is_localizable=True, locale=settings.WIKI_DEFAULT_LANGUAGE, parent_topic=en_doc) en_doc.save() r = revision(document=en_doc) r.save() # Translate to ES child_data = new_document_data() child_data['title'] = 'ES Child Doc' child_data['slug'] = foreign_child_slug child_data['content'] = 'This is the content' child_data['is_localizable'] = True child_data['form'] = 'both' translate_url = reverse('wiki.document', args=[original_slug + '/' + child_slug], locale=settings.WIKI_DEFAULT_LANGUAGE) translate_url += '$translate?tolocale=' + foreign_locale response = self.client.post(translate_url, child_data) slug = foreign_slug + '/' + child_data['slug'] self.assertRedirects(response, reverse('wiki.document', args=[slug], locale=foreign_locale)) def test_clone(self): self.client.login(username='admin', password='testpass') slug = None title = None content = '<p>Hello!</p>' test_revision = revision(save=True, title=title, slug=slug, content=content) document = test_revision.document response = self.client.get(reverse('wiki.create', args=[], locale=settings.WIKI_DEFAULT_LANGUAGE) + '?clone=' + str(document.id)) page = pq(response.content) eq_(page.find('input[name=title]')[0].value, title) eq_(page.find('input[name=slug]')[0].value, slug) self.assertHTMLEqual(page.find('textarea[name=content]')[0].value, content) def test_localized_based_on(self): """Editing a localized article 'based on' an older revision of the localization is OK.""" self.client.login(username='admin', password='testpass') en_r = revision(save=True) fr_d = document(parent=en_r.document, locale='fr', save=True) fr_r = 
revision(document=fr_d, based_on=en_r, save=True) url = reverse('wiki.new_revision_based_on', locale='fr', args=(fr_d.slug, fr_r.pk,)) response = self.client.get(url) input = pq(response.content)('#id_based_on')[0] eq_(int(input.value), en_r.pk) def test_restore_translation_source(self): """Edit a localized article without an English parent allows user to set translation parent.""" # Create english doc self.client.login(username='admin', password='testpass') data = new_document_data() self.client.post(reverse('wiki.create'), data) en_d = Document.objects.get(locale=data['locale'], slug=data['slug']) # Create french doc data.update({'locale': 'fr', 'title': 'A Tést Articlé', 'content': "C'ést bon."}) self.client.post(reverse('wiki.create', locale='fr'), data) fr_d = Document.objects.get(locale=data['locale'], slug=data['slug']) # Check edit doc page for choose parent box url = reverse('wiki.edit', args=[fr_d.slug], locale='fr') response = self.client.get(url) ok_(pq(response.content)('li.metadata-choose-parent')) # Set the parent data.update({'form': 'rev', 'parent_id': en_d.id}) resp = self.client.post(url, data) eq_(302, resp.status_code) ok_('fr/docs/a-test-article' in resp['Location']) # Check the languages drop-down resp = self.client.get(resp['Location']) translations = pq(resp.content)('ul#translations li') ok_('A Test Article' in translations.html()) ok_('English (US)' in translations.text()) def test_translation_source(self): """Allow users to change "translation source" settings""" self.client.login(username='admin', password='testpass') data = new_document_data() self.client.post(reverse('wiki.create'), data) parent = Document.objects.get(locale=data['locale'], slug=data['slug']) data.update({'title': 'Another Test Article', 'content': "Yahoooo!", 'parent_id': parent.id}) self.client.post(reverse('wiki.create'), data) child = Document.objects.get(locale=data['locale'], slug=data['slug']) url = reverse('wiki.edit', args=[child.slug]) response = self.client.get(url) content = pq(response.content) ok_(content('li.metadata-choose-parent')) ok_(str(parent.id) in content.html()) @attr('tags') @mock.patch.object(Site.objects, 'get_current') def test_document_tags(self, get_current): """Document tags can be edited through revisions""" data = new_document_data() locale = data['locale'] slug = data['slug'] path = slug ts1 = ('JavaScript', 'AJAX', 'DOM') ts2 = ('XML', 'JSON') get_current.return_value.domain = 'su.mo.com' self.client.login(username='admin', password='testpass') def assert_tag_state(yes_tags, no_tags): # Ensure the tags are found for the Documents doc = Document.objects.get(locale=locale, slug=slug) doc_tags = [x.name for x in doc.tags.all()] for t in yes_tags: ok_(t in doc_tags) for t in no_tags: ok_(t not in doc_tags) # Ensure the tags are found in the Document view response = self.client.get(reverse('wiki.document', args=[doc.slug]), data) page = pq(response.content) for t in yes_tags: eq_(1, page.find('.tags li a:contains("%s")' % t).length, '%s should NOT appear in document view tags' % t) for t in no_tags: eq_(0, page.find('.tags li a:contains("%s")' % t).length, '%s should appear in document view tags' % t) # Check for the document slug (title in feeds) in the tag listing for t in yes_tags: response = self.client.get(reverse('wiki.tag', args=[t])) self.assertContains(response, doc.slug, msg_prefix=t) response = self.client.get(reverse('wiki.feeds.recent_documents', args=['atom', t])) self.assertContains(response, doc.title) for t in no_tags: response = 
self.client.get(reverse('wiki.tag', args=[t])) ok_(doc.slug not in response.content.decode('utf-8')) response = self.client.get(reverse('wiki.feeds.recent_documents', args=['atom', t])) self.assertNotContains(response, doc.title) # Create a new doc with tags data.update({'slug': slug, 'tags': ','.join(ts1)}) self.client.post(reverse('wiki.create'), data) assert_tag_state(ts1, ts2) # Now, update the tags. data.update({'form': 'rev', 'tags': ', '.join(ts2)}) self.client.post(reverse('wiki.edit', args=[path]), data) assert_tag_state(ts2, ts1) @attr('review_tags') @mock.patch.object(Site.objects, 'get_current') def test_review_tags(self, get_current): """Review tags can be managed on document revisions""" get_current.return_value.domain = 'su.mo.com' self.client.login(username='admin', password='testpass') # Create a new doc with one review tag data = new_document_data() data.update({'review_tags': ['technical']}) response = self.client.post(reverse('wiki.create'), data) # Ensure there's now a doc with that expected tag in its newest # revision doc = Document.objects.get(slug="a-test-article") rev = doc.revisions.order_by('-id').all()[0] review_tags = [x.name for x in rev.review_tags.all()] eq_(['technical'], review_tags) # Now, post an update with two tags data.update({ 'form': 'rev', 'review_tags': ['editorial', 'technical'], }) response = self.client.post(reverse('wiki.edit', args=[doc.slug]), data) # Ensure the doc's newest revision has both tags. doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE, slug="a-test-article") rev = doc.revisions.order_by('-id').all()[0] review_tags = [x.name for x in rev.review_tags.all()] review_tags.sort() eq_(['editorial', 'technical'], review_tags) # Now, ensure that warning boxes appear for the review tags. response = self.client.get(reverse('wiki.document', args=[doc.slug]), data) page = pq(response.content) eq_(2, page.find('.warning.warning-review').length) # Ensure the page appears on the listing pages response = self.client.get(reverse('wiki.list_review')) eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" % doc.title).length) response = self.client.get(reverse('wiki.list_review_tag', args=('technical',))) eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" % doc.title).length) response = self.client.get(reverse('wiki.list_review_tag', args=('editorial',))) eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" % doc.title).length) # Also, ensure that the page appears in the proper feeds # HACK: Too lazy to parse the XML. Lazy lazy. response = self.client.get(reverse('wiki.feeds.list_review', args=('atom',))) ok_('<entry><title>%s</title>' % doc.title in response.content) response = self.client.get(reverse('wiki.feeds.list_review_tag', args=('atom', 'technical', ))) ok_('<entry><title>%s</title>' % doc.title in response.content) response = self.client.get(reverse('wiki.feeds.list_review_tag', args=('atom', 'editorial', ))) ok_('<entry><title>%s</title>' % doc.title in response.content) # Post an edit that removes one of the tags. data.update({ 'form': 'rev', 'review_tags': ['editorial', ] }) response = self.client.post(reverse('wiki.edit', args=[doc.slug]), data) # Ensure only one of the tags' warning boxes appears, now. 
response = self.client.get(reverse('wiki.document', args=[doc.slug]), data) page = pq(response.content) eq_(1, page.find('.warning.warning-review').length) # Ensure the page appears on the listing pages response = self.client.get(reverse('wiki.list_review')) eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" % doc.title).length) response = self.client.get(reverse('wiki.list_review_tag', args=('technical',))) eq_(0, pq(response.content).find("ul.document-list li a:contains('%s')" % doc.title).length) response = self.client.get(reverse('wiki.list_review_tag', args=('editorial',))) eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" % doc.title).length) # Also, ensure that the page appears in the proper feeds # HACK: Too lazy to parse the XML. Lazy lazy. response = self.client.get(reverse('wiki.feeds.list_review', args=('atom',))) ok_('<entry><title>%s</title>' % doc.title in response.content) response = self.client.get(reverse('wiki.feeds.list_review_tag', args=('atom', 'technical', ))) ok_('<entry><title>%s</title>' % doc.title not in response.content) response = self.client.get(reverse('wiki.feeds.list_review_tag', args=('atom', 'editorial', ))) ok_('<entry><title>%s</title>' % doc.title in response.content) @attr('review-tags') def test_quick_review(self): """Test the quick-review button.""" self.client.login(username='admin', password='testpass') test_data = [ { 'params': {'approve_technical': 1}, 'expected_tags': ['editorial'], 'name': 'technical', 'message_contains': ['Technical review completed.'] }, { 'params': {'approve_editorial': 1}, 'expected_tags': ['technical'], 'name': 'editorial', 'message_contains': ['Editorial review completed.'] }, { 'params': { 'approve_technical': 1, 'approve_editorial': 1 }, 'expected_tags': [], 'name': 'editorial-technical', 'message_contains': [ 'Technical review completed.', 'Editorial review completed.', ] } ] for data_dict in test_data: slug = 'test-quick-review-%s' % data_dict['name'] data = new_document_data() data.update({'review_tags': ['editorial', 'technical'], 'slug': slug}) resp = self.client.post(reverse('wiki.create'), data) doc = Document.objects.get(slug=slug) rev = doc.revisions.order_by('-id').all()[0] review_url = reverse('wiki.quick_review', args=[doc.slug]) params = dict(data_dict['params'], revision_id=rev.id) resp = self.client.post(review_url, params) eq_(302, resp.status_code) doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug) rev = doc.revisions.order_by('-id').all()[0] review_tags = [x.name for x in rev.review_tags.all()] review_tags.sort() for expected_str in data_dict['message_contains']: ok_(expected_str in rev.summary) ok_(expected_str in rev.comment) eq_(data_dict['expected_tags'], review_tags) @attr('midair') def test_edit_midair_collision(self): self.client.login(username='admin', password='testpass') # Post a new document. data = new_document_data() resp = self.client.post(reverse('wiki.create'), data) doc = Document.objects.get(slug=data['slug']) # Edit #1 starts... resp = self.client.get(reverse('wiki.edit', args=[doc.slug])) page = pq(resp.content) rev_id1 = page.find('input[name="current_rev"]').attr('value') # Edit #2 starts... 
resp = self.client.get(reverse('wiki.edit', args=[doc.slug])) page = pq(resp.content) rev_id2 = page.find('input[name="current_rev"]').attr('value') # Edit #2 submits successfully data.update({ 'form': 'rev', 'content': 'This edit got there first', 'current_rev': rev_id2 }) resp = self.client.post(reverse('wiki.edit', args=[doc.slug]), data) eq_(302, resp.status_code) # Edit #1 submits, but receives a mid-aired notification data.update({ 'form': 'rev', 'content': 'This edit gets mid-aired', 'current_rev': rev_id1 }) resp = self.client.post(reverse('wiki.edit', args=[doc.slug]), data) eq_(200, resp.status_code) ok_(unicode(MIDAIR_COLLISION).encode('utf-8') in resp.content, "Midair collision message should appear") @attr('toc') def test_toc_toggle_off(self): """Toggling of table of contents in revisions""" self.client.login(username='admin', password='testpass') d, _ = doc_rev() data = new_document_data() ok_(Document.objects.get(slug=d.slug, locale=d.locale).show_toc) data['form'] = 'rev' data['toc_depth'] = 0 data['slug'] = d.slug data['title'] = d.title self.client.post(reverse('wiki.edit', args=[d.slug]), data) doc = Document.objects.get(slug=d.slug, locale=d.locale) eq_(0, doc.current_revision.toc_depth) @attr('toc') def test_toc_toggle_on(self): """Toggling of table of contents in revisions""" self.client.login(username='admin', password='testpass') d, r = doc_rev() new_r = revision(document=d, content=r.content, toc_depth=0, is_approved=True) new_r.save() ok_(not Document.objects.get(slug=d.slug, locale=d.locale).show_toc) data = new_document_data() data['form'] = 'rev' data['slug'] = d.slug data['title'] = d.title self.client.post(reverse('wiki.edit', args=[d.slug]), data) ok_(Document.objects.get(slug=d.slug, locale=d.locale).show_toc) def test_parent_topic(self): """Selection of a parent topic when creating a document.""" self.client.login(username='admin', password='testpass') d = document(title='HTML8') d.save() r = revision(document=d) r.save() data = new_document_data() data['title'] = 'Replicated local storage' data['parent_topic'] = d.id resp = self.client.post(reverse('wiki.create'), data) eq_(302, resp.status_code) ok_(d.children.count() == 1) ok_(d.children.all()[0].title == 'Replicated local storage') def test_repair_breadcrumbs(self): english_top = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='English top', save=True) english_mid = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='English mid', parent_topic=english_top, save=True) english_bottom = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='English bottom', parent_topic=english_mid, save=True) french_top = document(locale='fr', title='French top', parent=english_top, save=True) french_mid = document(locale='fr', title='French mid', parent=english_mid, parent_topic=english_mid, save=True) french_bottom = document(locale='fr', title='French bottom', parent=english_bottom, parent_topic=english_bottom, save=True) self.client.login(username='admin', password='testpass') resp = self.client.get(reverse('wiki.repair_breadcrumbs', args=[french_bottom.slug], locale='fr')) eq_(302, resp.status_code) ok_(french_bottom.get_absolute_url() in resp['Location']) french_bottom_fixed = Document.objects.get(locale='fr', title=french_bottom.title) eq_(french_mid.id, french_bottom_fixed.parent_topic.id) eq_(french_top.id, french_bottom_fixed.parent_topic.parent_topic.id) def test_translate_on_edit(self): d1 = document(title="Doc1", locale=settings.WIKI_DEFAULT_LANGUAGE, save=True) revision(document=d1, save=True) d2 = 
document(title="TransDoc1", locale='de', parent=d1, save=True) revision(document=d2, save=True) self.client.login(username='admin', password='testpass') url = reverse('wiki.edit', args=(d2.slug,), locale=d2.locale) resp = self.client.get(url) eq_(200, resp.status_code) def test_discard_location(self): """Testing that the 'discard' HREF goes to the correct place when it's explicitely and implicitely set""" self.client.login(username='admin', password='testpass') def _create_doc(slug, locale): doc = document(slug=slug, is_localizable=True, locale=locale) doc.save() r = revision(document=doc) r.save() return doc # Test that the 'discard' button on an edit goes to the original page doc = _create_doc('testdiscarddoc', settings.WIKI_DEFAULT_LANGUAGE) response = self.client.get(reverse('wiki.edit', args=[doc.slug], locale=doc.locale)) eq_(pq(response.content).find('.btn-discard').attr('href'), reverse('wiki.document', args=[doc.slug], locale=doc.locale)) # Test that the 'discard button on a new translation goes # to the en-US page' response = self.client.get(reverse('wiki.translate', args=[doc.slug], locale=doc.locale) + '?tolocale=es') eq_(pq(response.content).find('.btn-discard').attr('href'), reverse('wiki.document', args=[doc.slug], locale=doc.locale)) # Test that the 'discard' button on an existing translation goes # to the 'es' page foreign_doc = _create_doc('testdiscarddoc', 'es') response = self.client.get(reverse('wiki.edit', args=[foreign_doc.slug], locale=foreign_doc.locale)) eq_(pq(response.content).find('.btn-discard').attr('href'), reverse('wiki.document', args=[foreign_doc.slug], locale=foreign_doc.locale)) # Test new response = self.client.get(reverse('wiki.create', locale=settings.WIKI_DEFAULT_LANGUAGE)) eq_(pq(response.content).find('.btn-discard').attr('href'), reverse('wiki.create', locale=settings.WIKI_DEFAULT_LANGUAGE)) @override_config(KUMASCRIPT_TIMEOUT=1.0) @mock.patch('kuma.wiki.kumascript.get') def test_revert(self, mock_kumascript_get): self.client.login(username='admin', password='testpass') mock_kumascript_get.return_value = ( 'lorem ipsum dolor sit amet', None) data = new_document_data() data['title'] = 'A Test Article For Reverting' data['slug'] = 'test-article-for-reverting' response = self.client.post(reverse('wiki.create'), data) doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE, slug='test-article-for-reverting') rev = doc.revisions.order_by('-id').all()[0] data['content'] = 'Not lorem ipsum anymore' data['comment'] = 'Nobody likes Latin anyway' response = self.client.post(reverse('wiki.edit', args=[doc.slug]), data) mock_kumascript_get.called = False response = self.client.post(reverse('wiki.revert_document', args=[doc.slug, rev.id]), {'revert': True, 'comment': 'Blah blah'}) ok_(mock_kumascript_get.called, "kumascript should have been used") ok_(302 == response.status_code) rev = doc.revisions.order_by('-id').all()[0] ok_('lorem ipsum dolor sit amet' == rev.content) ok_('Blah blah' in rev.comment) mock_kumascript_get.called = False rev = doc.revisions.order_by('-id').all()[1] response = self.client.post(reverse('wiki.revert_document', args=[doc.slug, rev.id]), {'revert': True}) ok_(302 == response.status_code) rev = doc.revisions.order_by('-id').all()[0] ok_(': ' not in rev.comment) ok_(mock_kumascript_get.called, "kumascript should have been used") def test_revert_moved(self): doc = document(slug='move-me', save=True) rev = revision(document=doc, save=True) prev_rev_id = rev.id doc._move_tree('moved-doc') self.client.login(username='admin', 
password='testpass') resp = self.client.post(reverse('wiki.revert_document', args=[doc.slug, prev_rev_id], locale=doc.locale), follow=True) eq_(200, resp.status_code) ok_("cannot revert a document that has been moved" in resp.content) def test_store_revision_ip(self): self.client.login(username='testuser', password='testpass') data = new_document_data() slug = 'test-article-for-storing-revision-ip' data.update({'title': 'A Test Article For Storing Revision IP', 'slug': slug}) self.client.post(reverse('wiki.create'), data) doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug) data.update({'form': 'rev', 'content': 'This revision should NOT record IP', 'comment': 'This revision should NOT record IP'}) self.client.post(reverse('wiki.edit', args=[doc.slug]), data) eq_(0, RevisionIP.objects.all().count()) Switch.objects.create(name='store_revision_ips', active=True) data.update({'content': 'Store the IP address for the revision.', 'comment': 'Store the IP address for the revision.'}) self.client.post(reverse('wiki.edit', args=[doc.slug]), data) eq_(1, RevisionIP.objects.all().count()) rev = doc.revisions.order_by('-id').all()[0] rev_ip = RevisionIP.objects.get(revision=rev) eq_('127.0.0.1', rev_ip.ip) @mock.patch.object(Site.objects, 'get_current') def test_email_for_first_edits(self, get_current): get_current.return_value.domain = 'dev.mo.org' self.client.login(username='testuser', password='testpass') data = new_document_data() slug = 'test-article-for-storing-revision-ip' data.update({'title': 'A Test Article For First Edit Emails', 'slug': slug}) self.client.post(reverse('wiki.create'), data) eq_(1, len(mail.outbox)) doc = Document.objects.get( locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug) data.update({'form': 'rev', 'content': 'This edit should not send an email', 'comment': 'This edit should not send an email'}) self.client.post(reverse('wiki.edit', args=[doc.slug]), data) eq_(1, len(mail.outbox)) self.client.login(username='admin', password='testpass') data.update({'content': 'Admin first edit should send an email', 'comment': 'Admin first edit should send an email'}) self.client.post(reverse('wiki.edit', args=[doc.slug]), data) eq_(2, len(mail.outbox)) def _check_message_for_headers(message, username): ok_("%s made their first edit" % username in message.subject) eq_({'X-Kuma-Document-Url': "https://dev.mo.org%s" % doc.get_absolute_url(), 'X-Kuma-Editor-Username': username}, message.extra_headers) testuser_message = mail.outbox[0] admin_message = mail.outbox[1] _check_message_for_headers(testuser_message, 'testuser') _check_message_for_headers(admin_message, 'admin') class DocumentWatchTests(UserTestCase, WikiTestCase): """Tests for un/subscribing to document edit notifications.""" localizing_client = True def setUp(self): super(DocumentWatchTests, self).setUp() self.document, self.r = doc_rev() self.client.login(username='testuser', password='testpass') def test_watch_GET_405(self): """Watch document with HTTP GET results in 405.""" response = self.client.get(reverse('wiki.subscribe', args=[self.document.slug]), follow=True) eq_(405, response.status_code) def test_unwatch_GET_405(self): """Unwatch document with HTTP GET results in 405.""" response = self.client.get(reverse('wiki.subscribe', args=[self.document.slug]), follow=True) eq_(405, response.status_code) def test_watch_unwatch(self): """Watch and unwatch a document.""" user = self.user_model.objects.get(username='testuser') # Subscribe response = self.client.post(reverse('wiki.subscribe', 
args=[self.document.slug]), follow=True) eq_(200, response.status_code) assert EditDocumentEvent.is_notifying(user, self.document), \ 'Watch was not created' # Unsubscribe response = self.client.post(reverse('wiki.subscribe', args=[self.document.slug]), follow=True) eq_(200, response.status_code) assert not EditDocumentEvent.is_notifying(user, self.document), \ 'Watch was not destroyed' class SectionEditingResourceTests(UserTestCase, WikiTestCase): localizing_client = True def test_raw_source(self): """The raw source for a document can be requested""" self.client.login(username='admin', password='testpass') d, r = doc_rev(""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) expected = """ <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """ Switch.objects.create(name='application_ACAO', active=True) response = self.client.get('%s?raw=true' % reverse('wiki.document', args=[d.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') ok_('Access-Control-Allow-Origin' in response) eq_('*', response['Access-Control-Allow-Origin']) eq_(normalize_html(expected), normalize_html(response.content)) @attr('bug821986') def test_raw_editor_safety_filter(self): """Safety filter should be applied before rendering editor""" self.client.login(username='admin', password='testpass') d, r = doc_rev(""" <p onload=alert(3)>FOO</p> <svg><circle onload=confirm(3)>HI THERE</circle></svg> """) response = self.client.get('%s?raw=true' % reverse('wiki.document', args=[d.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') ok_('<p onload=' not in response.content) ok_('<circle onload=' not in response.content) def test_raw_with_editing_links_source(self): """The raw source for a document can be requested, with section editing links""" self.client.login(username='admin', password='testpass') d, r = doc_rev(""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) expected = """ <h1 id="s1"><a class="edit-section" data-section-id="s1" data-section-src-url="/en-US/docs/%(slug)s?raw=true&amp;section=s1" href="/en-US/docs/%(slug)s$edit?section=s1&amp;edit_links=true" title="Edit section">Edit</a>s1</h1> <p>test</p> <p>test</p> <h1 id="s2"><a class="edit-section" data-section-id="s2" data-section-src-url="/en-US/docs/%(slug)s?raw=true&amp;section=s2" href="/en-US/docs/%(slug)s$edit?section=s2&amp;edit_links=true" title="Edit section">Edit</a>s2</h1> <p>test</p> <p>test</p> <h1 id="s3"><a class="edit-section" data-section-id="s3" data-section-src-url="/en-US/docs/%(slug)s?raw=true&amp;section=s3" href="/en-US/docs/%(slug)s$edit?section=s3&amp;edit_links=true" title="Edit section">Edit</a>s3</h1> <p>test</p> <p>test</p> """ % {'slug': d.slug} response = self.client.get('%s?raw=true&edit_links=true' % reverse('wiki.document', args=[d.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(normalize_html(expected), normalize_html(response.content)) def test_raw_section_source(self): """The raw source for a document section can be requested""" self.client.login(username='admin', password='testpass') d, r = doc_rev(""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) expected = """ <h1 id="s2">s2</h1> <p>test</p> <p>test</p> """ response = self.client.get('%s?section=s2&raw=true' % reverse('wiki.document', 
args=[d.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(normalize_html(expected), normalize_html(response.content)) @attr('midair') @attr('rawsection') def test_raw_section_edit(self): self.client.login(username='admin', password='testpass') d, r = doc_rev(""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) replace = """ <h1 id="s2">s2</h1> <p>replace</p> """ expected = """ <h1 id="s2">s2</h1> <p>replace</p> """ response = self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[d.slug]), {"form": "rev", "slug": d.slug, "content": replace}, follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(normalize_html(expected), normalize_html(response.content)) expected = """ <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>replace</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """ response = self.client.get('%s?raw=true' % reverse('wiki.document', args=[d.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(normalize_html(expected), normalize_html(response.content)) @attr('midair') def test_midair_section_merge(self): """If a page was changed while someone was editing, but the changes didn't affect the specific section being edited, then ignore the midair warning""" self.client.login(username='admin', password='testpass') doc, rev = doc_rev(""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) replace_1 = """ <h1 id="replace1">replace1</h1> <p>replace</p> """ replace_2 = """ <h1 id="replace2">replace2</h1> <p>replace</p> """ expected = """ <h1 id="replace1">replace1</h1> <p>replace</p> <h1 id="replace2">replace2</h1> <p>replace</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """ data = { 'form': 'rev', 'content': rev.content, 'slug': '' } # Edit #1 starts... resp = self.client.get('%s?section=s1' % reverse('wiki.edit', args=[doc.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') page = pq(resp.content) rev_id1 = page.find('input[name="current_rev"]').attr('value') # Edit #2 starts... resp = self.client.get('%s?section=s2' % reverse('wiki.edit', args=[doc.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') page = pq(resp.content) rev_id2 = page.find('input[name="current_rev"]').attr('value') # Edit #2 submits successfully data.update({ 'form': 'rev', 'content': replace_2, 'current_rev': rev_id2, 'slug': doc.slug }) resp = self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[doc.slug]), data, HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(302, resp.status_code) # Edit #1 submits, but since it's a different section, there's no # mid-air collision data.update({ 'form': 'rev', 'content': replace_1, 'current_rev': rev_id1 }) resp = self.client.post('%s?section=s1&raw=true' % reverse('wiki.edit', args=[doc.slug]), data, HTTP_X_REQUESTED_WITH='XMLHttpRequest') # No conflict, but we should get a 205 Reset as an indication that the # page needs a refresh. 
eq_(205, resp.status_code) # Finally, make sure that all the edits landed response = self.client.get('%s?raw=true' % reverse('wiki.document', args=[doc.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(normalize_html(expected), normalize_html(response.content)) # Also, ensure that the revision is slipped into the headers eq_(unicode(Document.objects.get(slug=doc.slug, locale=doc.locale) .current_revision.id), unicode(response['x-kuma-revision'])) @attr('midair') def test_midair_section_collision(self): """If both a revision and the edited section has changed, then a section edit is a collision.""" self.client.login(username='admin', password='testpass') doc, rev = doc_rev(""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) replace_1 = """ <h1 id="s2">replace</h1> <p>replace</p> """ replace_2 = """ <h1 id="s2">first replace</h1> <p>first replace</p> """ data = { 'form': 'rev', 'content': rev.content } # Edit #1 starts... resp = self.client.get('%s?section=s2' % reverse('wiki.edit', args=[doc.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') page = pq(resp.content) rev_id1 = page.find('input[name="current_rev"]').attr('value') # Edit #2 starts... resp = self.client.get('%s?section=s2' % reverse('wiki.edit', args=[doc.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') page = pq(resp.content) rev_id2 = page.find('input[name="current_rev"]').attr('value') # Edit #2 submits successfully data.update({ 'form': 'rev', 'content': replace_2, 'slug': doc.slug, 'current_rev': rev_id2 }) resp = self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[doc.slug]), data, HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(302, resp.status_code) # Edit #1 submits, but since it's the same section, there's a collision data.update({ 'form': 'rev', 'content': replace_1, 'current_rev': rev_id1 }) resp = self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[doc.slug]), data, HTTP_X_REQUESTED_WITH='XMLHttpRequest') # With the raw API, we should get a 409 Conflict on collision. 
eq_(409, resp.status_code) def test_raw_include_option(self): doc_src = u""" <div class="noinclude">{{ XULRefAttr() }}</div> <dl> <dt>{{ XULAttr(&quot;maxlength&quot;) }}</dt> <dd>Type: <em>integer</em></dd> <dd>Przykłady 例 예제 示例</dd> </dl> <div class="noinclude"> <p>{{ languages( { &quot;ja&quot;: &quot;ja/XUL/Attribute/maxlength&quot; } ) }}</p> </div> """ doc, rev = doc_rev(doc_src) expected = u""" <dl> <dt>{{ XULAttr(&quot;maxlength&quot;) }}</dt> <dd>Type: <em>integer</em></dd> <dd>Przykłady 例 예제 示例</dd> </dl> """ resp = self.client.get('%s?raw&include' % reverse('wiki.document', args=[doc.slug]), HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(normalize_html(expected), normalize_html(resp.content.decode('utf-8'))) def test_section_edit_toc(self): """show_toc is preserved in section editing.""" self.client.login(username='admin', password='testpass') doc, rev = doc_rev(""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) rev.toc_depth = 1 rev.save() replace = """ <h1 id="s2">s2</h1> <p>replace</p> """ self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[doc.slug]), {"form": "rev", "slug": doc.slug, "content": replace}, follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest') changed = Document.objects.get(pk=doc.id).current_revision ok_(rev.id != changed.id) eq_(1, changed.toc_depth) def test_section_edit_review_tags(self): """review tags are preserved in section editing.""" self.client.login(username='admin', password='testpass') doc, rev = doc_rev(""" <h1 id="s1">s1</h1> <p>test</p> <p>test</p> <h1 id="s2">s2</h1> <p>test</p> <p>test</p> <h1 id="s3">s3</h1> <p>test</p> <p>test</p> """) tags_to_save = ['bar', 'foo'] rev.save() rev.review_tags.set(*tags_to_save) replace = """ <h1 id="s2">s2</h1> <p>replace</p> """ self.client.post('%s?section=s2&raw=true' % reverse('wiki.edit', args=[doc.slug]), {"form": "rev", "slug": doc.slug, "content": replace}, follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest') changed = Document.objects.get(pk=doc.id).current_revision ok_(rev.id != changed.id) eq_(set(tags_to_save), set([t.name for t in changed.review_tags.all()])) class MindTouchRedirectTests(UserTestCase, WikiTestCase): """ Test that we appropriately redirect old-style MindTouch URLs to new-style kuma URLs. """ # A note on these tests: we could try to use assertRedirects on # these, but for the most part we're just constructing a URL # similar enough to the wiki app's own built-in redirects that # it'll pick up the request and do what we want with it. But it # may end up issuing its own redirects, which are tricky to sort # out from the ones the legacy MindTouch handling will emit, so # instead we just test that A) we did issue a redirect and B) the # URL we constructed is enough for the document views to go on. localizing_client = True server_prefix = 'http://testserver/%s/docs' % settings.WIKI_DEFAULT_LANGUAGE namespace_urls = ( # One for each namespace. 
{'mindtouch': '/Help:Foo', 'kuma': '%s/Help:Foo' % server_prefix}, {'mindtouch': '/Help_talk:Foo', 'kuma': '%s/Help_talk:Foo' % server_prefix}, {'mindtouch': '/Project:En/MDC_editor_guide', 'kuma': '%s/Project:MDC_editor_guide' % server_prefix}, {'mindtouch': '/Project_talk:En/MDC_style_guide', 'kuma': '%s/Project_talk:MDC_style_guide' % server_prefix}, {'mindtouch': '/Special:Foo', 'kuma': '%s/Special:Foo' % server_prefix}, {'mindtouch': '/Talk:en/Foo', 'kuma': '%s/Talk:Foo' % server_prefix}, {'mindtouch': '/Template:Foo', 'kuma': '%s/Template:Foo' % server_prefix}, {'mindtouch': '/User:Foo', 'kuma': '%s/User:Foo' % server_prefix}, ) documents = ( {'title': 'XHTML', 'mt_locale': 'cn', 'kuma_locale': 'zh-CN', 'expected': '/zh-CN/docs/XHTML'}, {'title': 'JavaScript', 'mt_locale': 'zh_cn', 'kuma_locale': 'zh-CN', 'expected': '/zh-CN/docs/JavaScript'}, {'title': 'XHTML6', 'mt_locale': 'zh_tw', 'kuma_locale': 'zh-CN', 'expected': '/zh-TW/docs/XHTML6'}, {'title': 'HTML7', 'mt_locale': 'fr', 'kuma_locale': 'fr', 'expected': '/fr/docs/HTML7'}, ) def test_namespace_urls(self): new_doc = document() new_doc.title = 'User:Foo' new_doc.slug = 'User:Foo' new_doc.save() for namespace_test in self.namespace_urls: resp = self.client.get(namespace_test['mindtouch'], follow=False) eq_(301, resp.status_code) eq_(namespace_test['kuma'], resp['Location']) def test_trailing_slash(self): d = document() d.locale = 'zh-CN' d.slug = 'foofoo' d.title = 'FooFoo' d.save() mt_url = '/cn/%s/' % (d.slug,) resp = self.client.get(mt_url) eq_(301, resp.status_code) eq_('http://testserver%s' % d.get_absolute_url(), resp['Location']) def test_document_urls(self): for doc in self.documents: d = document() d.title = doc['title'] d.slug = doc['title'] d.locale = doc['kuma_locale'] d.save() mt_url = '/%s' % '/'.join([doc['mt_locale'], doc['title']]) resp = self.client.get(mt_url) eq_(301, resp.status_code) eq_('http://testserver%s' % doc['expected'], resp['Location']) def test_view_param(self): d = document() d.locale = settings.WIKI_DEFAULT_LANGUAGE d.slug = 'HTML/HTML5' d.title = 'HTML 5' d.save() mt_url = '/en-US/%s?view=edit' % (d.slug,) resp = self.client.get(mt_url) eq_(301, resp.status_code) expected_url = 'http://testserver%s$edit' % d.get_absolute_url() eq_(expected_url, resp['Location']) class AutosuggestDocumentsTests(WikiTestCase): """ Test the we're properly filtering out the Redirects from the document list """ localizing_client = True def test_autosuggest_no_term(self): url = reverse('wiki.autosuggest_documents', locale=settings.WIKI_DEFAULT_LANGUAGE) resp = self.client.get(url) eq_(400, resp.status_code) def test_document_redirects(self): # All contain "e", so that will be the search term invalid_documents = ( { 'title': 'Something Redirect 8', 'html': 'REDIRECT <a class="redirect" href="/blah">Something Redirect</a>', 'is_redirect': 1 }, ) valid_documents = ( {'title': 'e 6', 'html': '<p>Blah text Redirect'}, {'title': 'e 7', 'html': 'AppleTalk'}, {'title': 'Response.Redirect'}, ) for doc in invalid_documents + valid_documents: d = document() d.title = doc['title'] if 'html' in doc: d.html = doc['html'] if 'slug' in doc: d.slug = doc['slug'] if 'is_redirect' in doc: d.is_redirect = 1 d.save() url = reverse('wiki.autosuggest_documents', locale=settings.WIKI_DEFAULT_LANGUAGE) + '?term=e' Switch.objects.create(name='application_ACAO', active=True) resp = self.client.get(url) ok_('Access-Control-Allow-Origin' in resp) eq_('*', resp['Access-Control-Allow-Origin']) eq_(200, resp.status_code) data = 
json.loads(resp.content) eq_(len(data), len(valid_documents)) # Ensure that the valid docs found are all in the valid list for d in data: found = False for v in valid_documents: if v['title'] in d['title']: found = True break eq_(True, found) def test_list_no_redirects(self): Document.objects.all().delete() invalid_documents = [ { 'title': 'Something Redirect 8', 'slug': 'xx', 'html': 'REDIRECT <a class="redirect" href="%s">yo</a>' % settings.SITE_URL }, { 'title': 'My Template', 'slug': 'Template:Something', 'html': 'blah', }, ] valid_documents = [ {'title': 'A Doc', 'slug': 'blah', 'html': 'Blah blah blah'} ] for doc in invalid_documents + valid_documents: document(save=True, slug=doc['slug'], title=doc['title'], html=doc['html']) resp = self.client.get(reverse('wiki.all_documents', locale=settings.WIKI_DEFAULT_LANGUAGE)) eq_(len(valid_documents), len(pq(resp.content).find('.document-list li'))) class CodeSampleViewTests(UserTestCase, WikiTestCase): localizing_client = True @override_config( KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/testserver') def test_code_sample_1(self): """The raw source for a document can be requested""" d, r = doc_rev(""" <p>This is a page. Deal with it.</p> <div id="sample1" class="code-sample"> <pre class="brush: html">Some HTML</pre> <pre class="brush: css">.some-css { color: red; }</pre> <pre class="brush: js">window.alert("HI THERE")</pre> </div> <p>test</p> """) expecteds = ( '<style type="text/css">.some-css { color: red; }</style>', 'Some HTML', '<script type="text/javascript">window.alert("HI THERE")</script>', ) Switch.objects.create(name='application_ACAO', active=True) response = self.client.get(reverse('wiki.code_sample', args=[d.slug, 'sample1']), HTTP_HOST='testserver') ok_('Access-Control-Allow-Origin' in response) eq_('*', response['Access-Control-Allow-Origin']) eq_(200, response.status_code) normalized = normalize_html(response.content) # Content checks ok_('<!DOCTYPE html>' in response.content) for item in expecteds: ok_(item in normalized) @override_config( KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/sampleserver') def test_code_sample_host_restriction(self): d, r = doc_rev(""" <p>This is a page. Deal with it.</p> <div id="sample1" class="code-sample"> <pre class="brush: html">Some HTML</pre> <pre class="brush: css">.some-css { color: red; }</pre> <pre class="brush: js">window.alert("HI THERE")</pre> </div> <p>test</p> """) response = self.client.get(reverse('wiki.code_sample', args=[d.slug, 'sample1']), HTTP_HOST='testserver') eq_(403, response.status_code) response = self.client.get(reverse('wiki.code_sample', args=[d.slug, 'sample1']), HTTP_HOST='sampleserver') eq_(200, response.status_code) @override_config( KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/sampleserver') def test_code_sample_iframe_embed(self): slug = 'test-code-embed' embed_url = ('https://sampleserver/%s/docs/%s$samples/sample1' % (settings.WIKI_DEFAULT_LANGUAGE, slug)) doc_src = """ <p>This is a page. 
Deal with it.</p> <div id="sample1" class="code-sample"> <pre class="brush: html">Some HTML</pre> <pre class="brush: css">.some-css { color: red; }</pre> <pre class="brush: js">window.alert("HI THERE")</pre> </div> <iframe id="if1" src="%(embed_url)s"></iframe> <iframe id="if2" src="http://testserver"></iframe> <iframe id="if3" src="https://some.alien.site.com"></iframe> <p>test</p> """ % dict(embed_url=embed_url) slug = 'test-code-doc' d, r = doc_rev() revision(save=True, document=d, title="Test code doc", slug=slug, content=doc_src) response = self.client.get(reverse('wiki.document', args=(d.slug,))) eq_(200, response.status_code) page = pq(response.content) if1 = page.find('#if1') eq_(if1.length, 1) eq_(if1.attr('src'), embed_url) if2 = page.find('#if2') eq_(if2.length, 1) eq_(if2.attr('src'), '') if3 = page.find('#if3') eq_(if3.length, 1) eq_(if3.attr('src'), '') class CodeSampleViewFileServingTests(UserTestCase, WikiTestCase): @override_config( KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/testserver', WIKI_ATTACHMENT_ALLOWED_TYPES='text/plain') @override_settings(ATTACHMENT_HOST='testserver') def test_code_sample_file_serving(self): self.client.login(username='admin', password='testpass') # first let's upload a file file_for_upload = make_test_file(content='Something something unique') post_data = { 'title': 'An uploaded file', 'description': 'A unique experience for your file serving needs.', 'comment': 'Yadda yadda yadda', 'file': file_for_upload, } response = self.client.post(reverse('attachments.new_attachment'), data=post_data) eq_(response.status_code, 302) # then build the document and revision we need to test attachment = Attachment.objects.get(title='An uploaded file') filename = attachment.current_revision.filename() url_css = 'url("files/%(attachment_id)s/%(filename)s")' % { 'attachment_id': attachment.id, 'filename': filename, } doc, rev = doc_rev(""" <p>This is a page. Deal with it.</p> <div id="sample1" class="code-sample"> <pre class="brush: html">Some HTML</pre> <pre class="brush: css">.some-css { background: %s }</pre> <pre class="brush: js">window.alert("HI THERE")</pre> </div> <p>test</p> """ % url_css) # then see of the code sample view has successfully found the sample response = self.client.get(reverse('wiki.code_sample', args=[doc.slug, 'sample1'], locale='en-US')) eq_(response.status_code, 200) normalized = normalize_html(response.content) ok_(url_css in normalized) # and then we try if a redirect by the file serving view redirects # to the main file serving view response = self.client.get(reverse('wiki.raw_code_sample_file', args=[doc.slug, 'sample1', attachment.id, filename], locale='en-US')) eq_(response.status_code, 302) eq_(response['Location'], attachment.get_file_url()) class DeferredRenderingViewTests(UserTestCase, WikiTestCase): """Tests for the deferred rendering system and interaction with views""" localizing_client = True def setUp(self): super(DeferredRenderingViewTests, self).setUp() self.rendered_content = 'HELLO RENDERED CONTENT' self.raw_content = 'THIS IS RAW CONTENT' self.d, self.r = doc_rev(self.raw_content) # Disable TOC, makes content inspection easier. 
self.r.toc_depth = 0 self.r.save() self.d.html = self.raw_content self.d.rendered_html = self.rendered_content self.d.save() self.url = reverse('wiki.document', args=(self.d.slug,), locale=self.d.locale) config.KUMASCRIPT_TIMEOUT = 5.0 config.KUMASCRIPT_MAX_AGE = 600 def tearDown(self): super(DeferredRenderingViewTests, self).tearDown() config.KUMASCRIPT_TIMEOUT = 0 config.KUMASCRIPT_MAX_AGE = 0 @mock.patch('kuma.wiki.kumascript.get') def test_rendered_content(self, mock_kumascript_get): """Document view should serve up rendered content when available""" mock_kumascript_get.return_value = (self.rendered_content, None) resp = self.client.get(self.url, follow=False) p = pq(resp.content) txt = p.find('#wikiArticle').text() ok_(self.rendered_content in txt) ok_(self.raw_content not in txt) eq_(0, p.find('#doc-rendering-in-progress').length) eq_(0, p.find('#doc-render-raw-fallback').length) def test_rendering_in_progress_warning(self): """Document view should serve up rendered content when available""" # Make the document look like there's a rendering in progress. self.d.render_started_at = datetime.datetime.now() self.d.save() resp = self.client.get(self.url, follow=False) p = pq(resp.content) txt = p.find('#wikiArticle').text() # Even though a rendering looks like it's in progress, ensure the # last-known render is displayed. ok_(self.rendered_content in txt) ok_(self.raw_content not in txt) eq_(0, p.find('#doc-rendering-in-progress').length) # Only for logged-in users, ensure the render-in-progress warning is # displayed. self.client.login(username='testuser', password='testpass') resp = self.client.get(self.url, follow=False) p = pq(resp.content) eq_(1, p.find('#doc-rendering-in-progress').length) @mock.patch('kuma.wiki.kumascript.get') def test_raw_content_during_initial_render(self, mock_kumascript_get): """Raw content should be displayed during a document's initial deferred rendering""" mock_kumascript_get.return_value = (self.rendered_content, None) # Make the document look like there's no rendered content, but that a # rendering is in progress. self.d.html = self.raw_content self.d.rendered_html = '' self.d.render_started_at = datetime.datetime.now() self.d.save() # Now, ensure that raw content is shown in the view. 
resp = self.client.get(self.url, follow=False) p = pq(resp.content) txt = p.find('#wikiArticle').text() ok_(self.rendered_content not in txt) ok_(self.raw_content in txt) eq_(0, p.find('#doc-render-raw-fallback').length) # Only for logged-in users, ensure that a warning is displayed about # the fallback self.client.login(username='testuser', password='testpass') resp = self.client.get(self.url, follow=False) p = pq(resp.content) eq_(1, p.find('#doc-render-raw-fallback').length) @attr('schedule_rendering') @mock.patch.object(Document, 'schedule_rendering') @mock.patch('kuma.wiki.kumascript.get') def test_schedule_rendering(self, mock_kumascript_get, mock_document_schedule_rendering): mock_kumascript_get.return_value = (self.rendered_content, None) self.client.login(username='testuser', password='testpass') data = new_document_data() data.update({ 'form': 'rev', 'content': 'This is an update', }) edit_url = reverse('wiki.edit', args=[self.d.slug]) resp = self.client.post(edit_url, data) eq_(302, resp.status_code) ok_(mock_document_schedule_rendering.called) mock_document_schedule_rendering.reset_mock() data.update({ 'form': 'both', 'content': 'This is a translation', }) translate_url = (reverse('wiki.translate', args=[data['slug']], locale=settings.WIKI_DEFAULT_LANGUAGE) + '?tolocale=fr') response = self.client.post(translate_url, data) eq_(302, response.status_code) ok_(mock_document_schedule_rendering.called) @mock.patch('kuma.wiki.kumascript.get') @mock.patch('requests.post') def test_alternate_bleach_whitelist(self, mock_requests_post, mock_kumascript_get): # Some test content with contentious tags. test_content = """ <p id="foo"> <a style="position: absolute; border: 1px;" href="http://example.com">This is a test</a> <textarea name="foo"></textarea> </p> """ # Expected result filtered through old/current Bleach rules expected_content_old = """ <p id="foo"> <a style="position: absolute; border: 1px;" href="http://example.com">This is a test</a> <textarea name="foo"></textarea> </p> """ # Expected result filtered through alternate whitelist expected_content_new = """ <p id="foo"> <a style="border: 1px;" href="http://example.com">This is a test</a> &lt;textarea name="foo"&gt;&lt;/textarea&gt; </p> """ # Set up an alternate set of whitelists... config.BLEACH_ALLOWED_TAGS = json.dumps([ "a", "p" ]) config.BLEACH_ALLOWED_ATTRIBUTES = json.dumps({ "a": ['href', 'style'], "p": ['id'] }) config.BLEACH_ALLOWED_STYLES = json.dumps([ "border" ]) config.KUMASCRIPT_TIMEOUT = 100 # Rig up a mocked response from KumaScript GET method mock_kumascript_get.return_value = (test_content, None) # Rig up a mocked response from KumaScript POST service # Digging a little deeper into the stack, so that the rest of # kumascript.post processing happens. from StringIO import StringIO m_resp = mock.Mock() m_resp.status_code = 200 m_resp.text = test_content m_resp.read = StringIO(test_content).read mock_requests_post.return_value = m_resp d, r = doc_rev(test_content) trials = ( (False, '', expected_content_old), (False, '&bleach_new', expected_content_old), (True, '', expected_content_old), (True, '&bleach_new', expected_content_new), ) for trial in trials: do_login, param, expected = trial if do_login: self.client.login(username='testuser', password='testpass') else: self.client.logout() url = ('%s?raw&macros%s' % ( reverse('wiki.document', args=(d.slug,), locale=d.locale), param)) resp = self.client.get(url, follow=True) eq_(normalize_html(expected), normalize_html(resp.content), "Should match? 
%s %s %s %s" % (do_login, param, expected, resp.content)) class APITests(UserTestCase, WikiTestCase): localizing_client = True def setUp(self): super(APITests, self).setUp() self.username = 'tester23' self.password = 'trustno1' self.email = 'tester23@example.com' self.user = user(username=self.username, email=self.email, password=self.password, save=True) self.key = Key(user=self.user, description='Test Key 1') self.secret = self.key.generate_secret() self.key_id = self.key.key self.key.save() auth = '%s:%s' % (self.key_id, self.secret) self.basic_auth = 'Basic %s' % base64.encodestring(auth) self.d, self.r = doc_rev(""" <h3 id="S1">Section 1</h3> <p>This is a page. Deal with it.</p> <h3 id="S2">Section 2</h3> <p>This is a page. Deal with it.</p> <h3 id="S3">Section 3</h3> <p>This is a page. Deal with it.</p> """) self.r.tags = "foo, bar, baz" self.r.review_tags.set('technical', 'editorial') self.url = self.d.get_absolute_url() def tearDown(self): super(APITests, self).tearDown() Document.objects.filter(current_revision__creator=self.user).delete() Revision.objects.filter(creator=self.user).delete() Key.objects.filter(user=self.user).delete() self.user.delete() def test_put_existing(self): """PUT API should allow overwrite of existing document content""" data = dict( summary="Look, I made an edit!", content=""" <p>This is an edit to the page. We've dealt with it.</p> """, ) # No auth key leads to a 403 Forbidden resp = self._put(self.url, data) eq_(403, resp.status_code) # But, this should work, given a proper auth key resp = self._put(self.url, data, HTTP_AUTHORIZATION=self.basic_auth) eq_(205, resp.status_code) # Verify the edit happened. curr_d = Document.objects.get(pk=self.d.pk) eq_(normalize_html(data['content'].strip()), normalize_html(Document.objects.get(pk=self.d.pk).html)) # Also, verify that this resulted in a new revision. curr_r = curr_d.current_revision ok_(self.r.pk != curr_r.pk) eq_(data['summary'], curr_r.summary) r_tags = ','.join(sorted(t.name for t in curr_r.review_tags.all())) eq_('editorial,technical', r_tags) def test_put_section_edit(self): """PUT API should allow overwrite of a specific section of an existing document""" data = dict( content=""" <h3 id="S2">Section 2</h3> <p>This is an edit to the page. We've dealt with it.</p> """, # Along with the section, let's piggyback in some other metadata # edits just for good measure. They're not tied to section edit # though. title="Hahah this is a new title!", tags="hello,quux,xyzzy", review_tags="technical", ) resp = self._put('%s?section=S2' % self.url, data, HTTP_AUTHORIZATION=self.basic_auth) eq_(205, resp.status_code) expected = """ <h3 id="S1">Section 1</h3> <p>This is a page. Deal with it.</p> <h3 id="S2">Section 2</h3> <p>This is an edit to the page. We've dealt with it.</p> <h3 id="S3">Section 3</h3> <p>This is a page. Deal with it.</p> """ # Verify the section edit happened. curr_d = Document.objects.get(pk=self.d.pk) eq_(normalize_html(expected.strip()), normalize_html(curr_d.html)) eq_(data['title'], curr_d.title) d_tags = ','.join(sorted(t.name for t in curr_d.tags.all())) eq_(data['tags'], d_tags) # Also, verify that this resulted in a new revision. 
curr_r = curr_d.current_revision ok_(self.r.pk != curr_r.pk) r_tags = ','.join(sorted(t.name for t in curr_r.review_tags.all())) eq_(data['review_tags'], r_tags) def test_put_new_root(self): """PUT API should allow creation of a document whose path would place it at the root of the topic hierarchy.""" slug = 'new-root-doc' url = reverse('wiki.document', args=(slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) data = dict( title="This is the title of a new page", content=""" <p>This is a new page, hooray!</p> """, tags="hello,quux,xyzzy", review_tags="technical", ) resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth) eq_(201, resp.status_code) def test_put_new_child(self): """PUT API should allow creation of a document whose path would make it a child of an existing parent.""" data = dict( title="This is the title of a new page", content=""" <p>This is a new page, hooray!</p> """, tags="hello,quux,xyzzy", review_tags="technical", ) # This first attempt should fail; the proposed parent does not exist. url = '%s/nonexistent/newchild' % self.url resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth) eq_(404, resp.status_code) # TODO: I suppose we could rework this part to create the chain of # missing parents with stub content, but currently this demands # that API users do that themselves. # Now, fill in the parent gap... p_doc = document(slug='%s/nonexistent' % self.d.slug, locale=settings.WIKI_DEFAULT_LANGUAGE, parent_topic=self.d) p_doc.save() p_rev = revision(document=p_doc, slug='%s/nonexistent' % self.d.slug, title='I EXIST NOW', save=True) p_rev.save() # The creation should work, now. resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth) eq_(201, resp.status_code) new_slug = '%s/nonexistent/newchild' % self.d.slug new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE, slug=new_slug) eq_(p_doc.pk, new_doc.parent_topic.pk) def test_put_unsupported_content_type(self): """PUT API should complain with a 400 Bad Request on an unsupported content type submission""" slug = 'new-root-doc' url = reverse('wiki.document', args=(slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) data = "I don't even know what this content is." 
resp = self._put(url, json.dumps(data), content_type='x-super-happy-fun-text', HTTP_AUTHORIZATION=self.basic_auth) eq_(400, resp.status_code) def test_put_json(self): """PUT API should handle application/json requests""" slug = 'new-root-json-doc' url = reverse('wiki.document', args=(slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) data = dict( title="This is the title of a new page", content=""" <p>This is a new page, hooray!</p> """, tags="hello,quux,xyzzy", review_tags="technical", ) resp = self._put(url, json.dumps(data), content_type='application/json', HTTP_AUTHORIZATION=self.basic_auth) eq_(201, resp.status_code) new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug) eq_(data['title'], new_doc.title) eq_(normalize_html(data['content']), normalize_html(new_doc.html)) def test_put_simple_html(self): """PUT API should handle text/html requests""" slug = 'new-root-html-doc-1' url = reverse('wiki.document', args=(slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) html = """ <p>This is a new page, hooray!</p> """ resp = self._put(url, html, content_type='text/html', HTTP_AUTHORIZATION=self.basic_auth) eq_(201, resp.status_code) new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug) eq_(normalize_html(html), normalize_html(new_doc.html)) def test_put_complex_html(self): """PUT API should handle text/html requests with complex HTML documents and extract document fields from the markup""" slug = 'new-root-html-doc-2' url = reverse('wiki.document', args=(slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) data = dict( title='This is a complex document', content=""" <p>This is a new page, hooray!</p> """, ) html = """ <html> <head> <title>%(title)s</title> </head> <body>%(content)s</body> </html> """ % data resp = self._put(url, html, content_type='text/html', HTTP_AUTHORIZATION=self.basic_auth) eq_(201, resp.status_code) new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug) eq_(data['title'], new_doc.title) eq_(normalize_html(data['content']), normalize_html(new_doc.html)) # TODO: Anything else useful to extract from HTML? # Extract tags from head metadata? def test_put_track_authkey(self): """Revisions modified by PUT API should track the auth key used""" slug = 'new-root-doc' url = reverse('wiki.document', args=(slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) data = dict( title="This is the title of a new page", content=""" <p>This is a new page, hooray!</p> """, tags="hello,quux,xyzzy", review_tags="technical", ) resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth) eq_(201, resp.status_code) last_log = self.key.history.order_by('-pk').all()[0] eq_('created', last_log.action) data['title'] = 'New title for old page' resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth) eq_(205, resp.status_code) last_log = self.key.history.order_by('-pk').all()[0] eq_('updated', last_log.action) def test_put_etag_conflict(self): """A PUT request with an if-match header throws a 412 Precondition Failed if the underlying document has been changed.""" resp = self.client.get(self.url) orig_etag = resp['ETag'] content1 = """ <h2 id="s1">Section 1</h2> <p>New section 1</p> <h2 id="s2">Section 2</h2> <p>New section 2</p> """ # First update should work. resp = self._put(self.url, dict(content=content1), HTTP_IF_MATCH=orig_etag, HTTP_AUTHORIZATION=self.basic_auth) eq_(205, resp.status_code) # Get the new etag, ensure it doesn't match the original. 
resp = self.client.get(self.url) new_etag = resp['ETag'] ok_(orig_etag != new_etag) # But, the ETag should have changed, so this update shouldn't work. # Using the old ETag suggests a mid-air edit collision happened. resp = self._put(self.url, dict(content=content1), HTTP_IF_MATCH=orig_etag, HTTP_AUTHORIZATION=self.basic_auth) eq_(412, resp.status_code) # Just for good measure, switching to the new ETag should work resp = self._put(self.url, dict(content=content1), HTTP_IF_MATCH=new_etag, HTTP_AUTHORIZATION=self.basic_auth) eq_(205, resp.status_code) def _put(self, path, data={}, content_type=MULTIPART_CONTENT, follow=False, **extra): """django.test.client.put() does the wrong thing, here. This does better, based on post().""" if content_type is MULTIPART_CONTENT: post_data = encode_multipart(BOUNDARY, data) else: # Encode the content so that the byte representation is correct. match = CONTENT_TYPE_RE.match(content_type) if match: charset = match.group(1) else: charset = settings.DEFAULT_CHARSET post_data = smart_str(data, encoding=charset) parsed = urlparse(path) params = { 'CONTENT_LENGTH': len(post_data), 'CONTENT_TYPE': content_type, 'PATH_INFO': self.client._get_path(parsed), 'QUERY_STRING': parsed[4], 'REQUEST_METHOD': 'PUT', 'wsgi.input': FakePayload(post_data), } params.update(extra) response = self.client.request(**params) if follow: response = self.client._handle_redirects(response, **extra) return response class PageMoveTests(UserTestCase, WikiTestCase): localizing_client = True def setUp(self): super(PageMoveTests, self).setUp() page_move_flag = Flag.objects.create(name='page_move') page_move_flag.users = self.user_model.objects.filter(is_superuser=True) page_move_flag.save() def test_move_conflict(self): parent = revision(title='Test page move views', slug='test-page-move-views', is_approved=True, save=True) parent_doc = parent.document child = revision(title='Child of page-move view test', slug='page-move/test-views', is_approved=True, save=True) child_doc = child.document child_doc.parent_topic = parent.document child_doc.save() revision(title='Conflict for page-move view', slug='moved/test-page-move-views/test-views', is_approved=True, save=True) data = {'slug': 'moved/test-page-move-views'} self.client.login(username='admin', password='testpass') resp = self.client.post(reverse('wiki.move', args=(parent_doc.slug,), locale=parent_doc.locale), data=data) eq_(200, resp.status_code) class DocumentZoneTests(UserTestCase, WikiTestCase): localizing_client = True def setUp(self): super(DocumentZoneTests, self).setUp() root_rev = revision(title='ZoneRoot', slug='ZoneRoot', content='This is the Zone Root', is_approved=True, save=True) self.root_doc = root_rev.document middle_rev = revision(title='middlePage', slug='middlePage', content='This is a middlepage', is_approved=True, save=True) self.middle_doc = middle_rev.document self.middle_doc.parent_topic = self.root_doc self.middle_doc.save() sub_rev = revision(title='SubPage', slug='SubPage', content='This is a subpage', is_approved=True, save=True) self.sub_doc = sub_rev.document self.sub_doc.parent_topic = self.middle_doc self.sub_doc.save() self.root_zone = DocumentZone(document=self.root_doc) self.root_zone.styles = """ article { color: blue; } """ self.root_zone.save() self.middle_zone = DocumentZone(document=self.middle_doc) self.middle_zone.styles = """ article { font-weight: bold; } """ self.middle_zone.save() def test_zone_styles(self): """Ensure CSS styles for a zone can be fetched""" url = reverse('wiki.styles', 
args=(self.root_doc.slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) response = self.client.get(url, follow=True) eq_(self.root_zone.styles, response.content) url = reverse('wiki.styles', args=(self.middle_doc.slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) response = self.client.get(url, follow=True) eq_(self.middle_zone.styles, response.content) url = reverse('wiki.styles', args=(self.sub_doc.slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) response = self.client.get(url, follow=True) eq_(404, response.status_code) def test_zone_styles_links(self): """Ensure link to zone style appears in child document views""" url = reverse('wiki.document', args=(self.sub_doc.slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) response = self.client.get(url, follow=True) styles_url = reverse('wiki.styles', args=(self.root_doc.slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) root_expected = ('<link rel="stylesheet" type="text/css" href="%s"' % styles_url) ok_(root_expected in response.content) styles_url = reverse('wiki.styles', args=(self.middle_doc.slug,), locale=settings.WIKI_DEFAULT_LANGUAGE) middle_expected = ('<link rel="stylesheet" type="text/css" href="%s"' % styles_url) ok_(middle_expected in response.content) class ListDocumentTests(UserTestCase, WikiTestCase): """Tests for list_documents view""" localizing_client = True fixtures = UserTestCase.fixtures + ['wiki/documents.json'] def test_case_insensitive_tags(self): """ Bug 976071 - Tags should be case insensitive https://bugzil.la/976071 """ lower_tag = DocumentTag.objects.create(name='foo', slug='foo') lower_tag.save() doc = Document.objects.get(pk=1) doc.tags.set(lower_tag) response = self.client.get(reverse('wiki.tag', args=['foo'])) ok_(doc.slug in response.content.decode('utf-8')) response = self.client.get(reverse('wiki.tag', args=['Foo'])) ok_(doc.slug in response.content.decode('utf-8'))
mpl-2.0
-8,583,401,128,490,466,000
2,108,430,497,269,176,300
40.226225
237
0.526138
false
odoomrp/odoomrp-wip
crm_claim_links/models/res_partner.py
31
1096
# -*- encoding: utf-8 -*- ############################################################################## # # Daniel Campos (danielcampos@avanzosc.es) Date: 26/08/2014 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## from openerp import fields, models class ResPartner(models.Model): _inherit = 'res.partner' claims = fields.One2many('crm.claim', 'partner_id', string='Claims')
agpl-3.0
8,433,713,325,418,169,000
1,243,610,189,025,086,000
38.142857
78
0.612226
false
Preetwinder/scrapy
tests/test_spidermiddleware_depth.py
136
1348
from unittest import TestCase from scrapy.spidermiddlewares.depth import DepthMiddleware from scrapy.http import Response, Request from scrapy.spiders import Spider from scrapy.statscollectors import StatsCollector from scrapy.utils.test import get_crawler class TestDepthMiddleware(TestCase): def setUp(self): crawler = get_crawler(Spider) self.spider = crawler._create_spider('scrapytest.org') self.stats = StatsCollector(crawler) self.stats.open_spider(self.spider) self.mw = DepthMiddleware(1, self.stats, True) def test_process_spider_output(self): req = Request('http://scrapytest.org') resp = Response('http://scrapytest.org') resp.request = req result = [Request('http://scrapytest.org')] out = list(self.mw.process_spider_output(resp, result, self.spider)) self.assertEquals(out, result) rdc = self.stats.get_value('request_depth_count/1', spider=self.spider) self.assertEquals(rdc, 1) req.meta['depth'] = 1 out2 = list(self.mw.process_spider_output(resp, result, self.spider)) self.assertEquals(out2, []) rdm = self.stats.get_value('request_depth_max', spider=self.spider) self.assertEquals(rdm, 1) def tearDown(self): self.stats.close_spider(self.spider, '')
bsd-3-clause
4,176,938,898,376,451,600
-6,102,641,437,574,541,000
30.348837
79
0.674332
false
Bladefidz/wfuzz
plugins/iterations.py
1
2703
from externals.moduleman.plugin import moduleman_plugin import itertools class piterator_void: text="void" def count(self): return self.__count def __init__(self, *i): self._dic = i self.__count = max(map(lambda x:x.count(), i)) self.it = self._dic[0] def next(self): return (self.it.next(),) def restart(self): for dic in self._dic: dic.restart() self.it = self._dic[0] def __iter__(self): self.restart() return self @moduleman_plugin("restart", "count", "next", "__iter__") class zip: name = "zip" description = "Returns an iterator that aggregates elements from each of the iterables." category = ["default"] priority = 99 def __init__(self, *i): self._dic = i self.it = itertools.izip(*self._dic) self.__count = min(map(lambda x:x.count(), i)) # Only possible match counted. def count(self): return self.__count def restart(self): for dic in self._dic: dic.restart() self.it = itertools.izip(*self._dic) def next(self): return self.it.next() def __iter__(self): self.restart() return self @moduleman_plugin("restart", "count", "next", "__iter__") class product: name = "product" description = "Returns an iterator over the cartesian product of input iterables." category = ["default"] priority = 99 def __init__(self, *i): self._dic = i self.it = itertools.product(*self._dic) self.__count = reduce(lambda x,y:x*y.count(), i[1:], i[0].count()) def restart(self): for dic in self._dic: dic.restart() self.it = itertools.product(*self._dic) def count(self): return self.__count def next(self): return self.it.next() def __iter__(self): self.restart() return self @moduleman_plugin("restart", "count", "next", "__iter__") class chain: name = "chain" description = "Returns an iterator that returns elements from the first iterable until it is exhausted, then proceeds to the next iterable, until all of the iterables are exhausted." category = ["default"] priority = 99 def count(self): return self.__count def __init__(self, *i): self.__count = sum(map(lambda x:x.count(), i)) self._dic = i self.it = itertools.chain(*i) def restart(self): for dic in self._dic: dic.restart() self.it = itertools.chain(*self._dic) def next(self): return (self.it.next(),) def __iter__(self): self.restart() return self
gpl-2.0
5,252,061,649,703,927,000
-3,340,124,666,094,371,000
24.261682
181
0.564928
false
bcroq/kansha
kansha/card_addons/label/tests.py
2
1553
# -*- coding:utf-8 -*- #-- # Copyright (c) 2012-2014 Net-ng. # All rights reserved. # # This software is licensed under the BSD License, as described in # the file LICENSE.txt, which you should have received as part of # this distribution. #-- from kansha.cardextension.tests import CardExtensionTestCase from .comp import CardLabels class CardLabelsTest(CardExtensionTestCase): extension_name = 'labels' extension_class = CardLabels def test_activate(self): self.assertTrue(len(self.extension.get_available_labels()) > 0) self.assertEqual(len(self.extension.labels), 0) label = self.extension.get_available_labels()[0] self.extension.activate(label) self.assertIn(label, self.extension.labels) self.extension.activate(label) self.assertNotIn(label, self.extension.labels) def test_copy(self): labels = self.extension.get_available_labels() for label in labels: self.extension.activate(label) cpy = self.extension_copy labels2 = zip(self.extension.labels, cpy.labels) for labela, labelb in labels2: assert(labela.get_title() == labelb.get_title()) def test_update_document(self): doc = self.card.schema(docid=None) label = self.extension.get_available_labels()[0] self.extension.activate(label) label = self.extension.get_available_labels()[1] self.extension.activate(label) self.extension.update_document(doc) self.assertEqual(doc.labels, u'Green Red')
bsd-3-clause
549,430,190,486,056,770
-8,275,881,598,346,012,000
32.76087
66
0.675467
false
kuiwei/edx-platform
common/djangoapps/student/helpers.py
6
3995
"""Helpers for the student app. """ import time from django.utils.http import cookie_date from django.conf import settings from django.core.urlresolvers import reverse from opaque_keys.edx.keys import CourseKey from course_modes.models import CourseMode from third_party_auth import ( # pylint: disable=W0611 pipeline, provider, is_enabled as third_party_auth_enabled ) def auth_pipeline_urls(auth_entry, redirect_url=None, course_id=None): """Retrieve URLs for each enabled third-party auth provider. These URLs are used on the "sign up" and "sign in" buttons on the login/registration forms to allow users to begin authentication with a third-party provider. Optionally, we can redirect the user to an arbitrary url after auth completes successfully. We use this to redirect the user to a page that required login, or to send users to the payment flow when enrolling in a course. Args: auth_entry (string): Either `pipeline.AUTH_ENTRY_LOGIN` or `pipeline.AUTH_ENTRY_REGISTER` Keyword Args: redirect_url (unicode): If provided, send users to this URL after they successfully authenticate. course_id (unicode): The ID of the course the user is enrolling in. We use this to send users to the track selection page if the course has a payment option. Note that `redirect_url` takes precedence over the redirect to the track selection page. Returns: dict mapping provider names to URLs """ if not third_party_auth_enabled(): return {} if redirect_url is not None: pipeline_redirect = redirect_url elif course_id is not None: # If the course is white-label (paid), then we send users # to the shopping cart. (There is a third party auth pipeline # step that will add the course to the cart.) if CourseMode.is_white_label(CourseKey.from_string(course_id)): pipeline_redirect = reverse("shoppingcart.views.show_cart") # Otherwise, send the user to the track selection page. # The track selection page may redirect the user to the dashboard # (if the only available mode is honor), or directly to verification # (for professional ed). else: pipeline_redirect = reverse( "course_modes_choose", kwargs={'course_id': unicode(course_id)} ) else: pipeline_redirect = None return { provider.NAME: pipeline.get_login_url( provider.NAME, auth_entry, enroll_course_id=course_id, redirect_url=pipeline_redirect ) for provider in provider.Registry.enabled() } def set_logged_in_cookie(request, response): """Set a cookie indicating that the user is logged in. Some installations have an external marketing site configured that displays a different UI when the user is logged in (e.g. a link to the student dashboard instead of to the login page) Arguments: request (HttpRequest): The request to the view, used to calculate the cookie's expiration date based on the session expiration date. response (HttpResponse): The response on which the cookie will be set. Returns: HttpResponse """ if request.session.get_expire_at_browser_close(): max_age = None expires = None else: max_age = request.session.get_expiry_age() expires_time = time.time() + max_age expires = cookie_date(expires_time) response.set_cookie( settings.EDXMKTG_COOKIE_NAME, 'true', max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN, path='/', secure=None, httponly=None, ) return response def is_logged_in_cookie_set(request): """Check whether the request has the logged in cookie set. """ return settings.EDXMKTG_COOKIE_NAME in request.COOKIES
agpl-3.0
3,355,927,165,421,566,500
-1,886,720,448,177,962,800
34.353982
97
0.665832
false
spreg-git/pysal
pysal/esda/tests/test_getisord.py
14
1952
import unittest from pysal.weights.Distance import DistanceBand from pysal.esda import getisord import numpy as np POINTS = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)] W = DistanceBand(POINTS, threshold=15) Y = np.array([2, 3, 3.2, 5, 8, 7]) class G_Tester(unittest.TestCase): def setUp(self): self.w = W self.y = Y np.random.seed(10) def test_G(self): g = getisord.G(self.y, self.w) self.assertAlmostEquals(g.G, 0.55709779, places=8) self.assertAlmostEquals(g.p_norm, 0.1729, places=4) class G_Local_Tester(unittest.TestCase): def setUp(self): self.w = W self.y = Y np.random.seed(10) def test_G_Local_Binary(self): lg = getisord.G_Local(self.y, self.w, transform='B') self.assertAlmostEquals(lg.Zs[0], -1.0136729, places=7) self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7) def test_G_Local_Row_Standardized(self): lg = getisord.G_Local(self.y, self.w, transform='R') self.assertAlmostEquals(lg.Zs[0], -0.62074534, places=7) self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7) def test_G_star_Local_Binary(self): lg = getisord.G_Local(self.y, self.w, transform='B', star=True) self.assertAlmostEquals(lg.Zs[0], -1.39727626, places=8) self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7) def test_G_star_Row_Standardized(self): lg = getisord.G_Local(self.y, self.w, transform='R', star=True) self.assertAlmostEquals(lg.Zs[0], -0.62488094, places=8) self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7) suite = unittest.TestSuite() test_classes = [G_Tester, G_Local_Tester] for i in test_classes: a = unittest.TestLoader().loadTestsFromTestCase(i) suite.addTest(a) if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite)
bsd-3-clause
-569,953,971,746,942,850
-1,036,799,685,888,969,100
32.084746
75
0.643955
false
imsparsh/python-for-android
python-modules/twisted/twisted/test/test_strcred.py
56
21750
# Copyright (c) 2007-2010 Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.cred.strcred}. """ import os import StringIO from twisted import plugin from twisted.trial import unittest from twisted.cred import credentials, checkers, error, strcred from twisted.plugins import cred_file, cred_anonymous from twisted.python import usage from twisted.python.filepath import FilePath from twisted.python.fakepwd import UserDatabase try: import crypt except ImportError: crypt = None try: import pwd except ImportError: pwd = None try: import spwd except ImportError: spwd = None def getInvalidAuthType(): """ Helper method to produce an auth type that doesn't exist. """ invalidAuthType = 'ThisPluginDoesNotExist' while (invalidAuthType in [factory.authType for factory in strcred.findCheckerFactories()]): invalidAuthType += '_' return invalidAuthType class TestPublicAPI(unittest.TestCase): def test_emptyDescription(self): """ Test that the description string cannot be empty. """ iat = getInvalidAuthType() self.assertRaises(strcred.InvalidAuthType, strcred.makeChecker, iat) self.assertRaises(strcred.InvalidAuthType, strcred.findCheckerFactory, iat) def test_invalidAuthType(self): """ Test that an unrecognized auth type raises an exception. """ iat = getInvalidAuthType() self.assertRaises(strcred.InvalidAuthType, strcred.makeChecker, iat) self.assertRaises(strcred.InvalidAuthType, strcred.findCheckerFactory, iat) class TestStrcredFunctions(unittest.TestCase): def test_findCheckerFactories(self): """ Test that findCheckerFactories returns all available plugins. """ availablePlugins = list(strcred.findCheckerFactories()) for plg in plugin.getPlugins(strcred.ICheckerFactory): self.assertIn(plg, availablePlugins) def test_findCheckerFactory(self): """ Test that findCheckerFactory returns the first plugin available for a given authentication type. """ self.assertIdentical(strcred.findCheckerFactory('file'), cred_file.theFileCheckerFactory) class TestMemoryChecker(unittest.TestCase): def setUp(self): self.admin = credentials.UsernamePassword('admin', 'asdf') self.alice = credentials.UsernamePassword('alice', 'foo') self.badPass = credentials.UsernamePassword('alice', 'foobar') self.badUser = credentials.UsernamePassword('x', 'yz') self.checker = strcred.makeChecker('memory:admin:asdf:alice:foo') def test_isChecker(self): """ Verifies that strcred.makeChecker('memory') returns an object that implements the L{ICredentialsChecker} interface. """ self.assertTrue(checkers.ICredentialsChecker.providedBy(self.checker)) self.assertIn(credentials.IUsernamePassword, self.checker.credentialInterfaces) def test_badFormatArgString(self): """ Test that an argument string which does not contain user:pass pairs (i.e., an odd number of ':' characters) raises an exception. """ self.assertRaises(strcred.InvalidAuthArgumentString, strcred.makeChecker, 'memory:a:b:c') def test_memoryCheckerSucceeds(self): """ Test that the checker works with valid credentials. """ def _gotAvatar(username): self.assertEquals(username, self.admin.username) return (self.checker .requestAvatarId(self.admin) .addCallback(_gotAvatar)) def test_memoryCheckerFailsUsername(self): """ Test that the checker fails with an invalid username. """ return self.assertFailure(self.checker.requestAvatarId(self.badUser), error.UnauthorizedLogin) def test_memoryCheckerFailsPassword(self): """ Test that the checker fails with an invalid password. 
""" return self.assertFailure(self.checker.requestAvatarId(self.badPass), error.UnauthorizedLogin) class TestAnonymousChecker(unittest.TestCase): def test_isChecker(self): """ Verifies that strcred.makeChecker('anonymous') returns an object that implements the L{ICredentialsChecker} interface. """ checker = strcred.makeChecker('anonymous') self.assertTrue(checkers.ICredentialsChecker.providedBy(checker)) self.assertIn(credentials.IAnonymous, checker.credentialInterfaces) def testAnonymousAccessSucceeds(self): """ Test that we can log in anonymously using this checker. """ checker = strcred.makeChecker('anonymous') request = checker.requestAvatarId(credentials.Anonymous()) def _gotAvatar(avatar): self.assertIdentical(checkers.ANONYMOUS, avatar) return request.addCallback(_gotAvatar) class TestUnixChecker(unittest.TestCase): users = { 'admin': 'asdf', 'alice': 'foo', } def _spwd(self, username): return (username, crypt.crypt(self.users[username], 'F/'), 0, 0, 99999, 7, -1, -1, -1) def setUp(self): self.admin = credentials.UsernamePassword('admin', 'asdf') self.alice = credentials.UsernamePassword('alice', 'foo') self.badPass = credentials.UsernamePassword('alice', 'foobar') self.badUser = credentials.UsernamePassword('x', 'yz') self.checker = strcred.makeChecker('unix') # Hack around the pwd and spwd modules, since we can't really # go about reading your /etc/passwd or /etc/shadow files if pwd: database = UserDatabase() for username, password in self.users.items(): database.addUser( username, crypt.crypt(password, 'F/'), 1000, 1000, username, '/home/' + username, '/bin/sh') self.patch(pwd, 'getpwnam', database.getpwnam) if spwd: self._spwd_getspnam = spwd.getspnam spwd.getspnam = self._spwd def tearDown(self): if spwd: spwd.getspnam = self._spwd_getspnam def test_isChecker(self): """ Verifies that strcred.makeChecker('unix') returns an object that implements the L{ICredentialsChecker} interface. """ self.assertTrue(checkers.ICredentialsChecker.providedBy(self.checker)) self.assertIn(credentials.IUsernamePassword, self.checker.credentialInterfaces) def test_unixCheckerSucceeds(self): """ Test that the checker works with valid credentials. """ def _gotAvatar(username): self.assertEquals(username, self.admin.username) return (self.checker .requestAvatarId(self.admin) .addCallback(_gotAvatar)) def test_unixCheckerFailsUsername(self): """ Test that the checker fails with an invalid username. """ return self.assertFailure(self.checker.requestAvatarId(self.badUser), error.UnauthorizedLogin) def test_unixCheckerFailsPassword(self): """ Test that the checker fails with an invalid password. """ return self.assertFailure(self.checker.requestAvatarId(self.badPass), error.UnauthorizedLogin) if None in (pwd, spwd, crypt): availability = [] for module, name in ((pwd, "pwd"), (spwd, "swpd"), (crypt, "crypt")): if module is None: availability += [name] for method in (test_unixCheckerSucceeds, test_unixCheckerFailsUsername, test_unixCheckerFailsPassword): method.skip = ("Required module(s) are unavailable: " + ", ".join(availability)) class TestFileDBChecker(unittest.TestCase): """ Test for the --auth=file:... file checker. 
""" def setUp(self): self.admin = credentials.UsernamePassword('admin', 'asdf') self.alice = credentials.UsernamePassword('alice', 'foo') self.badPass = credentials.UsernamePassword('alice', 'foobar') self.badUser = credentials.UsernamePassword('x', 'yz') self.filename = self.mktemp() FilePath(self.filename).setContent('admin:asdf\nalice:foo\n') self.checker = strcred.makeChecker('file:' + self.filename) def _fakeFilename(self): filename = '/DoesNotExist' while os.path.exists(filename): filename += '_' return filename def test_isChecker(self): """ Verifies that strcred.makeChecker('memory') returns an object that implements the L{ICredentialsChecker} interface. """ self.assertTrue(checkers.ICredentialsChecker.providedBy(self.checker)) self.assertIn(credentials.IUsernamePassword, self.checker.credentialInterfaces) def test_fileCheckerSucceeds(self): """ Test that the checker works with valid credentials. """ def _gotAvatar(username): self.assertEquals(username, self.admin.username) return (self.checker .requestAvatarId(self.admin) .addCallback(_gotAvatar)) def test_fileCheckerFailsUsername(self): """ Test that the checker fails with an invalid username. """ return self.assertFailure(self.checker.requestAvatarId(self.badUser), error.UnauthorizedLogin) def test_fileCheckerFailsPassword(self): """ Test that the checker fails with an invalid password. """ return self.assertFailure(self.checker.requestAvatarId(self.badPass), error.UnauthorizedLogin) def test_failsWithEmptyFilename(self): """ Test that an empty filename raises an error. """ self.assertRaises(ValueError, strcred.makeChecker, 'file') self.assertRaises(ValueError, strcred.makeChecker, 'file:') def test_warnWithBadFilename(self): """ When the file auth plugin is given a file that doesn't exist, it should produce a warning. """ oldOutput = cred_file.theFileCheckerFactory.errorOutput newOutput = StringIO.StringIO() cred_file.theFileCheckerFactory.errorOutput = newOutput checker = strcred.makeChecker('file:' + self._fakeFilename()) cred_file.theFileCheckerFactory.errorOutput = oldOutput self.assertIn(cred_file.invalidFileWarning, newOutput.getvalue()) class DummyOptions(usage.Options, strcred.AuthOptionMixin): """ Simple options for testing L{strcred.AuthOptionMixin}. """ class TestCheckerOptions(unittest.TestCase): def test_createsList(self): """ Test that the --auth command line creates a list in the Options instance and appends values to it. """ options = DummyOptions() options.parseOptions(['--auth', 'memory']) self.assertEqual(len(options['credCheckers']), 1) options = DummyOptions() options.parseOptions(['--auth', 'memory', '--auth', 'memory']) self.assertEqual(len(options['credCheckers']), 2) def test_invalidAuthError(self): """ Test that the --auth command line raises an exception when it gets a parameter it doesn't understand. """ options = DummyOptions() # If someone adds a 'ThisPluginDoesNotExist' then this unit # test should still run. invalidParameter = getInvalidAuthType() self.assertRaises( usage.UsageError, options.parseOptions, ['--auth', invalidParameter]) self.assertRaises( usage.UsageError, options.parseOptions, ['--help-auth-type', invalidParameter]) def test_createsDictionary(self): """ Test that the --auth command line creates a dictionary mapping supported interfaces to the list of credentials checkers that support it. 
""" options = DummyOptions() options.parseOptions(['--auth', 'memory', '--auth', 'anonymous']) chd = options['credInterfaces'] self.assertEquals(len(chd[credentials.IAnonymous]), 1) self.assertEquals(len(chd[credentials.IUsernamePassword]), 1) chdAnonymous = chd[credentials.IAnonymous][0] chdUserPass = chd[credentials.IUsernamePassword][0] self.assertTrue(checkers.ICredentialsChecker.providedBy(chdAnonymous)) self.assertTrue(checkers.ICredentialsChecker.providedBy(chdUserPass)) self.assertIn(credentials.IAnonymous, chdAnonymous.credentialInterfaces) self.assertIn(credentials.IUsernamePassword, chdUserPass.credentialInterfaces) def test_credInterfacesProvidesLists(self): """ Test that when two --auth arguments are passed along which support the same interface, a list with both is created. """ options = DummyOptions() options.parseOptions(['--auth', 'memory', '--auth', 'unix']) self.assertEquals( options['credCheckers'], options['credInterfaces'][credentials.IUsernamePassword]) def test_listDoesNotDisplayDuplicates(self): """ Test that the list for --help-auth does not duplicate items. """ authTypes = [] options = DummyOptions() for cf in options._checkerFactoriesForOptHelpAuth(): self.assertNotIn(cf.authType, authTypes) authTypes.append(cf.authType) def test_displaysListCorrectly(self): """ Test that the --help-auth argument correctly displays all available authentication plugins, then exits. """ newStdout = StringIO.StringIO() options = DummyOptions() options.authOutput = newStdout self.assertRaises(SystemExit, options.parseOptions, ['--help-auth']) for checkerFactory in strcred.findCheckerFactories(): self.assertIn(checkerFactory.authType, newStdout.getvalue()) def test_displaysHelpCorrectly(self): """ Test that the --help-auth-for argument will correctly display the help file for a particular authentication plugin. """ newStdout = StringIO.StringIO() options = DummyOptions() options.authOutput = newStdout self.assertRaises( SystemExit, options.parseOptions, ['--help-auth-type', 'file']) for line in cred_file.theFileCheckerFactory.authHelp: if line.strip(): self.assertIn(line.strip(), newStdout.getvalue()) def test_unexpectedException(self): """ When the checker specified by --auth raises an unexpected error, it should be caught and re-raised within a L{usage.UsageError}. """ options = DummyOptions() err = self.assertRaises(usage.UsageError, options.parseOptions, ['--auth', 'file']) self.assertEquals(str(err), "Unexpected error: 'file' requires a filename") class OptionsForUsernamePassword(usage.Options, strcred.AuthOptionMixin): supportedInterfaces = (credentials.IUsernamePassword,) class OptionsForUsernameHashedPassword(usage.Options, strcred.AuthOptionMixin): supportedInterfaces = (credentials.IUsernameHashedPassword,) class OptionsSupportsAllInterfaces(usage.Options, strcred.AuthOptionMixin): supportedInterfaces = None class OptionsSupportsNoInterfaces(usage.Options, strcred.AuthOptionMixin): supportedInterfaces = [] class TestLimitingInterfaces(unittest.TestCase): """ Tests functionality that allows an application to limit the credential interfaces it can support. For the purposes of this test, we use IUsernameHashedPassword, although this will never really be used by the command line. (I have, to date, not thought of a half-decent way for a user to specify a hash algorithm via the command-line. Nor do I think it's very useful.) I should note that, at first, this test is counter-intuitive, because we're using the checker with a pre-defined hash function as the 'bad' checker. 
See the documentation for L{twisted.cred.checkers.FilePasswordDB.hash} for more details. """ def setUp(self): self.filename = self.mktemp() file(self.filename, 'w').write('admin:asdf\nalice:foo\n') self.goodChecker = checkers.FilePasswordDB(self.filename) self.badChecker = checkers.FilePasswordDB(self.filename, hash=self._hash) self.anonChecker = checkers.AllowAnonymousAccess() def _hash(self, networkUsername, networkPassword, storedPassword): """ A dumb hash that doesn't really do anything. """ return networkPassword def test_supportsInterface(self): """ Test that the supportsInterface method behaves appropriately. """ options = OptionsForUsernamePassword() self.assertTrue( options.supportsInterface(credentials.IUsernamePassword)) self.assertFalse( options.supportsInterface(credentials.IAnonymous)) self.assertRaises( strcred.UnsupportedInterfaces, options.addChecker, self.anonChecker) def test_supportsAllInterfaces(self): """ Test that the supportsInterface method behaves appropriately when the supportedInterfaces attribute is None. """ options = OptionsSupportsAllInterfaces() self.assertTrue( options.supportsInterface(credentials.IUsernamePassword)) self.assertTrue( options.supportsInterface(credentials.IAnonymous)) def test_supportsCheckerFactory(self): """ Test that the supportsCheckerFactory method behaves appropriately. """ options = OptionsForUsernamePassword() fileCF = cred_file.theFileCheckerFactory anonCF = cred_anonymous.theAnonymousCheckerFactory self.assertTrue(options.supportsCheckerFactory(fileCF)) self.assertFalse(options.supportsCheckerFactory(anonCF)) def test_canAddSupportedChecker(self): """ Test that when addChecker is called with a checker that implements at least one of the interfaces our application supports, it is successful. """ options = OptionsForUsernamePassword() options.addChecker(self.goodChecker) iface = options.supportedInterfaces[0] # Test that we did get IUsernamePassword self.assertIdentical(options['credInterfaces'][iface][0], self.goodChecker) self.assertIdentical(options['credCheckers'][0], self.goodChecker) # Test that we didn't get IUsernameHashedPassword self.assertEquals(len(options['credInterfaces'][iface]), 1) self.assertEquals(len(options['credCheckers']), 1) def test_failOnAddingUnsupportedChecker(self): """ Test that when addChecker is called with a checker that does not implement any supported interfaces, it fails. """ options = OptionsForUsernameHashedPassword() self.assertRaises(strcred.UnsupportedInterfaces, options.addChecker, self.badChecker) def test_unsupportedInterfaceError(self): """ Test that the --auth command line raises an exception when it gets a checker we don't support. """ options = OptionsSupportsNoInterfaces() authType = cred_anonymous.theAnonymousCheckerFactory.authType self.assertRaises( usage.UsageError, options.parseOptions, ['--auth', authType]) def test_helpAuthLimitsOutput(self): """ Test that --help-auth will only list checkers that purport to supply at least one of the credential interfaces our application can use. """ options = OptionsForUsernamePassword() for factory in options._checkerFactoriesForOptHelpAuth(): invalid = True for interface in factory.credentialInterfaces: if options.supportsInterface(interface): invalid = False if invalid: raise strcred.UnsupportedInterfaces() def test_helpAuthTypeLimitsOutput(self): """ Test that --help-auth-type will display a warning if you get help for an authType that does not supply at least one of the credential interfaces our application can use. 
""" options = OptionsForUsernamePassword() # Find an interface that we can use for our test invalidFactory = None for factory in strcred.findCheckerFactories(): if not options.supportsCheckerFactory(factory): invalidFactory = factory break self.assertNotIdentical(invalidFactory, None) # Capture output and make sure the warning is there newStdout = StringIO.StringIO() options.authOutput = newStdout self.assertRaises(SystemExit, options.parseOptions, ['--help-auth-type', 'anonymous']) self.assertIn(strcred.notSupportedWarning, newStdout.getvalue())
apache-2.0
8,885,102,933,040,256,000
-6,027,063,937,818,271,000
33.688995
83
0.644644
false
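The record above exercises twisted.cred.strcred through its test suite; a minimal usage sketch of the same API, assuming only the calls the tests themselves make (makeChecker, requestAvatarId, UsernamePassword), looks like this:

from twisted.cred import strcred, credentials

# Build a checker from an --auth style description string, exactly as the
# tests above do for the 'memory' plugin.
checker = strcred.makeChecker('memory:admin:asdf:alice:foo')

def _loggedIn(avatarId):
    # On success the Deferred fires with the username; on bad credentials
    # it errbacks with error.UnauthorizedLogin, as the failure tests assert.
    return avatarId

d = checker.requestAvatarId(credentials.UsernamePassword('admin', 'asdf'))
d.addCallback(_loggedIn)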
Chuban/moose
python/peacock/tests/input_tab/ExecutableInfo/test_ExecutableInfo.py
4
3032
#!/usr/bin/env python import unittest from peacock.Input.ExecutableInfo import ExecutableInfo from peacock.utils import Testing from PyQt5 import QtWidgets class Tests(Testing.PeacockTester): qapp = QtWidgets.QApplication([]) def checkFile(self, output, gold_file, write_output=False): if write_output: with open("tmp_out.txt", "w") as f: f.write(output) with open(gold_file, "r") as f: gold_output = f.read() self.assertEqual(gold_output, output) def testInfo(self): e = ExecutableInfo() e.clearCache() e.setPath("") self.assertFalse(e.valid()) e.setPath("no_exist") self.assertFalse(e.valid()) exe_path = Testing.find_moose_test_exe() e.setPath(exe_path) self.assertTrue(e.valid()) e.setPath(exe_path) self.assertTrue(e.valid()) e.setPath("") self.assertTrue(e.valid()) e.setPath("no_exist") self.assertFalse(e.valid()) # this should hit the cache e.setPath(exe_path) self.assertTrue(e.valid()) def testTree(self): e = ExecutableInfo() e.clearCache() exe_path = Testing.find_moose_test_exe() e.setPath(exe_path) root = e.path_map["/"] self.assertIn("Mesh", root.children_list) m = root.children["Mesh"] self.assertEqual(m.hard, True) self.assertEqual(e.path_map["/Mesh"], m) out = e.dumpDefaultTree(hard_only=False) self.assertIn("Partitioner", out) self.assertIn("Partitioner", out) self.assertIn("ScalarKernels", out) self.assertNotIn("DirichletBC", out) def testPickle(self): exe_path = Testing.find_moose_test_exe() e = ExecutableInfo() e.clearCache() e.setPath(exe_path) p = e.toPickle() e2 = ExecutableInfo() e2.fromPickle(p) self.assertEqual(e2.path_map, e.path_map) def checkPath(self, e, path, star, hard): p = e.path_map.get(path) self.assertNotEqual(p, None) self.assertEqual(p.star, star) self.assertEqual(p.hard, hard) def testCombined(self): e = ExecutableInfo() e.setPath(Testing.find_moose_test_exe(dirname="modules/combined", exe_base="combined")) self.checkPath(e, "/Preconditioning", True, True) self.checkPath(e, "/BCs", True, True) self.checkPath(e, "/BCs/Pressure", True, True) self.checkPath(e, "/SolidMechanics", True, True) self.checkPath(e, "/Adaptivity", False, True) self.checkPath(e, "/Adaptivity/Markers", True, True) self.checkPath(e, "/GlobalParams", False, True) self.checkPath(e, "/Mesh", False, True) self.checkPath(e, "/AuxVariables", True, True) self.checkPath(e, "/AuxVariables/*/InitialCondition", False, False) self.checkPath(e, "/Variables/*/InitialCondition", False, False) if __name__ == '__main__': unittest.main()
lgpl-2.1
477,742,526,963,515,400
-4,393,417,863,556,416,500
31.602151
95
0.596966
false
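The ExecutableInfo tests in the record above imply a small public surface (setPath, valid, path_map, dumpDefaultTree). A hedged sketch of how an application would drive it, with the executable path as a placeholder rather than a real binary:

from peacock.Input.ExecutableInfo import ExecutableInfo

e = ExecutableInfo()
e.setPath("/path/to/moose_test-opt")  # placeholder path; parses and caches the app syntax
if e.valid():
    # Textual dump of the default input-file tree, as testTree() checks.
    print(e.dumpDefaultTree(hard_only=False))
    mesh_info = e.path_map["/Mesh"]   # per-block metadata (hard, star, children)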
berkmancenter/mediacloud
apps/common/src/python/mediawords/db/locks.py
1
3477
"""Constants and routines for handling advisory postgres locks.""" import mediawords.db from mediawords.util.log import create_logger from mediawords.util.perl import decode_object_from_bytes_if_needed log = create_logger(__name__) """ This package just has constants that can be passed to the first value of the postgres pg_advisory_*lock functions. If you are using an advisory lock, you should use the two key version and use a constant from this package to avoid conflicts. """ # locks to make sure we are not mining or snapshotting a topic in more than one process at a time LOCK_TYPES = { 'test-a': 10, 'test-b': 11, 'MediaWords::Job::TM::MineTopic': 12, 'MediaWords::Job::TM::SnapshotTopic': 13, 'MediaWords::TM::Media::media_normalized_urls': 14, 'MediaWords::Crawler::Engine::run_fetcher': 15, # Testing lock types 'TestPerlWorkerLock': 900, 'TestPythonWorkerLock': 901, } class McDBLocksException(Exception): """Default exception for package.""" pass def get_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int, wait: bool = False) -> bool: """Get a postgres advisory lock with the lock_type and lock_id as the two keys. Arguments: db - db handle lock_type - must be in LOCK_TYPES dict above lock_id - id for the particular lock within the type wait - if true, block while waiting for the lock, else return false if the lock is not available Returns: True if the lock is available """ lock_type = str(decode_object_from_bytes_if_needed(lock_type)) if isinstance(lock_id, bytes): lock_id = decode_object_from_bytes_if_needed(lock_id) lock_id = int(lock_id) if isinstance(wait, bytes): wait = decode_object_from_bytes_if_needed(wait) wait = bool(wait) log.debug("trying for lock: %s, %d" % (lock_type, lock_id)) if lock_type not in LOCK_TYPES: raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type) lock_type_id = LOCK_TYPES[lock_type] if wait: db.query("select pg_advisory_lock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id}) return True else: r = db.query("select pg_try_advisory_lock(%(a)s, %(b)s) as locked", {'a': lock_type_id, 'b': lock_id}).hash() return r['locked'] def release_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int) -> None: """Release the postgres advisory lock if it is held.""" lock_type = str(decode_object_from_bytes_if_needed(lock_type)) if isinstance(lock_id, bytes): lock_id = decode_object_from_bytes_if_needed(lock_id) lock_id = int(lock_id) if lock_type not in LOCK_TYPES: raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type) lock_type_id = LOCK_TYPES[lock_type] db.query("select pg_advisory_unlock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id}) def list_session_locks(db: mediawords.db.DatabaseHandler, lock_type: str) -> list: """Return a list of all locked ids for the given lock_type.""" lock_type = str(decode_object_from_bytes_if_needed(lock_type)) if lock_type not in LOCK_TYPES: raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type) lock_type_id = LOCK_TYPES[lock_type] # noinspection SqlResolve return db.query( "select objid from pg_locks where locktype = 'advisory' and classid = %(a)s", {'a': lock_type_id}).flat()
agpl-3.0
9,089,212,418,535,366,000
-353,982,875,060,302,340
33.425743
117
0.667242
false
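The locks module above documents its own calling convention in the docstrings; a short sketch of the intended acquire/release pattern, assuming the usual mediawords.db.connect_to_db() handle, follows:

import mediawords.db
from mediawords.db.locks import get_session_lock, release_session_lock

db = mediawords.db.connect_to_db()  # assumed way of obtaining a handle

# 'test-a' is one of the LOCK_TYPES defined above; 42 is an arbitrary lock id.
if get_session_lock(db, 'test-a', 42, wait=False):
    try:
        pass  # work that must not run concurrently for this lock id
    finally:
        release_session_lock(db, 'test-a', 42)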
mixturemodel-flow/tensorflow
tensorflow/contrib/graph_editor/select.py
75
28656
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Various ways of selecting operations and tensors in a graph.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from six import iteritems from six import string_types from tensorflow.contrib.graph_editor import util from tensorflow.python.framework import ops as tf_ops __all__ = [ "can_be_regex", "make_regex", "filter_ts", "filter_ts_from_regex", "filter_ops", "filter_ops_from_regex", "get_name_scope_ops", "check_cios", "get_ops_ios", "compute_boundary_ts", "get_within_boundary_ops", "get_forward_walk_ops", "get_backward_walk_ops", "get_walks_intersection_ops", "get_walks_union_ops", "select_ops", "select_ts", "select_ops_and_ts", ] _RE_TYPE = type(re.compile("")) def can_be_regex(obj): """Return True if obj can be turned into a regular expression.""" return isinstance(obj, string_types + (_RE_TYPE,)) def make_regex(obj): """Return a compiled regular expression. Args: obj: a string or a regular expression. Returns: A compiled regular expression. Raises: ValueError: if obj could not be converted to a regular expression. """ if not can_be_regex(obj): raise ValueError("Expected a string or a regex, got: {}".format(type(obj))) if isinstance(obj, string_types): return re.compile(obj) else: return obj def _get_input_ts(ops): """Compute the list of unique input tensors of all the op in ops. Args: ops: an object convertible to a list of `tf.Operation`. Returns: The list of unique input tensors of all the op in ops. Raises: TypeError: if ops cannot be converted to a list of `tf.Operation`. """ ops = util.make_list_of_op(ops) ts = [] ts_set = set() for op in ops: for t in op.inputs: if t not in ts_set: ts.append(t) ts_set.add(t) return ts def _get_output_ts(ops): """Compute the list of unique output tensors of all the op in ops. Args: ops: an object convertible to a list of tf.Operation. Returns: The list of unique output tensors of all the op in ops. Raises: TypeError: if ops cannot be converted to a list of tf.Operation. """ ops = util.make_list_of_op(ops) ts = [] for op in ops: ts += op.outputs return ts def filter_ts(ops, positive_filter): """Get all the tensors which are input or output of an op in ops. Args: ops: an object convertible to a list of `tf.Operation`. positive_filter: a function deciding whether to keep a tensor or not. If `True`, all the tensors are returned. Returns: A list of `tf.Tensor`. Raises: TypeError: if ops cannot be converted to a list of `tf.Operation`. """ ops = util.make_list_of_op(ops) ts = _get_input_ts(ops) util.concatenate_unique(ts, _get_output_ts(ops)) if positive_filter is not True: ts = [t for t in ts if positive_filter(t)] return ts def filter_ts_from_regex(ops, regex): r"""Get all the tensors linked to ops that match the given regex. Args: ops: an object convertible to a list of tf.Operation. 
regex: a regular expression matching the tensors' name. For example, "^foo(/.*)?:\d+$" will match all the tensors in the "foo" scope. Returns: A list of tf.Tensor. Raises: TypeError: if ops cannot be converted to a list of tf.Operation. """ ops = util.make_list_of_op(ops) regex_obj = make_regex(regex) return filter_ts(ops, positive_filter=lambda op: regex_obj.search(op.name)) def filter_ops(ops, positive_filter): """Get the ops passing the given filter. Args: ops: an object convertible to a list of tf.Operation. positive_filter: a function deciding where to keep an operation or not. If True, all the operations are returned. Returns: A list of selected tf.Operation. Raises: TypeError: if ops cannot be converted to a list of tf.Operation. """ ops = util.make_list_of_op(ops) if positive_filter is not True: # pylint: disable=g-explicit-bool-comparison ops = [op for op in ops if positive_filter(op)] return ops def filter_ops_from_regex(ops, regex): """Get all the operations that match the given regex. Args: ops: an object convertible to a list of `tf.Operation`. regex: a regular expression matching the operation's name. For example, `"^foo(/.*)?$"` will match all the operations in the "foo" scope. Returns: A list of `tf.Operation`. Raises: TypeError: if ops cannot be converted to a list of `tf.Operation`. """ ops = util.make_list_of_op(ops) regex_obj = make_regex(regex) return filter_ops(ops, lambda op: regex_obj.search(op.name)) def get_name_scope_ops(ops, scope): """Get all the operations under the given scope path. Args: ops: an object convertible to a list of tf.Operation. scope: a scope path. Returns: A list of tf.Operation. Raises: TypeError: if ops cannot be converted to a list of tf.Operation. """ if scope and scope[-1] == "/": scope = scope[:-1] return filter_ops_from_regex(ops, "^{}(/.*)?$".format(scope)) def check_cios(control_inputs=False, control_outputs=None, control_ios=None): """Do various check on control_inputs and control_outputs. Args: control_inputs: A boolean indicating whether control inputs are enabled. control_outputs: An instance of util.ControlOutputs or None. If not None, control outputs are enabled. control_ios: An instance of util.ControlOutputs or None. If not None, both control inputs and control outputs are enabled. This is equivalent to set control_inputs to True and control_outputs to the util.ControlOutputs instance. Returns: A tuple `(control_inputs, control_outputs)` where: `control_inputs` is a boolean indicating whether to use control inputs. `control_outputs` is an instance of util.ControlOutputs or None Raises: ValueError: if control_inputs is an instance of util.ControlOutputs but control_outputs is not None TypeError: if control_outputs is not None and is not a util.ControlOutputs. """ if control_ios is not None: if not isinstance(control_ios, util.ControlOutputs): raise TypeError("Expected a util.ControlOutputs, got: {}".format( type(control_ios))) if control_outputs is not None: raise ValueError("control_outputs should be None when using control_ios.") control_inputs = True control_outputs = control_ios elif control_outputs is not None: if not isinstance(control_outputs, util.ControlOutputs): raise TypeError("Expected a util.ControlOutputs, got: {}".format( type(control_outputs))) if control_outputs is not None: control_outputs.update() return control_inputs, control_outputs def get_ops_ios(ops, control_inputs=False, control_outputs=None, control_ios=None): """Return all the `tf.Operation` which are connected to an op in ops. 
Args: ops: an object convertible to a list of `tf.Operation`. control_inputs: A boolean indicating whether control inputs are enabled. control_outputs: An instance of `util.ControlOutputs` or `None`. If not `None`, control outputs are enabled. control_ios: An instance of `util.ControlOutputs` or `None`. If not `None`, both control inputs and control outputs are enabled. This is equivalent to set `control_inputs` to `True` and `control_outputs` to the `util.ControlOutputs` instance. Returns: All the `tf.Operation` surrounding the given ops. Raises: TypeError: if `ops` cannot be converted to a list of `tf.Operation`. """ control_inputs, control_outputs = check_cios(control_inputs, control_outputs, control_ios) ops = util.make_list_of_op(ops) res = [] for op in ops: util.concatenate_unique(res, [t.op for t in op.inputs]) for t in op.outputs: util.concatenate_unique(res, t.consumers()) if control_outputs is not None: util.concatenate_unique(res, control_outputs.get(op)) if control_inputs: util.concatenate_unique(res, op.control_inputs) return res def compute_boundary_ts(ops): """Compute the tensors at the boundary of a set of ops. This function looks at all the tensors connected to the given ops (in/out) and classify them into three categories: 1) input tensors: tensors whose generating operation is not in ops. 2) output tensors: tensors whose consumer operations are not in ops 3) inside tensors: tensors which are neither input nor output tensors. Note that a tensor can be both an inside tensor and an output tensor if it is consumed by operations both outside and inside of `ops`. Args: ops: an object convertible to a list of tf.Operation. Returns: A tuple `(outside_input_ts, outside_output_ts, inside_ts)` where: `outside_input_ts` is a Python list of input tensors; `outside_output_ts` is a python list of output tensors; `inside_ts` is a python list of inside tensors. Since a tensor can be both an inside tensor and an output tensor, `outside_output_ts` and `inside_ts` might intersect. Raises: TypeError: if ops cannot be converted to a list of tf.Operation. """ ops = util.make_list_of_op(ops) input_ts = _get_input_ts(ops) output_ts = _get_output_ts(ops) output_ts_set = frozenset(output_ts) ops_set = frozenset(ops) # Compute inside tensors. inside_ts = [] only_inside_ts = [] for t in input_ts: # Skip if the input tensor is not also an output tensor. if t not in output_ts_set: continue # Mark as "inside". inside_ts.append(t) # Mark as "only inside" if the tensor is not both inside and output. consumers = frozenset(t.consumers()) if consumers - ops_set: continue only_inside_ts.append(t) inside_ts_set = frozenset(inside_ts) only_inside_ts_set = frozenset(only_inside_ts) outside_output_ts = [t for t in output_ts if t not in only_inside_ts_set] outside_input_ts = [t for t in input_ts if t not in inside_ts_set] return outside_input_ts, outside_output_ts, inside_ts def get_within_boundary_ops(ops, seed_ops, boundary_ops=(), inclusive=True, control_inputs=False, control_outputs=None, control_ios=None): """Return all the `tf.Operation` within the given boundary. Args: ops: an object convertible to a list of `tf.Operation`. those ops define the set in which to perform the operation (if a `tf.Graph` is given, it will be converted to the list of all its operations). seed_ops: the operations from which to start expanding. boundary_ops: the ops forming the boundary. inclusive: if `True`, the result will also include the boundary ops. control_inputs: A boolean indicating whether control inputs are enabled. 
control_outputs: An instance of `util.ControlOutputs` or `None`. If not `None`, control outputs are enabled. control_ios: An instance of `util.ControlOutputs` or `None`. If not `None`, both control inputs and control outputs are enabled. This is equivalent to set control_inputs to True and control_outputs to the `util.ControlOutputs` instance. Returns: All the `tf.Operation` surrounding the given ops. Raises: TypeError: if `ops` or `seed_ops` cannot be converted to a list of `tf.Operation`. ValueError: if the boundary is intersecting with the seeds. """ control_inputs, control_outputs = check_cios(control_inputs, control_outputs, control_ios) ops = util.make_list_of_op(ops) seed_ops = util.make_list_of_op(seed_ops, allow_graph=False) boundary_ops = set(util.make_list_of_op(boundary_ops)) res = set(seed_ops) if boundary_ops & res: raise ValueError("Boundary is intersecting with the seeds.") wave = set(seed_ops) while wave: new_wave = set() ops_io = get_ops_ios(wave, control_inputs, control_outputs) for op in ops_io: if op in res: continue if op in boundary_ops: if inclusive: res.add(op) else: new_wave.add(op) res.update(new_wave) wave = new_wave return [op for op in ops if op in res] def get_forward_walk_ops(seed_ops, inclusive=True, within_ops=None, stop_at_ts=(), control_outputs=None): """Do a forward graph walk and return all the visited ops. Args: seed_ops: an iterable of operations from which the forward graph walk starts. If a list of tensors is given instead, the seed_ops are set to be the consumers of those tensors. inclusive: if True the given seed_ops are also part of the resulting set. within_ops: an iterable of `tf.Operation` within which the search is restricted. If `within_ops` is `None`, the search is performed within the whole graph. stop_at_ts: an iterable of tensors at which the graph walk stops. control_outputs: a `util.ControlOutputs` instance or None. If not `None`, it will be used while walking the graph forward. Returns: A Python set of all the `tf.Operation` ahead of `seed_ops`. Raises: TypeError: if `seed_ops` or `within_ops` cannot be converted to a list of `tf.Operation`. """ _, control_outputs = check_cios(False, control_outputs) if not util.is_iterable(seed_ops): seed_ops = [seed_ops] if not seed_ops: return [] if isinstance(seed_ops[0], tf_ops.Tensor): ts = util.make_list_of_t(seed_ops, allow_graph=False) seed_ops = util.get_consuming_ops(ts) else: seed_ops = util.make_list_of_op(seed_ops, allow_graph=False) seed_ops = frozenset(seed_ops) stop_at_ts = frozenset(util.make_list_of_t(stop_at_ts)) if within_ops: within_ops = util.make_list_of_op(within_ops, allow_graph=False) within_ops = frozenset(within_ops) seed_ops &= within_ops def is_within(op): return within_ops is None or op in within_ops result = list(seed_ops) wave = set(seed_ops) while wave: new_wave = set() for op in wave: for new_t in op.outputs: if new_t in stop_at_ts: continue for new_op in new_t.consumers(): if new_op not in result and is_within(new_op): new_wave.add(new_op) if control_outputs is not None: for new_op in control_outputs.get(op): if new_op not in result and is_within(new_op): new_wave.add(new_op) util.concatenate_unique(result, new_wave) wave = new_wave if not inclusive: result = [op for op in result if op not in seed_ops] return result def get_backward_walk_ops(seed_ops, inclusive=True, within_ops=None, stop_at_ts=(), control_inputs=False): """Do a backward graph walk and return all the visited ops. Args: seed_ops: an iterable of operations from which the backward graph walk starts. 
If a list of tensors is given instead, the seed_ops are set to be the generators of those tensors. inclusive: if True the given seed_ops are also part of the resulting set. within_ops: an iterable of `tf.Operation` within which the search is restricted. If `within_ops` is `None`, the search is performed within the whole graph. stop_at_ts: an iterable of tensors at which the graph walk stops. control_inputs: if True, control inputs will be used while moving backward. Returns: A Python set of all the `tf.Operation` behind `seed_ops`. Raises: TypeError: if `seed_ops` or `within_ops` cannot be converted to a list of `tf.Operation`. """ if not util.is_iterable(seed_ops): seed_ops = [seed_ops] if not seed_ops: return [] if isinstance(seed_ops[0], tf_ops.Tensor): ts = util.make_list_of_t(seed_ops, allow_graph=False) seed_ops = util.get_generating_ops(ts) else: seed_ops = util.make_list_of_op(seed_ops, allow_graph=False) stop_at_ts = frozenset(util.make_list_of_t(stop_at_ts)) seed_ops = frozenset(util.make_list_of_op(seed_ops)) if within_ops: within_ops = util.make_list_of_op(within_ops, allow_graph=False) within_ops = frozenset(within_ops) seed_ops &= within_ops def is_within(op): return within_ops is None or op in within_ops result = list(seed_ops) wave = set(seed_ops) while wave: new_wave = set() for op in wave: for new_t in op.inputs: if new_t in stop_at_ts: continue if new_t.op not in result and is_within(new_t.op): new_wave.add(new_t.op) if control_inputs: for new_op in op.control_inputs: if new_op not in result and is_within(new_op): new_wave.add(new_op) util.concatenate_unique(result, new_wave) wave = new_wave if not inclusive: result = [op for op in result if op not in seed_ops] return result def get_walks_intersection_ops(forward_seed_ops, backward_seed_ops, forward_inclusive=True, backward_inclusive=True, within_ops=None, control_inputs=False, control_outputs=None, control_ios=None): """Return the intersection of a forward and a backward walk. Args: forward_seed_ops: an iterable of operations from which the forward graph walk starts. If a list of tensors is given instead, the seed_ops are set to be the consumers of those tensors. backward_seed_ops: an iterable of operations from which the backward graph walk starts. If a list of tensors is given instead, the seed_ops are set to be the generators of those tensors. forward_inclusive: if True the given forward_seed_ops are also part of the resulting set. backward_inclusive: if True the given backward_seed_ops are also part of the resulting set. within_ops: an iterable of tf.Operation within which the search is restricted. If within_ops is None, the search is performed within the whole graph. control_inputs: A boolean indicating whether control inputs are enabled. control_outputs: An instance of util.ControlOutputs or None. If not None, control outputs are enabled. control_ios: An instance of util.ControlOutputs or None. If not None, both control inputs and control outputs are enabled. This is equivalent to set control_inputs to True and control_outputs to the util.ControlOutputs instance. Returns: A Python set of all the tf.Operation in the intersection of a forward and a backward walk. Raises: TypeError: if `forward_seed_ops` or `backward_seed_ops` or `within_ops` cannot be converted to a list of `tf.Operation`. 
""" control_inputs, control_outputs = check_cios(control_inputs, control_outputs, control_ios) forward_ops = get_forward_walk_ops( forward_seed_ops, inclusive=forward_inclusive, within_ops=within_ops, control_outputs=control_outputs) backward_ops = get_backward_walk_ops( backward_seed_ops, inclusive=backward_inclusive, within_ops=within_ops, control_inputs=control_inputs) return [op for op in forward_ops if op in backward_ops] def get_walks_union_ops(forward_seed_ops, backward_seed_ops, forward_inclusive=True, backward_inclusive=True, within_ops=None, control_inputs=False, control_outputs=None, control_ios=None): """Return the union of a forward and a backward walk. Args: forward_seed_ops: an iterable of operations from which the forward graph walk starts. If a list of tensors is given instead, the seed_ops are set to be the consumers of those tensors. backward_seed_ops: an iterable of operations from which the backward graph walk starts. If a list of tensors is given instead, the seed_ops are set to be the generators of those tensors. forward_inclusive: if True the given forward_seed_ops are also part of the resulting set. backward_inclusive: if True the given backward_seed_ops are also part of the resulting set. within_ops: restrict the search within those operations. If within_ops is None, the search is done within the whole graph. control_inputs: A boolean indicating whether control inputs are enabled. control_outputs: An instance of util.ControlOutputs or None. If not None, control outputs are enabled. control_ios: An instance of util.ControlOutputs or None. If not None, both control inputs and control outputs are enabled. This is equivalent to set control_inputs to True and control_outputs to the util.ControlOutputs instance. Returns: A Python set of all the tf.Operation in the union of a forward and a backward walk. Raises: TypeError: if forward_seed_ops or backward_seed_ops or within_ops cannot be converted to a list of tf.Operation. """ control_inputs, control_outputs = check_cios(control_inputs, control_outputs, control_ios) forward_ops = get_forward_walk_ops( forward_seed_ops, inclusive=forward_inclusive, within_ops=within_ops, control_outputs=control_outputs) backward_ops = get_backward_walk_ops( backward_seed_ops, inclusive=backward_inclusive, within_ops=within_ops, control_inputs=control_inputs) return util.concatenate_unique(forward_ops, backward_ops) def select_ops(*args, **kwargs): """Helper to select operations. Args: *args: list of 1) regular expressions (compiled or not) or 2) (array of) `tf.Operation`. `tf.Tensor` instances are silently ignored. **kwargs: 'graph': `tf.Graph` in which to perform the regex query.This is required when using regex. 'positive_filter': an elem if selected only if `positive_filter(elem)` is `True`. This is optional. 'restrict_ops_regex': a regular expression is ignored if it doesn't start with the substring "(?#ops)". Returns: A list of `tf.Operation`. Raises: TypeError: if the optional keyword argument graph is not a `tf.Graph` or if an argument in args is not an (array of) `tf.Operation` or an (array of) `tf.Tensor` (silently ignored) or a string or a regular expression. ValueError: if one of the keyword arguments is unexpected or if a regular expression is used without passing a graph as a keyword argument. 
""" # get keywords arguments graph = None positive_filter = None restrict_ops_regex = False for k, v in iteritems(kwargs): if k == "graph": graph = v if graph is not None and not isinstance(graph, tf_ops.Graph): raise TypeError("Expected a tf.Graph, got: {}".format(type(graph))) elif k == "positive_filter": positive_filter = v elif k == "restrict_ops_regex": restrict_ops_regex = v elif k == "restrict_ts_regex": pass else: raise ValueError("Wrong keywords argument: {}.".format(k)) ops = [] for arg in args: if can_be_regex(arg): if graph is None: raise ValueError("Use the keyword argument 'graph' to use regex.") regex = make_regex(arg) if regex.pattern.startswith("(?#ts)"): continue if restrict_ops_regex and not regex.pattern.startswith("(?#ops)"): continue ops_ = filter_ops_from_regex(graph, regex) for op_ in ops_: if op_ not in ops: if positive_filter is None or positive_filter(op_): ops.append(op_) else: ops_aux = util.make_list_of_op(arg, ignore_ts=True) if positive_filter is not None: ops_aux = [op for op in ops_aux if positive_filter(op)] ops_aux = [op for op in ops_aux if op not in ops] ops += ops_aux return ops def select_ts(*args, **kwargs): """Helper to select tensors. Args: *args: list of 1) regular expressions (compiled or not) or 2) (array of) `tf.Tensor`. `tf.Operation` instances are silently ignored. **kwargs: 'graph': `tf.Graph` in which to perform the regex query.This is required when using regex. 'positive_filter': an elem if selected only if `positive_filter(elem)` is `True`. This is optional. 'restrict_ts_regex': a regular expression is ignored if it doesn't start with the substring "(?#ts)". Returns: A list of `tf.Tensor`. Raises: TypeError: if the optional keyword argument graph is not a `tf.Graph` or if an argument in args is not an (array of) `tf.Tensor` or an (array of) `tf.Operation` (silently ignored) or a string or a regular expression. ValueError: if one of the keyword arguments is unexpected or if a regular expression is used without passing a graph as a keyword argument. """ # get keywords arguments graph = None positive_filter = None restrict_ts_regex = False for k, v in iteritems(kwargs): if k == "graph": graph = v if graph is not None and not isinstance(graph, tf_ops.Graph): raise TypeError("Expected a tf.Graph, got {}".format(type(graph))) elif k == "positive_filter": positive_filter = v elif k == "restrict_ts_regex": restrict_ts_regex = v elif k == "restrict_ops_regex": pass else: raise ValueError("Wrong keywords argument: {}.".format(k)) ts = [] for arg in args: if can_be_regex(arg): if graph is None: raise ValueError("Use the keyword argument 'graph' to use regex.") regex = make_regex(arg) if regex.pattern.startswith("(?#ops)"): continue if restrict_ts_regex and not regex.pattern.startswith("(?#ts)"): continue ts_ = filter_ts_from_regex(graph, regex) for t_ in ts_: if t_ not in ts: if positive_filter is None or positive_filter(t_): ts.append(t_) else: ts_aux = util.make_list_of_t(arg, ignore_ops=True) if positive_filter is not None: ts_aux = [t for t in ts_aux if positive_filter(t)] ts_aux = [t for t in ts_aux if t not in ts] ts += ts_aux return ts def select_ops_and_ts(*args, **kwargs): """Helper to select operations and tensors. Args: *args: list of 1) regular expressions (compiled or not) or 2) (array of) `tf.Operation` 3) (array of) tf.Tensor. Regular expressions matching tensors must start with the comment `"(?#ts)"`, for instance: `"(?#ts)^foo/.*"`. **kwargs: 'graph': `tf.Graph` in which to perform the regex query.This is required when using regex. 
'positive_filter': an elem if selected only if `positive_filter(elem)` is `True`. This is optional. Returns: A tuple `(ops, ts)` where: `ops` is a list of `tf.Operation`, and `ts` is a list of `tf.Tensor` Raises: TypeError: if the optional keyword argument graph is not a `tf.Graph` or if an argument in args is not an (array of) `tf.Tensor` or an (array of) `tf.Operation` or a string or a regular expression. ValueError: if one of the keyword arguments is unexpected or if a regular expression is used without passing a graph as a keyword argument. """ ops = select_ops(*args, restrict_ops_regex=False, **kwargs) ts = select_ts(*args, restrict_ts_regex=True, **kwargs) return ops, ts
apache-2.0
-2,682,866,556,799,101,000
6,306,134,345,449,295,000
35.927835
80
0.655709
false
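The graph_editor selection helpers above are pure functions over a tf.Graph; a minimal sketch of regex selection and a backward walk, using a throwaway graph as placeholder input (TF 1.x / contrib era, matching the record):

import tensorflow as tf
from tensorflow.contrib.graph_editor import select

g = tf.Graph()
with g.as_default():
    a = tf.constant(1.0, name="foo/a")
    b = tf.add(a, 2.0, name="foo/b")

# Regex selection requires the graph keyword, as select_ops() documents above.
foo_ops = select.select_ops("^foo/.*$", graph=g)

# Everything b depends on, excluding b's own op.
deps = select.get_backward_walk_ops(b.op, inclusive=False)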
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/hooks/component_cornice.py
2
1974
"""Instrumentation for the Cornice REST library for Pyramid. """ import functools from newrelic.agent import (ObjectProxy, function_wrapper, callable_name, current_transaction, FunctionTrace, wrap_function_wrapper) module_cornice_service = None @function_wrapper def wrapper_Resource_method(wrapped, instance, args, kwargs): transaction = current_transaction() if transaction is None: return wrapped(*args, **kwargs) name = callable_name(wrapped) transaction.set_transaction_name(name) with FunctionTrace(transaction, name): return wrapped(*args, **kwargs) def wrapper_Resource(view): @function_wrapper def _wrapper_Resource(wrapped, instance, args, kwargs): ob = wrapped(*args, **kwargs) method = getattr(ob, view) setattr(ob, view, wrapper_Resource_method(method)) return ob return _wrapper_Resource def wrapper_decorate_view(wrapped, instance, args, kwargs): def _bind_params(view, args, method): return view, args, method _view, _args, _method = _bind_params(*args, **kwargs) if 'klass' in _args and not callable(_view): if module_cornice_service.is_string(_view): _klass = _args['klass'] _args = dict(_args) _args['klass'] = wrapper_Resource(_view)(_klass) return wrapped(_view, _args, _method) # For Cornice 0.17 or older we need to fixup the fact that they do # not copy the wrapped view attributes to the wrapper it returns. # This is only needed where the view is not a string. wrapper = wrapped(*args, **kwargs) if not module_cornice_service.is_string(_view): if wrapper.__name__ != _view.__name__: return functools.wraps(_view)(wrapper) return wrapper def instrument_cornice_service(module): global module_cornice_service module_cornice_service = module wrap_function_wrapper(module, 'decorate_view', wrapper_decorate_view)
agpl-3.0
-3,397,589,709,252,855,000
3,692,343,767,085,909,000
29.84375
73
0.668693
false
nrb/ansible-modules-extras
cloud/amazon/route53_zone.py
37
5487
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' module: route53_zone short_description: add or delete Route53 zones description: - Creates and deletes Route53 private and public zones version_added: "2.0" options: zone: description: - "The DNS zone record (eg: foo.com.)" required: true state: description: - whether or not the zone should exist or not required: false default: true choices: [ "present", "absent" ] vpc_id: description: - The VPC ID the zone should be a part of (if this is going to be a private zone) required: false default: null vpc_region: description: - The VPC Region the zone should be a part of (if this is going to be a private zone) required: false default: null comment: description: - Comment associated with the zone required: false default: '' extends_documentation_fragment: aws author: "Christopher Troup (@minichate)" ''' import time try: import boto import boto.ec2 from boto import route53 from boto.route53 import Route53Connection from boto.route53.zone import Zone HAS_BOTO = True except ImportError: HAS_BOTO = False def main(): module = AnsibleModule( argument_spec=dict( zone=dict(required=True), state=dict(default='present', choices=['present', 'absent']), vpc_id=dict(default=None), vpc_region=dict(default=None), comment=dict(default=''), ) ) if not HAS_BOTO: module.fail_json(msg='boto required for this module') zone_in = module.params.get('zone').lower() state = module.params.get('state').lower() vpc_id = module.params.get('vpc_id') vpc_region = module.params.get('vpc_region') comment = module.params.get('comment') private_zone = vpc_id is not None and vpc_region is not None _, _, aws_connect_kwargs = get_aws_connection_info(module) # connect to the route53 endpoint try: conn = Route53Connection(**aws_connect_kwargs) except boto.exception.BotoServerError, e: module.fail_json(msg=e.error_message) results = conn.get_all_hosted_zones() zones = {} for r53zone in results['ListHostedZonesResponse']['HostedZones']: zone_id = r53zone['Id'].replace('/hostedzone/', '') zone_details = conn.get_hosted_zone(zone_id)['GetHostedZoneResponse'] if vpc_id and 'VPCs' in zone_details: # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882 if isinstance(zone_details['VPCs'], dict): if zone_details['VPCs']['VPC']['VPCId'] == vpc_id: zones[r53zone['Name']] = zone_id else: # Forward compatibility for when boto fixes that bug if vpc_id in [v['VPCId'] for v in zone_details['VPCs']]: zones[r53zone['Name']] = zone_id else: zones[r53zone['Name']] = zone_id record = { 'private_zone': private_zone, 'vpc_id': vpc_id, 'vpc_region': vpc_region, 'comment': comment, } if state == 'present' and zone_in in zones: if private_zone: details = conn.get_hosted_zone(zones[zone_in]) if 'VPCs' not in details['GetHostedZoneResponse']: module.fail_json( msg="Can't change VPC from public to private" ) vpc_details = details['GetHostedZoneResponse']['VPCs']['VPC'] 
current_vpc_id = vpc_details['VPCId'] current_vpc_region = vpc_details['VPCRegion'] if current_vpc_id != vpc_id: module.fail_json( msg="Can't change VPC ID once a zone has been created" ) if current_vpc_region != vpc_region: module.fail_json( msg="Can't change VPC Region once a zone has been created" ) record['zone_id'] = zones[zone_in] record['name'] = zone_in module.exit_json(changed=False, set=record) elif state == 'present': result = conn.create_hosted_zone(zone_in, **record) hosted_zone = result['CreateHostedZoneResponse']['HostedZone'] zone_id = hosted_zone['Id'].replace('/hostedzone/', '') record['zone_id'] = zone_id record['name'] = zone_in module.exit_json(changed=True, set=record) elif state == 'absent' and zone_in in zones: conn.delete_hosted_zone(zones[zone_in]) module.exit_json(changed=True) elif state == 'absent': module.exit_json(changed=False) from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * main()
gpl-3.0
2,165,733,703,494,000,400
7,604,880,128,248,336,000
32.457317
97
0.608347
false
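The route53_zone module above carries a DOCUMENTATION block but no EXAMPLES block; a minimal one, using only the documented options and placeholder values, might read:

EXAMPLES = '''
# Create a public zone (placeholder domain).
- route53_zone: zone=example.com comment="public zone" state=present

# Create a private zone attached to a VPC (placeholder ids).
- route53_zone: zone=internal.example.com vpc_id=vpc-123456 vpc_region=us-east-1 state=present
'''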
KaranToor/MA450
google-cloud-sdk/.install/.backup/platform/ext-runtime/ruby/test/runtime_test.py
2
18352
#!/usr/bin/python # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from gae_ext_runtime import testutil RUNTIME_DEF_ROOT = os.path.dirname(os.path.dirname(__file__)) DOCKERFILE_TEXT = '''\ # This Dockerfile for a Ruby application was generated by gcloud. # The base Dockerfile installs: # * A number of packages needed by the Ruby runtime and by gems # commonly used in Ruby web apps (such as libsqlite3) # * A recent version of NodeJS # * A recent version of the standard Ruby runtime to use by default # * The bundler gem FROM gcr.io/google_appengine/ruby:{base_image_tag} # If your application requires a specific ruby version (compatible with rbenv), # set it here. Leave blank to use the currently recommended default. ARG REQUESTED_RUBY_VERSION="{ruby_version}" # Install any requested ruby if not already preinstalled by the base image. # Tries installing a prebuilt package first, then falls back to a source build. RUN if test -n "$REQUESTED_RUBY_VERSION" -a \\ ! -x /rbenv/versions/$REQUESTED_RUBY_VERSION/bin/ruby; then \\ (apt-get update -y \\ && apt-get install -y -q gcp-ruby-$REQUESTED_RUBY_VERSION) \\ || (cd /rbenv/plugins/ruby-build \\ && git pull \\ && rbenv install -s $REQUESTED_RUBY_VERSION) \\ && rbenv global $REQUESTED_RUBY_VERSION \\ && gem install -q --no-rdoc --no-ri bundler --version $BUNDLER_VERSION \\ && apt-get clean \\ && rm -f /var/lib/apt/lists/*_*; \\ fi ENV RBENV_VERSION=${{REQUESTED_RUBY_VERSION:-$RBENV_VERSION}} # Copy the application files. COPY . /app/ # Install required gems if Gemfile.lock is present. RUN if test -f Gemfile.lock; then \\ bundle install --deployment --without="development test" \\ && rbenv rehash; \\ fi # Temporary. Will be moved to base image later. ENV RACK_ENV=production \\ RAILS_ENV=production \\ RAILS_SERVE_STATIC_FILES=true # Run asset pipeline if we're in a Rails app. RUN if test -d app/assets -a -f config/application.rb; then \\ bundle exec rake assets:precompile || true; \\ fi # BUG: Reset entrypoint to override base image. ENTRYPOINT [] # Start application on port $PORT. CMD {entrypoint} ''' class RuntimeTestCase(testutil.TestBase): """Tests for the Ruby external runtime fingerprinter.""" def file_contents(self, filename): """Reads the contents of the file from the tempdir. Args: filename: (str) filename to be joined with tempdir prefix. Returns: File contents. """ with open(self.full_path(filename)) as f: return f.read() def stub_response(self, response): """Stubs the console response from the user. Args: response: (str) stubbed response. Returns: A function to reset the stubbed functions to their original implementations. 
""" can_prompt = self.exec_env.CanPrompt prompt_response = self.exec_env.PromptResponse def unstub(): self.exec_env.CanPrompt = can_prompt self.exec_env.PromptResponse = prompt_response self.exec_env.CanPrompt = lambda: True self.exec_env.PromptResponse = lambda prompt: response return unstub def setUp(self): self.runtime_def_root = RUNTIME_DEF_ROOT super(RuntimeTestCase, self).setUp() def test_generate_without_ruby_files(self): self.write_file('index.html', 'index') self.generate_configs() self.assertFalse(os.path.exists(self.full_path('app.yaml'))) self.assertFalse(os.path.exists(self.full_path('Dockerfile'))) self.assertFalse(os.path.exists(self.full_path('.dockerignore'))) def test_generate_without_ruby_files_no_write(self): """Tests generate_config_data does nothing if no ruby files.""" self.write_file('index.html', 'index') self.assertIsNone(self.generate_config_data()) self.assertFalse(os.path.exists(self.full_path('app.yaml'))) def test_generate_with_ruby_files(self): self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') self.write_file('config.ru', 'run Index.app') unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment') self.generate_configs() unstub() app_yaml = self.file_contents('app.yaml') self.assertIn('runtime: ruby\n', app_yaml) self.assertIn('env: flex\n', app_yaml) self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n', app_yaml) self.assertFalse(os.path.exists(self.full_path('Dockerfile'))) self.assertFalse(os.path.exists(self.full_path('.dockerignore'))) def test_generate_with_ruby_files_no_write(self): """Tests generate_config_data with basic Ruby files. Tests that app.yaml is written with correct contents given entrypoint response, and that Dockerfile and .dockerignore not written to disk. """ self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') self.write_file('config.ru', 'run Index.app') unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment') cfg_files = self.generate_config_data() unstub() app_yaml = self.file_contents('app.yaml') self.assertIn('runtime: ruby\n', app_yaml) self.assertIn('env: flex\n', app_yaml) self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n', app_yaml) self.assertNotIn('Dockerfile', [f.filename for f in cfg_files]) self.assertNotIn('.dockerignore', [f.filename for f in cfg_files]) def test_generate_with_deploy(self): self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') self.write_file('config.ru', 'run Index.app') self.write_file('.ruby-version', 'rbx-3.9') unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment') self.generate_configs(deploy=True) unstub() dockerfile = self.file_contents('Dockerfile') self.assertEqual( dockerfile, DOCKERFILE_TEXT.format( ruby_version='rbx-3.9', entrypoint='bundle exec rackup -p $PORT -E deployment')) dockerignore = self.file_contents('.dockerignore') self.assertIn('.dockerignore\n', dockerignore) self.assertIn('Dockerfile\n', dockerignore) self.assertIn('.git\n', dockerignore) self.assertIn('.hg\n', dockerignore) self.assertIn('.svn\n', dockerignore) def test_generate_with_deploy_no_write(self): """Tests generate_config_data with deploy=True. Tests that .dockerignore and Dockerfile contents are correct based on contents of app. 
""" self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') self.write_file('config.ru', 'run Index.app') self.write_file('.ruby-version', 'rbx-3.9') unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment') cfg_files = self.generate_config_data(deploy=True) unstub() self.assert_genfile_exists_with_contents( cfg_files, 'Dockerfile', DOCKERFILE_TEXT.format( ruby_version='rbx-3.9', entrypoint='bundle exec rackup -p $PORT -E deployment')) self.assertIn('.dockerignore', [f.filename for f in cfg_files]) dockerignore = [f.contents for f in cfg_files if f.filename == '.dockerignore'][0] self.assertIn('.dockerignore\n', dockerignore) self.assertIn('Dockerfile\n', dockerignore) self.assertIn('.git\n', dockerignore) self.assertIn('.hg\n', dockerignore) self.assertIn('.svn\n', dockerignore) def test_generate_with_custom(self): self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') self.write_file('config.ru', 'run Index.app') unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment') self.generate_configs(custom=True) unstub() app_yaml = self.file_contents('app.yaml') self.assertIn('runtime: custom\n', app_yaml) self.assertIn('env: flex\n', app_yaml) self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n', app_yaml) dockerfile = self.file_contents('Dockerfile') self.assertEqual( dockerfile, DOCKERFILE_TEXT.format( ruby_version='', entrypoint='bundle exec rackup -p $PORT -E deployment')) dockerignore = self.file_contents('.dockerignore') self.assertIn('.dockerignore\n', dockerignore) self.assertIn('Dockerfile\n', dockerignore) self.assertIn('.git\n', dockerignore) self.assertIn('.hg\n', dockerignore) self.assertIn('.svn\n', dockerignore) def test_generate_with_custom_no_write(self): """Tests generate_config_data with custom=True. Tests that app.yaml is written with correct parameters and Dockerfile, .dockerignore contents are correctly returned by method. 
""" self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') self.write_file('config.ru', 'run Index.app') unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment') cfg_files = self.generate_config_data(custom=True) unstub() app_yaml = self.file_contents('app.yaml') self.assertIn('runtime: custom\n', app_yaml) self.assertIn('env: flex\n', app_yaml) self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n', app_yaml) self.assert_genfile_exists_with_contents( cfg_files, 'Dockerfile', DOCKERFILE_TEXT.format( ruby_version='', entrypoint='bundle exec rackup -p $PORT -E deployment')) self.assertIn('.dockerignore', [f.filename for f in cfg_files]) dockerignore = [f.contents for f in cfg_files if f.filename == '.dockerignore'][0] self.assertIn('.dockerignore\n', dockerignore) self.assertIn('Dockerfile\n', dockerignore) self.assertIn('.git\n', dockerignore) self.assertIn('.hg\n', dockerignore) self.assertIn('.svn\n', dockerignore) def test_generate_with_existing_appinfo(self): self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') self.write_file('config.ru', 'run Index.app') appinfo = testutil.AppInfoFake( entrypoint='bundle exec ruby index.rb $PORT', runtime='ruby', vm=True) self.generate_configs(appinfo=appinfo, deploy=True) self.assertFalse(os.path.exists(self.full_path('app.yaml'))) dockerfile = self.file_contents('Dockerfile') self.assertEqual( dockerfile, DOCKERFILE_TEXT.format( ruby_version='', entrypoint='bundle exec ruby index.rb $PORT')) dockerignore = self.file_contents('.dockerignore') self.assertIn('.dockerignore\n', dockerignore) self.assertIn('Dockerfile\n', dockerignore) self.assertIn('.git\n', dockerignore) self.assertIn('.hg\n', dockerignore) self.assertIn('.svn\n', dockerignore) def test_generate_with_existing_appinfo_no_write(self): """Tests generate_config_data with passed appinfo.""" self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') self.write_file('config.ru', 'run Index.app') appinfo = testutil.AppInfoFake( entrypoint='bundle exec ruby index.rb $PORT', runtime='ruby', vm=True) cfg_files = self.generate_config_data(appinfo=appinfo, deploy=True) self.assertFalse(os.path.exists(self.full_path('app.yaml'))) self.assert_genfile_exists_with_contents( cfg_files, 'Dockerfile', DOCKERFILE_TEXT.format( ruby_version='', entrypoint='bundle exec ruby index.rb $PORT')) self.assertIn('.dockerignore', [f.filename for f in cfg_files]) dockerignore = [f.contents for f in cfg_files if f.filename == '.dockerignore'][0] self.assertIn('.dockerignore\n', dockerignore) self.assertIn('Dockerfile\n', dockerignore) self.assertIn('.git\n', dockerignore) self.assertIn('.hg\n', dockerignore) self.assertIn('.svn\n', dockerignore) def test_generate_with_ruby_version(self): self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') self.write_file('config.ru', 'run Index.app') self.write_file('.ruby-version', '2.3.1\n') appinfo = testutil.AppInfoFake( entrypoint='bundle exec ruby index.rb $PORT', runtime='ruby', vm=True) self.generate_configs(appinfo=appinfo, deploy=True) self.assertFalse(os.path.exists(self.full_path('app.yaml'))) dockerfile = self.file_contents('Dockerfile') self.assertEqual( dockerfile, DOCKERFILE_TEXT.format( ruby_version='2.3.1', entrypoint='bundle exec ruby index.rb $PORT')) dockerignore = self.file_contents('.dockerignore') 
self.assertIn('.dockerignore\n', dockerignore) self.assertIn('Dockerfile\n', dockerignore) self.assertIn('.git\n', dockerignore) self.assertIn('.hg\n', dockerignore) self.assertIn('.svn\n', dockerignore) def test_generate_with_ruby_version_no_write(self): """Tests generate_config_data with .ruby-version file.""" self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') self.write_file('config.ru', 'run Index.app') self.write_file('.ruby-version', '2.3.1\n') appinfo = testutil.AppInfoFake( entrypoint='bundle exec ruby index.rb $PORT', runtime='ruby', vm=True) cfg_files = self.generate_config_data(appinfo=appinfo, deploy=True) self.assertFalse(os.path.exists(self.full_path('app.yaml'))) self.assert_genfile_exists_with_contents( cfg_files, 'Dockerfile', DOCKERFILE_TEXT.format( ruby_version='2.3.1', entrypoint='bundle exec ruby index.rb $PORT')) self.assertIn('.dockerignore', [f.filename for f in cfg_files]) dockerignore = [f.contents for f in cfg_files if f.filename == '.dockerignore'][0] self.assertIn('.dockerignore\n', dockerignore) self.assertIn('Dockerfile\n', dockerignore) self.assertIn('.git\n', dockerignore) self.assertIn('.hg\n', dockerignore) self.assertIn('.svn\n', dockerignore) def test_generate_with_prompt(self): self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') unstub = self.stub_response('bundle exec ruby index.rb $PORT') self.generate_configs(deploy=True) unstub() dockerfile = self.file_contents('Dockerfile') self.assertEqual( dockerfile, DOCKERFILE_TEXT.format( ruby_version='', entrypoint='bundle exec ruby index.rb $PORT')) dockerignore = self.file_contents('.dockerignore') self.assertIn('.dockerignore\n', dockerignore) self.assertIn('Dockerfile\n', dockerignore) self.assertIn('.git\n', dockerignore) self.assertIn('.hg\n', dockerignore) self.assertIn('.svn\n', dockerignore) def test_generate_with_prompt_no_write(self): """Tests generate_config_data with entrypoint given by prompt.""" self.write_file('index.rb', 'class Index; end') self.write_file('Gemfile', 'source "https://rubygems.org"') unstub = self.stub_response('bundle exec ruby index.rb $PORT') cfg_files = self.generate_config_data(deploy=True) unstub() self.assert_genfile_exists_with_contents( cfg_files, 'Dockerfile', DOCKERFILE_TEXT.format( ruby_version='', entrypoint='bundle exec ruby index.rb $PORT')) self.assertIn('.dockerignore', [f.filename for f in cfg_files]) dockerignore = [f.contents for f in cfg_files if f.filename == '.dockerignore'][0] self.assertIn('.dockerignore\n', dockerignore) self.assertIn('Dockerfile\n', dockerignore) self.assertIn('.git\n', dockerignore) self.assertIn('.hg\n', dockerignore) self.assertIn('.svn\n', dockerignore) if __name__ == '__main__': unittest.main()
apache-2.0
-8,639,986,368,576,278,000
-5,825,352,261,694,108,000
38.722944
80
0.609797
false
premanandchandrasekar/boto
boto/ec2/buyreservation.py
56
3813
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import boto.ec2 from boto.sdb.db.property import StringProperty, IntegerProperty from boto.manage import propget InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge', 't1.micro'] class BuyReservation(object): def get_region(self, params): if not params.get('region', None): prop = StringProperty(name='region', verbose_name='EC2 Region', choices=boto.ec2.regions) params['region'] = propget.get(prop, choices=boto.ec2.regions) def get_instance_type(self, params): if not params.get('instance_type', None): prop = StringProperty(name='instance_type', verbose_name='Instance Type', choices=InstanceTypes) params['instance_type'] = propget.get(prop) def get_quantity(self, params): if not params.get('quantity', None): prop = IntegerProperty(name='quantity', verbose_name='Number of Instances') params['quantity'] = propget.get(prop) def get_zone(self, params): if not params.get('zone', None): prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', choices=self.ec2.get_all_zones) params['zone'] = propget.get(prop) def get(self, params): self.get_region(params) self.ec2 = params['region'].connect() self.get_instance_type(params) self.get_zone(params) self.get_quantity(params) if __name__ == "__main__": obj = BuyReservation() params = {} obj.get(params) offerings = obj.ec2.get_all_reserved_instances_offerings(instance_type=params['instance_type'], availability_zone=params['zone'].name) print '\nThe following Reserved Instances Offerings are available:\n' for offering in offerings: offering.describe() prop = StringProperty(name='offering', verbose_name='Offering', choices=offerings) offering = propget.get(prop) print '\nYou have chosen this offering:' offering.describe() unit_price = float(offering.fixed_price) total_price = unit_price * params['quantity'] print '!!! You are about to purchase %d of these offerings for a total of $%.2f !!!' % (params['quantity'], total_price) answer = raw_input('Are you sure you want to do this? If so, enter YES: ') if answer.strip().lower() == 'yes': offering.purchase(params['quantity']) else: print 'Purchase cancelled'
mit
8,079,379,675,302,432,000
-7,631,533,063,041,277,000
44.392857
124
0.644375
false
vileopratama/vitech
src/openerp/report/printscreen/ps_list.py
48
11008
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import openerp from openerp.report.interface import report_int import openerp.tools as tools from openerp.tools.safe_eval import safe_eval as eval from lxml import etree from openerp.report import render, report_sxw import locale import time, os from operator import itemgetter from datetime import datetime class report_printscreen_list(report_int): def __init__(self, name): report_int.__init__(self, name) self.context = {} self.groupby = [] self.cr='' def _parse_node(self, root_node): result = [] for node in root_node: field_name = node.get('name') if not eval(str(node.attrib.get('invisible',False)),{'context':self.context}): if node.tag == 'field': if field_name in self.groupby: continue result.append(field_name) else: result.extend(self._parse_node(node)) return result def _parse_string(self, view): try: dom = etree.XML(view.encode('utf-8')) except Exception: dom = etree.XML(view) return self._parse_node(dom) def create(self, cr, uid, ids, datas, context=None): if not context: context={} self.cr=cr self.context = context self.groupby = context.get('group_by',[]) self.groupby_no_leaf = context.get('group_by_no_leaf',False) registry = openerp.registry(cr.dbname) model = registry[datas['model']] model_id = registry['ir.model'].search(cr, uid, [('model','=',model._name)]) model_desc = model._description if model_id: model_desc = registry['ir.model'].browse(cr, uid, model_id[0], context).name self.title = model_desc datas['ids'] = ids result = model.fields_view_get(cr, uid, view_type='tree', context=context) fields_order = self.groupby + self._parse_string(result['arch']) if self.groupby: rows = [] def get_groupby_data(groupby = [], domain = []): records = model.read_group(cr, uid, domain, fields_order, groupby , 0, None, context) for rec in records: rec['__group'] = True rec['__no_leaf'] = self.groupby_no_leaf rec['__grouped_by'] = groupby[0] if (isinstance(groupby, list) and groupby) else groupby for f in fields_order: if f not in rec: rec.update({f:False}) elif isinstance(rec[f], tuple): rec[f] = rec[f][1] rows.append(rec) inner_groupby = (rec.get('__context', {})).get('group_by',[]) inner_domain = rec.get('__domain', []) if inner_groupby: get_groupby_data(inner_groupby, inner_domain) else: if self.groupby_no_leaf: continue child_ids = model.search(cr, uid, inner_domain) res = model.read(cr, uid, child_ids, result['fields'].keys(), context) res.sort(lambda x,y: cmp(ids.index(x['id']), ids.index(y['id']))) rows.extend(res) dom = [('id','in',ids)] if self.groupby_no_leaf and len(ids) and not ids[0]: dom = datas.get('_domain',[]) get_groupby_data(self.groupby, dom) else: rows = model.read(cr, uid, datas['ids'], result['fields'].keys(), context) ids2 = map(itemgetter('id'), rows) # getting the ids from read result if datas['ids'] != ids2: # sorted ids were not taken into consideration for print screen rows_new = [] for id in datas['ids']: rows_new += [elem for elem in rows if elem['id'] == id] rows = rows_new res = self._create_table(uid, datas['ids'], result['fields'], fields_order, rows, context, model_desc) return self.obj.get(), 'pdf' def _create_table(self, uid, ids, fields, fields_order, results, context, title=''): pageSize=[297.0, 210.0] new_doc = etree.Element("report") config = etree.SubElement(new_doc, 'config') def _append_node(name, text): n = etree.SubElement(config, name) n.text = text #_append_node('date', time.strftime('%d/%m/%Y')) _append_node('date', 
time.strftime(str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y')))) _append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize)) _append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,)) _append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,)) _append_node('report-header', title) registry = openerp.registry(self.cr.dbname) _append_node('company', registry['res.users'].browse(self.cr,uid,uid).company_id.name) rpt_obj = registry['res.users'] rml_obj=report_sxw.rml_parse(self.cr, uid, rpt_obj._name,context) _append_node('header-date', str(rml_obj.formatLang(time.strftime("%Y-%m-%d"),date=True))+' ' + str(time.strftime("%H:%M"))) l = [] t = 0 strmax = (pageSize[0]-40) * 2.8346 temp = [] tsum = [] for i in range(0, len(fields_order)): temp.append(0) tsum.append(0) ince = -1 for f in fields_order: s = 0 ince += 1 if fields[f]['type'] in ('date','time','datetime','float','integer'): s = 60 strmax -= s if fields[f]['type'] in ('float','integer'): temp[ince] = 1 else: t += fields[f].get('size', 80) / 28 + 1 l.append(s) for pos in range(len(l)): if not l[pos]: s = fields[fields_order[pos]].get('size', 80) / 28 + 1 l[pos] = strmax * s / t _append_node('tableSize', ','.join(map(str,l)) ) header = etree.SubElement(new_doc, 'header') for f in fields_order: field = etree.SubElement(header, 'field') field.text = tools.ustr(fields[f]['string'] or '') lines = etree.SubElement(new_doc, 'lines') for line in results: node_line = etree.SubElement(lines, 'row') count = -1 for f in fields_order: float_flag = 0 count += 1 if fields[f]['type']=='many2one' and line[f]: if not line.get('__group'): line[f] = line[f][1] if fields[f]['type']=='selection' and line[f]: for key, value in fields[f]['selection']: if key == line[f]: line[f] = value break if fields[f]['type'] in ('one2many','many2many') and line[f]: line[f] = '( '+tools.ustr(len(line[f])) + ' )' if fields[f]['type'] == 'float' and line[f]: precision=(('digits' in fields[f]) and fields[f]['digits'][1]) or 2 prec ='%.' 
+ str(precision) +'f' line[f]=prec%(line[f]) float_flag = 1 if fields[f]['type'] == 'date' and line[f]: new_d1 = line[f] if not line.get('__group'): format = str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y')) d1 = datetime.strptime(line[f],'%Y-%m-%d') new_d1 = d1.strftime(format) line[f] = new_d1 if fields[f]['type'] == 'time' and line[f]: new_d1 = line[f] if not line.get('__group'): format = str(locale.nl_langinfo(locale.T_FMT)) d1 = datetime.strptime(line[f], '%H:%M:%S') new_d1 = d1.strftime(format) line[f] = new_d1 if fields[f]['type'] == 'datetime' and line[f]: new_d1 = line[f] if not line.get('__group'): format = str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))+' '+str(locale.nl_langinfo(locale.T_FMT)) d1 = datetime.strptime(line[f], '%Y-%m-%d %H:%M:%S') new_d1 = d1.strftime(format) line[f] = new_d1 if line.get('__group'): col = etree.SubElement(node_line, 'col', para='group', tree='no') else: col = etree.SubElement(node_line, 'col', para='yes', tree='no') # Prevent empty labels in groups if f == line.get('__grouped_by') and line.get('__group') and not line[f] and not float_flag and not temp[count]: col.text = line[f] = 'Undefined' col.set('tree', 'undefined') if line[f] is not None: col.text = tools.ustr(line[f] or '') if float_flag: col.set('tree','float') if line.get('__no_leaf') and temp[count] == 1 and f != 'id' and not line['__context']['group_by']: tsum[count] = float(tsum[count]) + float(line[f]) if not line.get('__group') and f != 'id' and temp[count] == 1: tsum[count] = float(tsum[count]) + float(line[f]) else: col.text = '/' node_line = etree.SubElement(lines, 'row') for f in range(0, len(fields_order)): col = etree.SubElement(node_line, 'col', para='group', tree='no') col.set('tree', 'float') if tsum[f] is not None: if tsum[f] != 0.0: digits = fields[fields_order[f]].get('digits', (16, 2)) prec = '%%.%sf' % (digits[1], ) total = prec % (tsum[f], ) txt = str(total or '') else: txt = str(tsum[f] or '') else: txt = '/' if f == 0: txt ='Total' col.set('tree','no') col.text = tools.ustr(txt or '') transform = etree.XSLT( etree.parse(os.path.join(tools.config['root_path'], 'addons/base/report/custom_new.xsl'))) rml = etree.tostring(transform(new_doc)) self.obj = render.rml(rml, title=self.title) self.obj.render() return True report_printscreen_list('report.printscreen.list')
mit
3,983,894,288,318,291,500
-3,076,435,409,321,872,000
42.68254
132
0.477925
false
bigmlcom/python
bigml/tests/test_34_time_series.py
2
3565
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


""" Creating time series forecasts

"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_time_series_steps as time_series_create
from . import create_forecast_steps as forecast_create


class TestTimeSeries(object):

    def setup(self):
        """
            Debug information
        """
        print("\n-------------------\nTests in: %s\n" % __name__)

    def teardown(self):
        """
            Debug information
        """
        print("\nEnd of tests in: %s\n-------------------\n" % __name__)

    def test_scenario1(self):
        """
            Scenario: Successfully creating forecasts from a dataset:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a time series from a dataset
                And I wait until the time series is ready less than <time_3> secs
                And I update the time series name to "<time_series_name>"
                When I wait until the time series is ready less than <time_4> secs
                Then the time series name is "<time_series_name>"
                And I create a forecast for "<input_data>"
                Then the forecasts are "<forecast_points>"

                Examples:
                | data               | time_1 | time_2 | time_3 | time_4 | time_series_name        | input_data                 | forecast_points |
                | ../data/grades.csv | 10     | 10     | 20     | 50     | my new time_series name | {"000005": {"horizon": 5}} | {"000005": [{"point_forecast": [73.96192, 74.04106, 74.12029, 74.1996, 74.27899], "model": "M,M,N"}]} |
        """
        print(self.test_scenario1.__doc__)
        examples = [
            ['data/grades.csv', '30', '30', '50', '50', 'my new time series name',
             '{"000005": {"horizon": 5}}',
             '{"000005": [{"point_forecast": [73.96192, 74.04106, 74.12029, 74.1996, 74.27899], "model": "M,M,N"}]}']]
        for example in examples:
            print("\nTesting with:\n", example)
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            time_series_create.i_create_a_time_series(self)
            time_series_create.the_time_series_is_finished_in_less_than(self, example[3])
            time_series_create.i_update_time_series_name(self, example[5])
            time_series_create.the_time_series_is_finished_in_less_than(self, example[4])
            time_series_create.i_check_time_series_name(self, example[5])
            forecast_create.i_create_a_forecast(self, example[6])
            forecast_create.the_forecast_is(self, example[7])
apache-2.0
-8,900,209,487,065,437,000
442,664,600,348,079,940
44.705128
148
0.601683
false
shsingh/ansible
lib/ansible/modules/network/nxos/nxos_vpc_interface.py
18
10331
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = ''' --- module: nxos_vpc_interface extends_documentation_fragment: nxos version_added: "2.2" short_description: Manages interface VPC configuration description: - Manages interface VPC configuration author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) notes: - Tested against NXOSv 7.3.(0)D1(1) on VIRL - Either vpc or peer_link param is required, but not both. - C(state=absent) removes whatever VPC config is on a port-channel if one exists. - Re-assigning a vpc or peerlink from one portchannel to another is not supported. The module will force the user to unconfigure an existing vpc/pl before configuring the same value on a new portchannel options: portchannel: description: - Group number of the portchannel that will be configured. required: true vpc: description: - VPC group/id that will be configured on associated portchannel. peer_link: description: - Set to true/false for peer link config on associated portchannel. type: bool state: description: - Manages desired state of the resource. 
required: true choices: ['present','absent'] default: present ''' EXAMPLES = ''' - nxos_vpc_interface: portchannel: 10 vpc: 100 ''' RETURN = ''' commands: description: commands sent to the device returned: always type: list sample: ["interface port-channel100", "vpc 10"] ''' from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands from ansible.module_utils.network.nxos.nxos import nxos_argument_spec from ansible.module_utils.basic import AnsibleModule def flatten_list(command_lists): flat_command_list = [] for command in command_lists: if isinstance(command, list): flat_command_list.extend(command) else: flat_command_list.append(command) return flat_command_list def get_portchannel_list(module): portchannels = [] pc_list = [] try: body = run_commands(module, ['show port-channel summary | json'])[0] pc_list = body['TABLE_channel']['ROW_channel'] except (KeyError, AttributeError, TypeError): return portchannels if pc_list: if isinstance(pc_list, dict): pc_list = [pc_list] for pc in pc_list: portchannels.append(pc['group']) return portchannels def get_existing_portchannel_to_vpc_mappings(module): pc_vpc_mapping = {} try: body = run_commands(module, ['show vpc brief | json'])[0] vpc_table = body['TABLE_vpc']['ROW_vpc'] except (KeyError, AttributeError, TypeError): vpc_table = None if vpc_table: if isinstance(vpc_table, dict): vpc_table = [vpc_table] for vpc in vpc_table: pc_vpc_mapping[str(vpc['vpc-id'])] = str(vpc['vpc-ifindex']) return pc_vpc_mapping def peer_link_exists(module): found = False run = get_config(module, flags=['vpc']) vpc_list = run.split('\n') for each in vpc_list: if 'peer-link' in each: found = True return found def get_active_vpc_peer_link(module): peer_link = None try: body = run_commands(module, ['show vpc brief | json'])[0] peer_link = body['TABLE_peerlink']['ROW_peerlink']['peerlink-ifindex'] except (KeyError, AttributeError, TypeError): return peer_link return peer_link def get_portchannel_vpc_config(module, portchannel): peer_link_pc = None peer_link = False vpc = "" pc = "" config = {} try: body = run_commands(module, ['show vpc brief | json'])[0] table = body['TABLE_peerlink']['ROW_peerlink'] except (KeyError, AttributeError, TypeError): table = {} if table: peer_link_pc = table.get('peerlink-ifindex', None) if peer_link_pc: plpc = str(peer_link_pc[2:]) if portchannel == plpc: config['portchannel'] = portchannel config['peer-link'] = True config['vpc'] = vpc mapping = get_existing_portchannel_to_vpc_mappings(module) for existing_vpc, port_channel in mapping.items(): port_ch = str(port_channel[2:]) if port_ch == portchannel: pc = port_ch vpc = str(existing_vpc) config['portchannel'] = pc config['peer-link'] = peer_link config['vpc'] = vpc return config def get_commands_to_config_vpc_interface(portchannel, delta, config_value, existing): commands = [] if not delta.get('peer-link') and existing.get('peer-link'): commands.append('no vpc peer-link') commands.insert(0, 'interface port-channel{0}'.format(portchannel)) elif delta.get('peer-link') and not existing.get('peer-link'): commands.append('vpc peer-link') commands.insert(0, 'interface port-channel{0}'.format(portchannel)) elif delta.get('vpc') and not existing.get('vpc'): command = 'vpc {0}'.format(config_value) commands.append(command) commands.insert(0, 'interface port-channel{0}'.format(portchannel)) return commands def state_present(portchannel, delta, config_value, existing): commands = [] command = get_commands_to_config_vpc_interface( portchannel, delta, config_value, existing 
) commands.append(command) return commands def state_absent(portchannel, existing): commands = [] if existing.get('vpc'): command = 'no vpc' commands.append(command) elif existing.get('peer-link'): command = 'no vpc peer-link' commands.append(command) if commands: commands.insert(0, 'interface port-channel{0}'.format(portchannel)) return commands def main(): argument_spec = dict( portchannel=dict(required=True, type='str'), vpc=dict(required=False, type='str'), peer_link=dict(required=False, type='bool'), state=dict(choices=['absent', 'present'], default='present') ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['vpc', 'peer_link']], supports_check_mode=True) warnings = list() commands = [] results = {'changed': False, 'warnings': warnings} portchannel = module.params['portchannel'] vpc = module.params['vpc'] peer_link = module.params['peer_link'] state = module.params['state'] args = {'portchannel': portchannel, 'vpc': vpc, 'peer-link': peer_link} active_peer_link = None if portchannel not in get_portchannel_list(module): if not portchannel.isdigit() or int(portchannel) not in get_portchannel_list(module): module.fail_json(msg="The portchannel you are trying to make a" " VPC or PL is not created yet. " "Create it first!") if vpc: mapping = get_existing_portchannel_to_vpc_mappings(module) if vpc in mapping and portchannel != mapping[vpc].strip('Po'): module.fail_json(msg="This vpc is already configured on " "another portchannel. Remove it first " "before trying to assign it here. ", existing_portchannel=mapping[vpc]) for vpcid, existing_pc in mapping.items(): if portchannel == existing_pc.strip('Po') and vpcid != vpc: module.fail_json(msg="This portchannel already has another" " VPC configured. Remove it first " "before assigning this one", existing_vpc=vpcid) if peer_link_exists(module): active_peer_link = get_active_vpc_peer_link(module) if active_peer_link[-2:] == portchannel: module.fail_json(msg="That port channel is the current " "PEER LINK. Remove it if you want it" " to be a VPC") config_value = vpc elif peer_link is not None: if peer_link_exists(module): active_peer_link = get_active_vpc_peer_link(module)[2::] if active_peer_link != portchannel: if peer_link: module.fail_json(msg="A peer link already exists on" " the device. Remove it first", current_peer_link='Po{0}'.format(active_peer_link)) config_value = 'peer-link' proposed = dict((k, v) for k, v in args.items() if v is not None) existing = get_portchannel_vpc_config(module, portchannel) if state == 'present': delta = dict(set(proposed.items()).difference(existing.items())) if delta: commands = state_present(portchannel, delta, config_value, existing) elif state == 'absent' and existing: commands = state_absent(portchannel, existing) cmds = flatten_list(commands) if cmds: if module.check_mode: module.exit_json(changed=True, commands=cmds) else: load_config(module, cmds) results['changed'] = True if 'configure' in cmds: cmds.pop(0) results['commands'] = cmds module.exit_json(**results) if __name__ == '__main__': main()
gpl-3.0
-5,325,626,293,695,447,000
7,882,407,802,490,814,000
30.401216
93
0.60846
false
QTek/QRadio
tramatego/src/tramatego/transforms/ipv4_to_score.py
1
1161
#!/usr/bin/env python from canari.maltego.utils import debug, progress from canari.framework import configure #, superuser from canari.maltego.entities import IPv4Address, Phrase from common.launchers import get_qradio_data __author__ = 'Zappus' __copyright__ = 'Copyright 2016, TramaTego Project' __credits__ = [] __license__ = 'GPL' __version__ = '0.1' __maintainer__ = 'Zappus' __email__ = 'zappus@protonmail.com' __status__ = 'Development' __all__ = [ 'dotransform', #'onterminate' # comment out this line if you don't need this function. ] #@superuser @configure( label='IPv4 to Score', description='Converts IPv4 into Score using QRadio.', uuids=[ 'TramaTego.v1.IPv4ToScore' ], inputs=[ ( 'TramaTego', IPv4Address ) ], debug=True ) def dotransform(request, response, config): command = "--ipv4_to_score " + request.value qradio_output = get_qradio_data(command, 3) for entry in qradio_output: response += Phrase(entry) return response def onterminate(): """ TODO: Write your cleanup logic below or delete the onterminate function and remove it from the __all__ variable """ pass
apache-2.0
8,731,454,012,376,150,000
-8,591,176,247,446,955,000
24.822222
115
0.676141
false
coinkite/connectrum
connectrum/findall.py
1
4527
#!/usr/bin/env python3 # # import bottom, random, time, asyncio from .svr_info import ServerInfo import logging logger = logging.getLogger('connectrum') class IrcListener(bottom.Client): def __init__(self, irc_nickname=None, irc_password=None, ssl=True): self.my_nick = irc_nickname or 'XC%d' % random.randint(1E11, 1E12) self.password = irc_password or None self.results = {} # by hostname self.servers = set() self.all_done = asyncio.Event() super(IrcListener, self).__init__(host='irc.freenode.net', port=6697 if ssl else 6667, ssl=ssl) # setup event handling self.on('CLIENT_CONNECT', self.connected) self.on('PING', self.keepalive) self.on('JOIN', self.joined) self.on('RPL_NAMREPLY', self.got_users) self.on('RPL_WHOREPLY', self.got_who_reply) self.on("client_disconnect", self.reconnect) self.on('RPL_ENDOFNAMES', self.got_end_of_names) async def collect_data(self): # start it process self.loop.create_task(self.connect()) # wait until done await self.all_done.wait() # return the results return self.results def connected(self, **kwargs): logger.debug("Connected") self.send('NICK', nick=self.my_nick) self.send('USER', user=self.my_nick, realname='Connectrum Client') # long delay here as it does an failing Ident probe (10 seconds min) self.send('JOIN', channel='#electrum') #self.send('WHO', mask='E_*') def keepalive(self, message, **kwargs): self.send('PONG', message=message) async def joined(self, nick=None, **kwargs): # happens when we or someone else joins the channel # seem to take 10 seconds or longer for me to join logger.debug('Joined: %r' % kwargs) if nick != self.my_nick: await self.add_server(nick) async def got_who_reply(self, nick=None, real_name=None, **kws): ''' Server replied to one of our WHO requests, with details. ''' #logger.debug('who reply: %r' % kws) nick = nick[2:] if nick[0:2] == 'E_' else nick host, ports = real_name.split(' ', 1) self.servers.remove(nick) logger.debug("Found: '%s' at %s with port list: %s",nick, host, ports) self.results[host.lower()] = ServerInfo(nick, host, ports) if not self.servers: self.all_done.set() async def got_users(self, users=[], **kws): # After successful join to channel, we are given a list of # users on the channel. Happens a few times for busy channels. logger.debug('Got %d (more) users in channel', len(users)) for nick in users: await self.add_server(nick) async def add_server(self, nick): # ignore everyone but electrum servers if nick.startswith('E_'): self.servers.add(nick[2:]) async def who_worker(self): # Fetch details on each Electrum server nick we see logger.debug('who task starts') copy = self.servers.copy() for nn in copy: logger.debug('do WHO for: ' + nn) self.send('WHO', mask='E_'+nn) logger.debug('who task done') def got_end_of_names(self, *a, **k): logger.debug('Got all the user names') assert self.servers, "No one on channel!" # ask for details on all of those users self.loop.create_task(self.who_worker()) async def reconnect(self, **kwargs): # Trigger an event that may cascade to a client_connect. # Don't continue until a client_connect occurs, which may be never. logger.warn("Disconnected (will reconnect)") # Note that we're not in a coroutine, so we don't have access # to await and asyncio.sleep time.sleep(3) # After this line we won't necessarily be connected. 
# We've simply scheduled the connect to happen in the future self.loop.create_task(self.connect()) logger.debug("Reconnect scheduled.") if __name__ == '__main__': import logging logging.getLogger('bottom').setLevel(logging.DEBUG) logging.getLogger('connectrum').setLevel(logging.DEBUG) logging.getLogger('asyncio').setLevel(logging.DEBUG) bot = IrcListener(ssl=False) bot.loop.set_debug(True) fut = bot.collect_data() #bot.loop.create_task(bot.connect()) rv = bot.loop.run_until_complete(fut) print(rv)
mit
8,093,416,164,229,684,000
-5,412,866,719,244,360,000
31.106383
103
0.610559
false
wilima/cryptography
tests/test.py
1
3828
import unittest from cryptography import (eratosthenes, euler, extended_gcd, factorization, gcd, modular_multiplicative_inverse) from cryptography.ciphers import affine, shift, substitution, vigener from .context import cryptography class GcdTestSuite(unittest.TestCase): """Basic test cases.""" def test_gcd(self): self.assertEqual( gcd.gcd(1071, 462), 21) def test_gcd2(self): self.assertEqual( gcd.gcd(270, 192), 6) class ExtendedGcdTestSuite(unittest.TestCase): """Basic test cases.""" def test_extended_gcd(self): self.assertEqual( extended_gcd.extended_gcd(1914, 899), (29, 8, -17)) class ModularInverseTestSuite(unittest.TestCase): """Basic test cases.""" def test_modular_inverse(self): self.assertEqual( modular_multiplicative_inverse.inverse(5, 26), 21) class FactorizationTestSuite(unittest.TestCase): """Basic test cases.""" def test_factorization(self): self.assertEqual( factorization.integer_factorization(315), [3, 3, 5, 7]) class EratosthenesTestSuite(unittest.TestCase): """Basic test cases.""" def test_eratosthenes_sieve(self): self.assertEqual( eratosthenes.eratosthenes_sieve(20), [2, 3, 5, 7, 11, 13, 17, 19]) class EulerFunctionTestSuite(unittest.TestCase): """Basic test cases.""" def test_euler_function(self): self.assertEqual( euler.euler_function(1), 1) def test_euler_function2(self): self.assertEqual( euler.euler_function(5), 4) class ShiftCipherFunctionTestSuite(unittest.TestCase): """Basic test cases.""" def test_shift_encrypt_function(self): self.assertEqual( shift.encrypt('BARBARIUTOCI', 3), 'eduedulxwrfl'.upper()) def test_shift_decrypt_function(self): self.assertEqual( shift.decrypt('eduedulxwrfl', 3), 'BARBARIUTOCI') def test_shift_crack_function(self): self.assertEqual( 'BARBARIUTOCI' in shift.crack('eduedulxwrfl', 26), True) class AffineCipherFunctionTestSuite(unittest.TestCase): """Basic test cases.""" def test_affine_encrypt_function(self): self.assertEqual( affine.encrypt('THEINITIAL', (5, 9)), 'ASDXWXAXJM') def test_affine_decrypt_function(self): self.assertEqual( affine.decrypt('ASDXWXAXJM', (5, 9)), 'THEINITIAL') def test_affine_crack_function(self): self.assertEqual( 'THEINITIAL' in affine.crack('ASDXWXAXJM', 26), True) class SubstitutionCipherFunctionTestSuite(unittest.TestCase): """Basic test cases.""" def test_substitution_encrypt_function(self): self.assertEqual( substitution.encrypt('FLEEATONCEWEAREDISCOVERED', ('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'ZEBRASCDFGHIJKLMNOPQTUVWXY')), 'SIAAZQLKBAVAZOARFPBLUAOAR') def test_substitution_decrypt_function(self): self.assertEqual( substitution.decrypt('SIAAZQLKBAVAZOARFPBLUAOAR', ('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'ZEBRASCDFGHIJKLMNOPQTUVWXY')), 'FLEEATONCEWEAREDISCOVERED') class VigenerCipherFunctionTestSuite(unittest.TestCase): """Basic test cases.""" def test_vigener_encrypt_function(self): self.assertEqual( vigener.encrypt('KULTURNIATASEJESPION', 'PES'), 'ZYDIYJCMSIEKTNWHTADR') def test_vigener_decrypt_function(self): self.assertEqual( vigener.decrypt('ZYDIYJCMSIEKTNWHTADR', 'PES'), 'KULTURNIATASEJESPION') if __name__ == '__main__': unittest.main()
mit
-1,191,072,872,483,968,800
-8,073,504,345,474,465,000
26.148936
124
0.625653
false
scottdangelo/RemoveVolumeMangerLocks
cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py
5
5546
# Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the NetApp 7mode NFS storage driver """ import ddt import mock from os_brick.remotefs import remotefs as remotefs_brick from oslo_utils import units from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes from cinder import utils from cinder.volume.drivers.netapp.dataontap import nfs_7mode from cinder.volume.drivers.netapp import utils as na_utils @ddt.ddt class NetApp7modeNfsDriverTestCase(test.TestCase): def setUp(self): super(NetApp7modeNfsDriverTestCase, self).setUp() kwargs = {'configuration': self.get_config_7mode()} with mock.patch.object(utils, 'get_root_helper', return_value=mock.Mock()): with mock.patch.object(remotefs_brick, 'RemoteFsClient', return_value=mock.Mock()): self.driver = nfs_7mode.NetApp7modeNfsDriver(**kwargs) self.driver._mounted_shares = [fake.NFS_SHARE] self.driver.ssc_vols = True self.driver.zapi_client = mock.Mock() def get_config_7mode(self): config = na_fakes.create_configuration_cmode() config.netapp_storage_protocol = 'nfs' config.netapp_login = 'root' config.netapp_password = 'pass' config.netapp_server_hostname = '127.0.0.1' config.netapp_transport_type = 'http' config.netapp_server_port = '80' return config @ddt.data({'nfs_sparsed_volumes': True}, {'nfs_sparsed_volumes': False}) @ddt.unpack def test_get_pool_stats(self, nfs_sparsed_volumes): self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes thick = not nfs_sparsed_volumes total_capacity_gb = na_utils.round_down( fake.TOTAL_BYTES / units.Gi, '0.01') free_capacity_gb = na_utils.round_down( fake.AVAILABLE_BYTES / units.Gi, '0.01') provisioned_capacity_gb = total_capacity_gb - free_capacity_gb capacity = { 'reserved_percentage': fake.RESERVED_PERCENTAGE, 'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'provisioned_capacity_gb': provisioned_capacity_gb, } self.mock_object(self.driver, '_get_share_capacity_info', mock.Mock(return_value=capacity)) result = self.driver._get_pool_stats() expected = [{'pool_name': '192.168.99.24:/fake/export/path', 'QoS_support': False, 'thick_provisioning_support': thick, 'thin_provisioning_support': not thick, 'free_capacity_gb': 12.0, 'total_capacity_gb': 4468.0, 'reserved_percentage': 7, 'max_over_subscription_ratio': 19.0, 'provisioned_capacity_gb': 4456.0}] self.assertEqual(expected, result) def test_shortlist_del_eligible_files(self): mock_get_path_for_export = self.mock_object( self.driver.zapi_client, 'get_actual_path_for_export') mock_get_path_for_export.return_value = fake.FLEXVOL mock_get_file_usage = self.mock_object( self.driver.zapi_client, 'get_file_usage') mock_get_file_usage.return_value = fake.CAPACITY_VALUES[0] expected = [(old_file, fake.CAPACITY_VALUES[0]) for old_file in fake.FILE_LIST] result = 
self.driver._shortlist_del_eligible_files( fake.NFS_SHARE, fake.FILE_LIST) self.assertEqual(expected, result) def test_shortlist_del_eligible_files_empty_list(self): mock_get_export_ip_path = self.mock_object( self.driver, '_get_export_ip_path') mock_get_export_ip_path.return_value = ('', '/export_path') mock_get_path_for_export = self.mock_object( self.driver.zapi_client, 'get_actual_path_for_export') mock_get_path_for_export.return_value = fake.FLEXVOL result = self.driver._shortlist_del_eligible_files( fake.NFS_SHARE, []) self.assertEqual([], result) @ddt.data({'has_space': True, 'expected': True}, {'has_space': False, 'expected': False}) @ddt.unpack def test_is_share_clone_compatible(self, has_space, expected): mock_share_has_space_for_clone = self.mock_object( self.driver, '_share_has_space_for_clone') mock_share_has_space_for_clone.return_value = has_space result = self.driver._is_share_clone_compatible(fake.VOLUME, fake.NFS_SHARE) self.assertEqual(expected, result)
apache-2.0
-7,790,494,102,878,716,000
-1,734,985,231,134,613,200
39.481752
78
0.619726
false
mvesper/invenio
modules/websubmit/lib/functions/Test_Status.py
3
3087
# This file is part of Invenio. # Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. __revision__ = "$Id$" ## Description: function Test_Status ## This function checks whether the document is still waiting ## for approval or not. ## Author: T.Baron ## ## PARAMETERS: - from invenio.dbquery import run_sql from invenio.websubmit_config import InvenioWebSubmitFunctionStop def Test_Status(parameters, curdir, form, user_info=None): """ This function checks whether the considered document has been requested for approval and is still waiting for approval. It also checks whether the password stored in file 'password' of the submission directory corresponds to the password associated with the document. """ global rn res = run_sql("SELECT status, access FROM sbmAPPROVAL WHERE rn=%s", (rn,)) if len(res) == 0: raise InvenioWebSubmitFunctionStop(printNotRequested(rn)) else: if res[0][0] == "approved": raise InvenioWebSubmitFunctionStop(printApproved(rn)) elif res[0][0] == "rejected": raise InvenioWebSubmitFunctionStop(printRejected(rn)) return "" def printNotRequested(rn): t=""" <SCRIPT> document.forms[0].action="/submit"; document.forms[0].curpage.value = 1; document.forms[0].step.value = 0; user_must_confirm_before_leaving_page = false; alert('The document %s has never been asked for approval.\\nAnyway, you can still choose another document if you wish.'); document.forms[0].submit(); </SCRIPT>""" % rn return t def printApproved(rn): t=""" <SCRIPT> document.forms[0].action="/submit"; document.forms[0].curpage.value = 1; document.forms[0].step.value = 0; user_must_confirm_before_leaving_page = false; alert('The document %s has already been approved.\\nAnyway, you can still choose another document if you wish.'); document.forms[0].submit(); </SCRIPT>""" % rn return t def printRejected(rn): t=""" <SCRIPT> document.forms[0].action="/submit"; document.forms[0].curpage.value = 1; document.forms[0].step.value = 0; user_must_confirm_before_leaving_page = false; alert('The document %s has already been rejected.\\nAnyway, you can still choose another document if you wish.'); document.forms[0].submit(); </SCRIPT>""" % rn return t
gpl-2.0
8,220,769,128,250,570,000
2,213,027,908,626,514,000
35.75
124
0.690962
false